metadata
dict | text
stringlengths 60
3.49M
|
---|---|
{
"source": "jonathanvanschenck/DanknD",
"score": 2
} |
#### File: app/game/events.py
```python
from flask import copy_current_request_context
from flask_socketio import emit, disconnect, join_room
from flask_login import current_user
from app import socketio,db
from app.models import Post, Game, Chapter, Scene
from app.game.roll_parser import roll_msg
# ---- Helper Functions ----
def _pyobj_to_obj_list(object, result_list, depth, user = None):
json = {}
try:
json['dest_id'] = object.scene.get_inner_HTML_id()
except AttributeError:
pass
try:
json['dest_id'] = object.chapter.get_inner_HTML_id()
except AttributeError:
pass
json.update({
"html" : object.to_HTML(user),
"id" : object.get_outer_HTML_id()
})
result_list.append(json)
if depth > 0:
child_list = []
try:
child_list = object.scenes
except AttributeError:
pass
try:
child_list = object.posts
except AttributeError:
pass
for c in child_list:
_pyobj_to_obj_list(c, result_list, depth = depth-1, user = user)
return
def json_to_object(json):
model = {"post":Post,"chapter":Chapter,"scene":Scene}[json['type'].lower()]
return model.query.get(json['objid'])
def generate_obj_list(json_list, user = None):
"""json_list = [json,json,...]
json = {"objid":int,"type":str,"recursive":bool,depth:int}
objid: int used by model.query.get() to pull sqlalchemy object
type: str used to select model type (ie "Post" or "Chapter"),
insenstitive to case
depth: interger (or string) specifying what tree depth to recrusively
pull children from (0 (or "none") -> no children, 1 -> children,
2 (or "all") -> children of children)
"""
result_list = []
for json in json_list:
depth = json.pop('depth',0)
try:
depth = {"none":0,"all":2}[depth]
except KeyError:
pass
_pyobj_to_obj_list(
json_to_object(json),
result_list,
depth,
user
)
return result_list
# --- Socketio Functions ---
@socketio.on('connect', namespace='/game')
def on_connect():
emit('log', {'data': 'Connected at server!'})
@socketio.on('disconnect_request')
def on_disconnect_request():
@copy_current_request_context
def can_disconnect():
disconnect()
return emit('log', {'data': 'Disconnected from server!'}, callback=can_disconnect)
@socketio.on('join', namespace='/game')
def on_join(msg):
join_room(msg['gameid'])
emit('log', {'data': 'Joined room: '+msg['gameid']})
game = Game.query.get(int(msg['gameid']))
if game is None:
disconnect()
return
obj_list = generate_obj_list(
[{
"objid":c.id,
"type":"Chapter",
"depth":["none","all"][int(c is game.current_chapter)]
} for c in game.chapters],
current_user
)
emit('render_objects',
{
'object_list':obj_list,
'clear_all':True
},
room = msg['gameid'],
broacast = True
)
@socketio.on('echo', namespace='/game')
def on_echo(msg):
emit('log', msg)
@socketio.on('create_post', namespace='/game')
def on_create_post(msg):
if current_user.is_anonymous:
return
game = Game.query.get(int(msg['gameid']))
# Make sure use has post priveleges
if not game.has_member(current_user):
return
scene = game.current_scene
speaker = msg['speaker']
# Make sure no one hacks the form to speak for another character
if not speaker in [c.name for c in current_user.owned_characters]:
speaker = "Narrator"
p = Post(
speaker = speaker,
body = roll_msg(msg['body']),
poster_id = current_user.id,
scene = scene,
)
db.session.add(p)
db.session.commit()
obj_list = generate_obj_list(
[{"objid":p.id,"type":"Post","depth":"none"}],
current_user
)
emit(
'render_objects',
{
'object_list':obj_list,
'clear_all':False
},
room = msg['gameid'],
broacast = True
)
@socketio.on('set_typing', namespace='/game')
def on_set_typing(msg):
emit(
'is_typing',
msg,
room = msg['gameid'],
broadcast = True
)
@socketio.on('get_children', namespace='/game')
def on_get_children(msg):
obj_list = generate_obj_list(
msg['json_list'],
current_user
)
emit(
'render_objects',
{
'object_list':obj_list,
'clear_all':False,
'skip_scroll':True
},
room = msg['gameid'],
broacast = False
)
@socketio.on('get_currents', namespace='/game')
def on_get_currents(msg):
game = Game.query.get(int(msg['gameid']))
chapter = game.current_chapter
scene = game.current_scene
emit(
'modify_currents',
{
"current_chapter_id":chapter.get_outer_HTML_id(),
"current_scene_id":scene.get_outer_HTML_id(),
"current_scene_body_id":scene.get_inner_HTML_id()
},
room = msg['gameid']
)
def set_currents(gameid):
game = Game.query.get(gameid)
chapter = game.current_chapter
scene = game.current_scene
emit(
'modify_currents',
{
"current_chapter_id":chapter.get_outer_HTML_id(),
"current_scene_id":scene.get_outer_HTML_id(),
"current_scene_body_id":scene.get_inner_HTML_id()
},
room = str(gameid),
broadcast = True,
namespace = '/game'
)
```
#### File: app/game/forms.py
```python
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField, BooleanField,\
TextAreaField, PasswordField
from wtforms.validators import DataRequired, EqualTo, ValidationError
from app.models import Character, Game
from app.game.roll_parser import validate_error, roll_msg
# --- Games ---
class CreateGameForm(FlaskForm):
game_name = StringField('Game Name', validators=[DataRequired()])
blurb = TextAreaField('Summary', validators=[DataRequired()])
chapter_name = StringField('First Chapter Name', validators=[DataRequired()], default="1")
scene_name = StringField('First Scene Name', validators=[DataRequired()], default="1")
password = PasswordField('Password (leave blank for none)')
password2 = PasswordField(
'Repeat Password', validators=[EqualTo('password')])
player_max = StringField('Player Max:', validators=[DataRequired()],
default="5")
submit = SubmitField('Create Game')
def get_player_max(self):
try:
return int(self.player_max.data)
except (ValueError, TypeError):
return None
def validate_player_max(self, field):
pm = self.get_player_max()
if pm is None:
raise ValidationError("Player max must be an integer")
elif pm < 1:
raise ValidationError("Player max must be positive")
class EditGameForm(FlaskForm):
name = StringField('Game Name', validators=[DataRequired()])
blurb = TextAreaField('Summary', validators=[DataRequired()])
player_max = StringField('Player Max:', validators=[DataRequired()])
submit = SubmitField('Submit')
def __init__(self,game,*args,**kwargs):
FlaskForm.__init__(self,*args,**kwargs)
self.game = game
def get_player_max(self):
try:
return int(self.player_max.data)
except (ValueError, TypeError):
return None
def set_player_max(self,integer):
self.player_max.data = str(integer)
def validate_player_max(self, field):
pm = self.get_player_max()
if pm is None:
raise ValidationError("Player max must be an integer")
elif pm < 1:
raise ValidationError("Player max must be positive")
elif len(self.game.players) > pm:
raise ValidationError(
"Current player number ({}) exceeds desired player max".format(
len(self.game.players)
)
)
class JoinGameForm(FlaskForm):
name = StringField('Character Name', validators=[DataRequired()])
visible = BooleanField('Make Character Visible?',default=True)
password = PasswordField('<PASSWORD>')
submit = SubmitField('Join')
def __init__(self,game,*args,**kwargs):
FlaskForm.__init__(self,*args,**kwargs)
self.game = game
def validate_name(self,field):
if field.data in [c.name for c in self.game.characters]:
raise ValidationError("Character Name Already in Use")
def validate_password(self,field):
if not self.game.check_password(field.data):
raise ValidationError("Incorrect Password")
# --- Chapters ---
class CreateChapterForm(FlaskForm):
chapter_name = StringField('Chapter Name', validators=[DataRequired()])
scene_name = StringField('First Scene Name', validators=[DataRequired()], default="1")
make_current = BooleanField('Make Current', default=True)
submit = SubmitField('Create Chapter')
class EditChapterForm(FlaskForm):
chapter_name = StringField('Chapter Name', validators=[DataRequired()])
make_current = BooleanField('Make Current')
submit = SubmitField('Submit')
# --- Scenes ----
class CreateSceneForm(FlaskForm):
scene_name = StringField('Scene Name', validators=[DataRequired()])
make_current = BooleanField('Make Current', default=True)
submit = SubmitField('Create Chapter')
class EditSceneForm(FlaskForm):
scene_name = StringField('Scene Name', validators=[DataRequired()])
make_current = BooleanField('Make Current')
submit = SubmitField('Submit')
# --- Posts ----
class EditPostForm(FlaskForm):
speaker = SelectField('Speaker', choices=[])
body = TextAreaField('Body')
submit = SubmitField('Submit')
def validate_body(self, field):
try:
self.body_rolled = roll_msg(field.data)
except SyntaxError as E:
raise ValidationError(E.args[0])
# --- Extras ---
class ConfirmDeleteForm(FlaskForm):
confirm = BooleanField('This cannot be undone, please be sure...',
validators=[DataRequired()])
delete = SubmitField('Delete')
class ModifyPasswordForm(FlaskForm):
password = PasswordField('<PASSWORD>')
password2 = PasswordField(
'Repeat Password', validators=[EqualTo('password')])
change = SubmitField('Change Password')
# --- Characters ---
class CreateCharacterForm(FlaskForm):
name = StringField('Character Name', validators=[DataRequired()])
game = SelectField('Select Game', coerce = int)
public = BooleanField('Make Character Visible?', default=True)
submit = SubmitField('Create Character')
class EditCharacterForm(FlaskForm):
game = SelectField('Game', coerce = int)
public = BooleanField('Make Character Visible?', default=True)
submit = SubmitField('Update Character')
class DeleteCharacterForm(FlaskForm):
name = StringField('Type Character Name', validators=[DataRequired()])
submit = SubmitField('Yes, Delete This Character Forever')
```
#### File: app/game/routes.py
```python
from flask import url_for, render_template, flash, redirect
from flask_login import current_user, login_required
from app import db
from app.game import bp
from app.models import Game, Character, Chapter, Scene, Post, User
from app.game.forms import CreateGameForm, EditGameForm, CreateCharacterForm,\
DeleteCharacterForm, EditCharacterForm,\
CreateChapterForm, EditChapterForm,\
CreateSceneForm, EditSceneForm,\
EditPostForm,\
ConfirmDeleteForm, ModifyPasswordForm,\
JoinGameForm
from app.game.events import set_currents
# --- Games ---
@bp.route('/games')
def games():
game_list = Game.query.all()
return render_template("game/games.html", game_list = game_list)
@bp.route('/game/<gameid>')
def game(gameid):
g = Game.query.get_or_404(gameid)
if current_user.is_anonymous or ((not current_user in g.players) and (current_user!=g.owner)):
return render_template('game/game_viewer.html', game=g)
c = Character.query.filter_by(game=g, player=current_user)
option_list = [n.name for n in c]
return render_template('game/game_interactive.html', option_list=option_list, game=g, user=current_user)
@bp.route('/join/<gameid>', methods=['GET', 'POST'])
@login_required
def join(gameid):
g = Game.query.get_or_404(gameid)
if g.has_member(current_user):
return redirect(url_for('game.game',gameid=gameid))
if g.is_full():
flash("That game is already full!")
return redirect(url_for('game.game',gameid=gameid))
form = JoinGameForm(g)
if form.validate_on_submit():
c = Character(name=form.name.data,player=current_user,
game=g,public=form.visible.data)
g.players.append(current_user)
db.session.add(c)
db.session.commit()
flash('Welcome to the game!')
return redirect(url_for('game.game',gameid=gameid))
return render_template('game/join.html', game=g, form=form)
@bp.route('/game/<gameid>/abandon', methods=['GET', 'POST'])
@login_required
def abandon(gameid):
return remove(gameid,user=current_user,boot=False)
@bp.route('/game/<gameid>/boot/<userid>', methods=['GET', 'POST'])
@login_required
def boot(gameid,userid):
user = User.query.get_or_404(userid)
return remove(gameid,user=user,boot=True)
def remove(gameid,user,boot):
g = Game.query.get_or_404(gameid)
if not g.has_player(user):
return redirect(url_for('game.game',gameid=gameid))
form = ConfirmDeleteForm()
form.delete.label.text = ["Abandon","Boot"][boot]
if form.validate_on_submit():
g.remove_player(user)
db.session.commit()
return redirect(url_for('game.game',gameid=gameid))
return render_template('game/'+ ["abandon","boot_player"][boot] +'.html', game=g,
form=form, player=user)
@bp.route('/create_game', methods=['GET', 'POST'])
@login_required
def create_game():
form = CreateGameForm()
if form.validate_on_submit():
game = Game(name=form.game_name.data, owner=current_user,
blurb=form.blurb.data,player_max=form.get_player_max())
game.set_password(form.password.data)
chapter = Chapter(name=form.chapter_name.data, game=game)
scene = Scene(name=form.chapter_name.data, chapter=chapter)
db.session.add_all([game,chapter,scene])
game.ensure_has_current()
db.session.commit()
set_currents(int(game.id))
flash('Congratulations, you created a game called "{}"!'.format(game.name))
return redirect(url_for('game.game', gameid = game.id))
return render_template('game/create_game.html', form=form)
@bp.route('/edit_game/<gameid>', methods=['GET', 'POST'])
@login_required
def edit_game(gameid):
game = Game.query.get_or_404(gameid)
if not game.can_edit(current_user):
flash('Naughty!')
return redirect(url_for('front.index'))
form = EditGameForm(game)
player_list = game.players
mform = ModifyPasswordForm()
dform = ConfirmDeleteForm()
if dform.delete.data and dform.validate_on_submit():
game.empty() # remove all chapters, scenes, posts, etc
for c in game.characters:
db.session.delete(c) # remove all characters
db.session.delete(game) # remove actual game
db.session.commit()
return redirect(url_for('auth.userpage', username = current_user.username))
elif mform.change.data and mform.validate_on_submit():
game.set_password(mform.password.data)
db.session.commit()
return redirect(url_for('game.game', gameid=gameid))
elif form.submit.data and form.validate_on_submit():
game.name = form.name.data
game.blurb = form.blurb.data
game.player_max = form.get_player_max()
db.session.commit()
return redirect(url_for('game.game', gameid=gameid))
else:
form.name.data = game.name
form.blurb.data = game.blurb
form.set_player_max(game.player_max)
dform.confirm.data = False
return render_template('game/edit_game.html', form=form, mform=mform,
dform=dform, game=game, player_list = player_list)
# --- Chapters ---
@bp.route('/game/<gameid>/create_chapter', methods=['GET', 'POST'])
@login_required
def create_chapter(gameid):
game = Game.query.get_or_404(gameid)
if not game.can_edit(current_user):
flash('Naughty!')
return redirect(url_for('front.index'))
form = CreateChapterForm()
if form.validate_on_submit():
c = Chapter(name=form.chapter_name.data, game=game)
s = Scene(name=form.scene_name.data, chapter=c)
db.session.add_all([c,s])
if form.make_current:
game.current_chapter = c
c.ensure_has_current()
db.session.commit()
set_currents(int(gameid))
return redirect(url_for('game.game', gameid=gameid))
return render_template('game/create_chapter.html', form=form, game=game)
@bp.route('/game/<gameid>/chapter/<chapterid>', methods=['GET', 'POST'])
@login_required
def chapter(gameid,chapterid):
game = Game.query.get_or_404(gameid)
chapter = Chapter.query.get_or_404(chapterid)
if not chapter.can_edit(current_user):
flash('Naughty!')
return redirect(url_for('front.index'))
form = EditChapterForm()
dform = ConfirmDeleteForm()
if form.submit.data and form.validate_on_submit():
chapter.name = form.chapter_name.data
if form.make_current:
game.current_chapter = chapter
chapter.ensure_has_current()
db.session.commit()
set_currents(int(gameid))
else:
db.session.commit()
return redirect(url_for('game.game', gameid=gameid))
elif dform.delete.data and dform.validate_on_submit():
chapter.empty()
db.session.delete(chapter)
if len(game.chapters) == 0:
c = Chapter(name="1",game=game)
db.session.add(c)
game.current_chapter = c
s = Scene(name="1",chapter=c)
db.session.add(s)
c.current_scene = s
db.session.commit()
set_currents(int(gameid))
return redirect(url_for('game.game', gameid=gameid))
else:
form.chapter_name.data = chapter.name
form.make_current.data = chapter.is_current
dform.confirm.data = False
return render_template('game/chapter.html', form=form, dform=dform,
game=game, chapter=chapter)
# --- Scene ---
@bp.route('/game/<gameid>/chapter/<chapterid>/create_scene', methods=['GET', 'POST'])
@login_required
def create_scene(gameid,chapterid):
game = Game.query.get_or_404(gameid)
chapter = Chapter.query.get_or_404(chapterid)
if not chapter.can_edit(current_user):
flash('Naughty!')
return redirect(url_for('front.index'))
form = CreateSceneForm()
if form.validate_on_submit():
s = Scene(name=form.scene_name.data, chapter=chapter)
db.session.add(s)
if form.make_current:
chapter.current_scene = s
db.session.commit()
set_currents(int(gameid))
return redirect(url_for('game.game', gameid=gameid))
return render_template('game/create_scene.html', form=form, game=game, chapter=chapter)
@bp.route('/game/<gameid>/chapter/<chapterid>/scene/<sceneid>', methods=['GET', 'POST'])
@login_required
def scene(gameid,chapterid,sceneid):
game = Game.query.get_or_404(gameid)
chapter = Chapter.query.get_or_404(chapterid)
scene = Scene.query.get_or_404(sceneid)
if not scene.can_edit(current_user):
flash('Naughty!')
return redirect(url_for('front.index'))
form = EditSceneForm()
dform = ConfirmDeleteForm()
if form.submit.data and form.validate_on_submit():
scene.name = form.scene_name.data
if form.make_current:
chapter.current_scene = scene
db.session.commit()
set_currents(int(gameid))
return redirect(url_for('game.game', gameid=gameid))
elif dform.delete.data and dform.validate_on_submit():
scene.empty()
db.session.delete(scene)
if len(chapter.scenes) == 0:
s = Scene(name="1",chapter=chapter)
db.session.add(s)
chapter.current_scene = s
db.session.commit()
set_currents(int(gameid))
return redirect(url_for('game.game', gameid=gameid))
else:
form.scene_name.data = scene.name
form.make_current.data = scene.is_current
dform.confirm.data = False
return render_template('game/scene.html', form=form, dform=dform,
game=game, chapter=chapter, scene = scene)
# ---- Posts -----
@bp.route('/game/<gameid>/chapter/<chapterid>/scene/<sceneid>/post/<postid>', methods=['GET', 'POST'])
@login_required
def post(gameid,chapterid,sceneid,postid):
game = Game.query.get_or_404(gameid)
chapter = Chapter.query.get_or_404(chapterid)
scene = Scene.query.get_or_404(sceneid)
post = Post.query.get_or_404(postid)
if not post.can_edit(current_user):
flash('Naughty!')
return redirect(url_for('front.index'))
form = EditPostForm()
dform = ConfirmDeleteForm()
form.speaker.choices = [("Narrator", "Narrator")]\
+ [(c.name,c.name) for c in game.characters if c.player.username == current_user.username]
if form.submit.data and form.validate_on_submit():
post.speaker = form.speaker.data
post.body = form.body_rolled#.data
db.session.commit()
return redirect(url_for('game.game', gameid=gameid))
elif dform.delete.data and dform.validate_on_submit():
db.session.delete(post)
db.session.commit()
return redirect(url_for('game.game', gameid=gameid))
else:
form.speaker.data = post.speaker
form.body.data = post.body
dform.confirm.data = False
return render_template('game/post.html', form=form, dform = dform, game = game,
chapter = chapter, scene = scene, post = post)
# --- Characters ----
@bp.route('/character/<characterid>', methods=['GET', 'POST'])
@login_required
def character(characterid):
c = Character.query.get_or_404(characterid)
if not c in current_user.owned_characters:
flash('Naughty!')
return redirect(url_for('front.index'))
form = EditCharacterForm()
form.game.choices = [(0, "None")]+[(g.id, g.name) for g in current_user.owned_games+current_user.joined_games]
if form.validate_on_submit():
g = Game.query.get(form.game.data)
c.game = g
c.public = form.public.data
db.session.commit()
flash('Updated')
return redirect(url_for('game.character', characterid = c.id))
else:
try:
form.game.data = c.game.id
except AttributeError:
form.game.data = 0
form.public.data = c.public
return render_template('game/character.html', form=form, character=c)
@bp.route('/create_character', methods=['GET', 'POST'])
@login_required
def create_character():
form = CreateCharacterForm()
form.game.choices = [(0, "None")]+[(g.id, g.name) for g in current_user.owned_games+current_user.joined_games]
if form.validate_on_submit():
g = Game.query.get(form.game.data)
c = Character(name=form.name.data, player=current_user,
game=g, public=form.public.data)
db.session.add(c)
db.session.commit()
flash('Congratulations, you created a character called "{}"!'.format(c.name))
return redirect(url_for('game.character', characterid = c.id))
return render_template('game/create_character.html', form=form)
@bp.route('/delete_character/<characterid>', methods=['GET', 'POST'])
@login_required
def delete_character(characterid):
c = Character.query.get_or_404(characterid)
if not c in current_user.owned_characters:
flash('Naughty!')
return redirect(url_for('front.index'))
form = DeleteCharacterForm()
if form.validate_on_submit():
if c.name != form.name.data:
flash('Incorrect character name, try again')
return redirect(url_for('game.delete_character', characterid=c.id))
db.session.delete(c)
db.session.commit()
flash('You deleted the character called "{}"!'.format(form.name.data))
return redirect(url_for('auth.userpage', username = current_user.username))
return render_template('game/delete_character.html', form=form, character=c)
``` |
{
"source": "jonathanvanschenck/python-remote-object",
"score": 3
} |
#### File: python-remote-object/remote_object/server.py
```python
import socketserver
import pickle
import cloudpickle
from .errors import TraceableError
class Server(socketserver.TCPServer):
"""A remote_object server.Server object
This class is intended to host a python object so that
method calls made on a client.Client instance pointed at
this server can be forwaded here and applied to .pyobj,
any return values/errors resulting from that call are
passed back to the client and are returned/raised there.
This class inherits from a socketserver.TCPserver object.
The main extensions of the class is to hold a reference to
some generic python object, and add the .call_method method,
which handles calls, errors and returns.
:param server_address: a tuple containing the address and port
on which to host the server: (IP, PORT)
:param pyobj: a reference to python object which will be hosted
by the server. Any method calls made by a client
will be applied to this object, and all values/errors
it produces will be returned/raised.
Example Usage::
import time
sever.Server(('some ip',9999),time) as server:
# Set server up to accept connections forever,
# clients may now check the server's processor
# time, locally callable as time.time(), by
# calling: client.Client('some ip', 9999).time()
server.serve_forever()
"""
def __init__(self, server_address, python_object):
socketserver.TCPServer.__init__(self, server_address, MessageHandler)
self.pyobj = python_object
def call_method(self,fname,args,kwargs):
"""Handler for method calls on .pyobj
This method handles all attempted method calls on the .pyobj.
There are three main cases:
1) the requested method does not exist, causes an error
or is called with an incorrect signiture
=> Catch the error and return it with its traceback
2) the requested 'method' is actually and attribute
=> ignore the call signiture and return the attribute
3) the requested method is successfully called
=> return any resulting values
"""
try:
method = self.pyobj.__getattribute__(fname)
except Exception as e:
# Typically AttributeError, if .pyobj does not have
# the requested method
return TraceableError(e)
try:
return_value = method(*args,**kwargs)
except TypeError as e:
if "not callable" in e.args[0]:
# If 'method' is not callable, it is an attribute.
# assuming the .pyobj doesn't itself mistakenly
# call an uncallable object in the method.
return_value = method
else:
# Else, some other TypeError occured which is internal
# to the .pyobj method which was called
return TraceableError(e)
except Exception as e:
# All other internal .pyobj method errors
return TraceableError(e)
# Else, the method was successfully called
return return_value
#def encode_return_value(self,return_value):
# return pickle.dumps(return_value)
#def decode_message(self,msg):
# return pickle.loads(msg)
def parse_message(self,msg):
"""Parses incoming messages and encodes return values
"""
fname,args,kwargs = pickle.loads(msg)#self.decode_message(self,msg)
return_value = self.call_method(fname,args,kwargs)
return cloudpickle.dumps(return_value)#self.encode_return_value(return_value)
class MessageHandler(socketserver.StreamRequestHandler):
"""MessageHandler class for server.Server instance.
This class inherts from the socket.server.StreamRequestHandler,
see the socketserver documentation for details about its
use in a socketserver.TCPServer object (from which the server.Server
inherits).
The main overriden method from the base class is .handle(), which
gets the message send by a client TCP socket connection and passes
it to the server.Server instance for parsing. The server returns
a pre-encoded return message, which the handler then passes back
to the client over the same TCP socket.
"""
def handle(self):
msg = self.rfile.readline().strip()
rmsg = self.server.parse_message(msg)
self.wfile.write(rmsg)
``` |
{
"source": "jonathanvanschenck/smip",
"score": 2
} |
#### File: smip/extension/language.py
```python
from markdown.inlinepatterns import InlineProcessor
from markdown.extensions import Extension
GREEK = {
"alpha" : "α",
"Alpha" : "Α",
"beta" : "β",
"Beta" : "Β",
"gamma" : "γ",
"Gamma" : "Γ",
"delta" : "δ",
"Delta" : "Δ",
"epsilon" : "ε",
"Epsilon" : "Ε",
"zeta" : "ζ",
"Zeta" : "Ζ",
"eta" : "η",
"Eta" : "Η",
"theta" : "θ",
"Theta" : "Θ",
"iota" : "ι",
"Iota" : "Ι",
"kappa" : "κ",
"Kappa" : "Κ",
"lambda" : "λ",
"Lambda" : "Λ",
"mu" : "μ",
"Mu" : "Μ",
"nu" : "ν",
"Nu" : "Ν",
"xi" : "ξ",
"Xi" : "Ξ",
"omicron" : "ο",
"Omicron" : "Ο",
"pi" : "π",
"Pi" : "Π",
"rho" : "ρ",
"Rho" : "Ρ",
"sigma" : "σ",
"Simga" : "&Simga;",
"sigmaf" : "ς",
"tau" : "τ",
"Tau" : "Τ",
"upsilon" : "υ",
"Upsilon" : "Υ",
"phi" : "φ",
"Phi" : "Φ",
"chi" : "χ",
"Chi" : "Χ",
"psi" : "ψ",
"Psi" : "Ψ",
"omega" : "ω",
"Omega" : "Ω",
}
# Add accent characters
GREEK.update({
"omicronacute" : "ó",
"Omicronacute" : "Ó"
})
HEBREW = {
"alef" : "א",
"bet" : "ב",
"gimel" : "ג",
"dalet" : "ד",
"he" : "ה",
"vav" : "ו",
"zayin" : "ז",
"het" : "ח",
"tet" : "ט",
"yod" : "י",
"kaff" : "ך",
"kaf" : "כ",
"lamed" : "ל",
"memf" : "ם",
"mem" : "מ",
"nunf" : "ן",
"nun" : "נ",
"samekh" : "ס",
"ayin" : "ע",
"pef" : "ף",
"pe" : "פ",
"tsadif" : "ץ",
"tsadi" : "צ",
"qof" : "ק",
"resh" : "ר",
"shin" : "ש",
"tav" : "ת"
}
class ReplaceLeaderInline(InlineProcessor):
"""Replaces characters of type `\string` from a dictionary"""
def __init__(self, replace_map, *args, **kwargs):
super().__init__(r"\\([^\b\\&]+)", *args, **kwargs)
self.map = replace_map
def handleMatch(self, m, data):
el, start, end = None, None, None
try:
el = self.map[m.group(1)]
except KeyError:
pass
# print(m.groups())
else:
start = m.start(0)
end = m.end(0)
return el, start, end
class BibicalLanguageExtension(Extension):
def extendMarkdown(self, md):
md.inlinePatterns.register(
ReplaceLeaderInline(GREEK,md),
'greek_replace',
1000
)
md.inlinePatterns.register(
ReplaceLeaderInline(HEBREW,md),
'hebrew_replace',
1001
)
```
#### File: smip/smip/parser.py
```python
import biblescrapeway as bsw
class Parser:
def __init__(self, string):
self.range_litwst = string
(\[)[a-z]+(\])
def get_reference_text(string, index_file = None, version = "ESV"):
# # if "'" in string or '"' in string:
# # return string
# # print(string)
# if '`' in string:
# return string
#
# m = re.search("[ ]*\(([A-Z]+)\)[ ]*", string)
# if not m is None:
# version = m.group(1)
# string = string[:m.start(0)] + string[m.end(0):]
# try:
# ref_list = bsw.reference.parse_reference_string(string)
# except:
# return "Error parsing: '{}'".format(string)
ref_list = bsw.reference.parse_reference_string(string)
string_list = []
for r in ref_list:
# TODO : add lookup in index file
if r.start.equals(r.end) and not r.start.verse is None:
verse = bsw.scrap(r.start, version)
sub = verse.text
else:
verses = []
for chap in range(r.start.chapter,r.end.chapter+1):
verses += bsw.scrap(bsw.reference.Reference(r.start.book,chap,None),version)
sub = " ".join([v.text for v in verses if r.contains(v)])
string_list.append(sub)
# TODO : save queries in index file
return '{} : \`'.format(get_reference_only(string)) + " ... ".join(string_list) + '\`' + " ({})".format(version)
# def get_block_reference(string, index_file = None):
# # TODO
# return string
def get_reference_only(string):
"""Get a normalized reference string for a list of references"""
try:
ref_list = bsw.reference.parse_reference_string(string)
except:
return "Error parsing: '{}'".format(string)
previous_book, previous_chapter, previous_verse = None, None, None
string_list = []
for r in ref_list:
sub = ""
# Handle single verse reference
# HACK : .is_single is broken for chapters
if (r.start.equals(r.end)):
full_string = r.start.to_string().strip(" ")
if previous_book != r.start.book:
sub = full_string
elif (
# get C:V if previous is different
# chapter, or previous is an
# entire chapter
previous_chapter != r.start.chapter\
or previous_verse is None
):
sub = full_string.split(" ")[-1]
else:
sub = full_string.split(":")[-1]
# Handle range reference
else:
if previous_book != r.start.book:
sub = "{} ".format(r.start.book)
if (
previous_chapter != r.start.chapter\
or previous_verse is None
):
sub = sub + str(r.start.chapter)
if not r.start.verse is None:
sub = sub + ":{}-".format(r.start.verse)
else:
sub = sub + "-"
if (
r.start.chapter != r.end.chapter\
or r.start.verse is None
):
sub = sub + str(r.end.chapter)
if not r.end.verse is None:
sub = sub + ":{}".format(r.end.verse)
else:
sub = sub + str(r.end.verse)
# Prepare next loop
previous_book, previous_chapter, previous_verse = r.end.book, r.end.chapter, r.end.verse
string_list.append(sub)
return ", ".join(string_list)
``` |
{
"source": "jonathanvanschenck/spectra-sweet",
"score": 2
} |
#### File: flask/app/events.py
```python
from app import socketio
from flask import copy_current_request_context
from flask_socketio import disconnect
from flask_socketio import Namespace as _Namespace
from seabreeze_server import SeaBreezeServerError
from seabreeze.cseabreeze import SeaBreezeError
from app.webspectrometer import WebSpectrometerManager
sm = WebSpectrometerManager()
class Namespace(_Namespace):
def on_connect(self):
self.emit('log_on_client', {'data': 'connected from python'})
def on_disconnect(self):
@copy_current_request_context
def can_disconnect():
disconnect()
self.emit('log_on_client', {'data': 'disconnected from python'},
callback = can_disconnect)
def on_send_message(self, msg):
print("sending message at python")
self.emit('log_on_client', msg)
# TODO: ConnectionRefusedError is broken, since LITERALLY EVERYTHING requires
# The server to be up. Maybe add a whole wrapper error check?
class GUINamespace(Namespace):
def on_select_spectrometer(self,msg):
index = int(msg['index'])
success = True
try:
sm.select_spectrometer(index)
except (SeaBreezeServerError,ConnectionRefusedError):
self.emit('detach_spectrometer',{'dev_list':sm.list_devices()})
self.emit('attach_spectrometer',{'serial_number':sm.serial_number})
def on_deselect_spectrometer(self):
sm.deselect_spectrometer()
self.emit('detach_spectrometer',{'dev_list':sm.list_devices()})
def on_get_device_list(self):
self.emit('update_device_list',{'dev_list':sm.list_devices()})
def on_setup_spectrometer(self,msg):
try:
sm.parse_measure_type(**msg)
i = sm.intensities()
except (SeaBreezeServerError,SeaBreezeError,ConnectionRefusedError):
# either not selected, or not opened, or docker dead
self.emit('detach_spectrometer',{'dev_list':sm.list_devices()})
else:
self.emit('set_up_plot', {'x': list(sm.wavelengths()),
'y': list(i),
'it': str(sm.it),
'ave': str(sm.ave)})
def on_get_xy(self):
try:
i = sm.intensities()
except (SeaBreezeServerError,SeaBreezeError,ConnectionRefusedError):
# either not selected, or not opened, or docker dead
self.emit('detach_spectrometer',{'dev_list':sm.list_devices()})
else:
self.emit('update_xy', {'x': list(sm.wavelengths()),
'y': list(i)})
socketio.on_namespace(GUINamespace('/plot'))
```
#### File: flask/app/routes.py
```python
from app import app
from flask import render_template, url_for
@app.route('/')
@app.route('/index', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/gui', methods=['GET', 'POST'])
def gui():
return render_template('gui.html')
@app.route('/raw')
def raw():
return render_template('raw.html')
``` |
{
"source": "jonathanvanschenck/StatsimusPrime",
"score": 2
} |
#### File: statsimusprime/service/statsservice.py
```python
from .baseservice import IDError
from .sheetsservice import SheetsService
class StatsService(SheetsService):
def __repr__(self):
return "<StatsService Object>"
@property
def viewer_id(self):
if self.__viewer_id is None:
raise IDError("Service id is uninitialized, use .initialize_env(...)")
return self.__viewer_id
@viewer_id.setter
def viewer_id(self,id):
self.__viewer_id = id
if not self.__viewer_id is None:
self.retrieve_viewer_ids()
def retrieve_viewer_ids(self):
s = self.service.spreadsheets().get(spreadsheetId = self.__viewer_id).execute()
self.viewer_sheet_ids = {
sheet['properties']['title']: sheet['properties']['sheetId']\
for sheet in s['sheets']
}
self.viewer_sheet_properties = {
sheet['properties']['title']: sheet['properties']\
for sheet in s['sheets']
}
return self
def retrieve_meet_parameters(self, roster_json, draw_json):
self.meet_params = {}
self.meet_params['total_teams'] = len(set([quizzer['team'] for quizzer in roster_json]))
self.meet_params['total_quizzes'] = len(draw_json)
try:
self.meet_params['prelims_per_team_number'] = 3 * sum([quiz['type'] == "P" for quiz in draw_json]) // self.meet_params['total_teams']
except ZeroDivisionError:
self.meet_params['prelims_per_team_number'] = 0
self.meet_params['total_quizzers'] = len(roster_json)
try:
self.meet_params['total_quiz_slots'] = max([int(quiz['slot_num']) for quiz in draw_json])
except ValueError:
self.meet_params['total_quiz_slots'] = 0
try:
self.meet_params['total_rooms'] = max([int(quiz['room_num']) for quiz in draw_json])
except ValueError:
self.meet_params['total_rooms'] = 0
return self
# def generate_all_values(self):
# for sheet in ['DRAW','IND']:
# yield self.get_values(self.id,"'{}'!A1:ZZ300".format(sheet))
def generate_update_sheet_dimension_json(self, sheet_property_json,
column_count = None,
row_count = None):
"""Generate updateSheetPropertiesRequest to change sheet columnCount/rowCount
Note, this also updates the `.sheet_properties` object to reflect change
Parameters
----------
sheet_title : str
Title of the sheet to be modified
column_count : int
The number of columns specified for the sheet
row_count : int
The number of rows specified for the sheet. None means do not change
"""
fields = []
if not column_count is None:
fields.append("gridProperties.columnCount")
if not row_count is None:
fields.append("gridProperties.rowCount")
sheet_property_json = self.update_sheet_properties_json(
sheet_property_json,
grid_properties = self.generate_grid_properties_json(
column_count = column_count,
row_count = row_count
)
)
return self.generate_update_sheet_properties_json(
sheet_property_json,
fields = ",".join(fields)
)
def set_bracket_weights(self,weights_dictionary):
"""Change the ind score weighting of the bracket
weights : dictionary
Dictionary of weight values (floats, ints or None), with any of the following
keys: ["P", "S", "A", "B"]. If "None", then that bracket type does not
contribute to the total weight.
"""
processed_weights = [[1.0],[1.0],[0.7],[0.5]]
for i,k in enumerate("PSAB"):
try:
w = weights_dictionary.pop(k)
except KeyError:
pass
else:
if w is None:
processed_weights[i][0] = "NA"
else:
processed_weights[i][0] = w
value_range_list = [self.generate_value_range_json(
range = "Utils!C2:C5",
values = processed_weights
)]
self.batch_update_value(
file_id = self.id,
value_range_list = value_range_list
)
return self
def set_roster(self,roster_json):
"""Update the contents of the roster
Parameters
----------
roster_json : list
list of dictionaries representing each quizzer. Each dictionary should
have the keys: ["id", "team", "bib", "name", "moniker", "is_rookie", "is_cap", "is_cc"]
"""
value_range_list = []
column_names = {"id":0, "name":1, "moniker":2, "is_rookie":3, "is_cap":4, "is_cc":5}
team_list = sorted(list(set([quizzer["team"] for quizzer in roster_json])))
roster_matrix = [[team] + (5 * 6) * [""] for team in team_list]
for ti, team in enumerate(team_list):
for quizzer in [quizzer for quizzer in roster_json if quizzer["team"] == team]:
offset = 6 * (int(quizzer['bib']) - 1)
if roster_matrix[ti][1 + offset] != "":
# Log if quizzer is overwritten
print("Bib numbering error, both {} and {} have bib {}".format(
roster_matrix[ti][1 + offset + column_names['name']],
quizzer['name'],
quizzer['bib']
))
for k,v in column_names.items():
roster_matrix[ti][1 + offset + v] = quizzer[k]
value_range_list.append(
self.generate_value_range_json(
range = "Roster!A3:AE" + str(len(team_list) + 3),
values = roster_matrix
)
)
self.batch_update_value(
file_id = self.id,
value_range_list = value_range_list
)
return self
def set_draw(self, draw_json):#, roster_json):
"""Update the contents of the draw
Parameters
----------
draw_json : list
list of dictionaries representing each quiz. Each dictionary should
have the keys: ["quiz_num","slot_num","room_num","slot_time":,"team1","team2","team3", "url", "type"]
roster_json : list
list of dictionaries representing each quizzer. Each dictionary should
have the keys: ["id", "team", "bib", "name", "moniker", "is_rookie", "is_cap", "is_cc"]
"""
# Step 1: Insert draw into DrawLookup
column_names_left = ["quiz_num","slot_time","room_num","slot_num"]
column_names_right = ["team1","team2","team3"]
draw_matrix_left = [[quiz[key] for key in column_names_left] for quiz in draw_json]
draw_matrix_right = []
for quiz in draw_json:
if "_" in quiz['team1']:
if quiz['team1'][0] == "P":
# Calculate post-prelim ranking lookup
quiz_row = []
for key in column_names_right:
rank = int(quiz[key].split("_")[-1])
lookup = "TeamSummary!B{}".format(2+rank)
quiz_row.append('={}'.format(lookup))
else:
# Calculate schedule lookup
quiz_row = []
for key in column_names_right:
quiz_num, placement = quiz[key].split("_")
quiz_previous = [q for q in draw_json if q['quiz_num'] == quiz_num][0]
offset_row = 2 + 3 * (int(quiz_previous['slot_num']) - 1)
offset_column = 2 + 4 * (int(quiz_previous['room_num']) - 1)
team_range = "Schedule!{}:{}".format(
self.generate_A1_from_RC(offset_row + 0, offset_column + 1),
self.generate_A1_from_RC(offset_row + 2, offset_column + 1)
)
placement_range = "Schedule!{}:{}".format(
self.generate_A1_from_RC(offset_row + 0, offset_column + 3),
self.generate_A1_from_RC(offset_row + 2, offset_column + 3)
)
error_msg = "{placement}{suffix} in {quiz_num}".format(
placement = placement,
suffix = {"1":"st","2":"nd","3":"rd"}[placement],
quiz_num = quiz_num
)
quiz_row.append(
'=IFERROR(INDEX({team_range},MATCH({placement},{placement_range},0),0),"{error_msg}")'.format(
team_range = team_range,
placement = placement,
placement_range = placement_range,
error_msg = error_msg
)
)
else:
# Just add the prelim quiz
quiz_row = [quiz[key] for key in column_names_right]
draw_matrix_right.append(quiz_row)
#draw_matrix_right = [[quiz[key] for key in column_names_right] for quiz in draw_json]
value_range_list = [
self.generate_value_range_json(
range = "DrawLookup!B3:E" + str(len(draw_matrix_left) + 3),
values = draw_matrix_left
),
self.generate_value_range_json(
range = "DrawLookup!G3:I" + str(len(draw_matrix_right) + 3),
values = draw_matrix_right
)
]
self.batch_update_value(
file_id = self.id,
value_range_list = value_range_list,
value_input_option = "USER_ENTERED"
)
# Step 2: Prepare the rest of the DrawLookup page
#TN = self.meet_params['total_teams']#len(set([quizzer['team'] for quizzer in roster_json]))
#TQN = self.meet_params['total_quizzes']#len(draw_json)
sheet_id = self.sheet_ids['DrawLookup']
requests = []
# Set sheet width to 11 + 'total_teams'
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.sheet_properties['DrawLookup'],
column_count = 11 + self.meet_params['total_teams']
))
# Copy L2 right 'total_teams' times
bbox_source = list(self.generate_bbox_from_A1("L2:L2"))
bbox_dest = 1*bbox_source
bbox_dest[1] += 1
bbox_dest[3] += self.meet_params['total_teams'] - 1
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_FORMULA"
)
)
# Copy L3 right 'total_teams' times and down 'total_quizzes' times
bbox_source = list(self.generate_bbox_from_A1("L3:L3"))
bbox_dest = 1*bbox_source
bbox_dest[2] += self.meet_params['total_quizzes'] - 1
bbox_dest[3] += self.meet_params['total_teams'] - 1
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_FORMULA"
)
)
# Copy F3 down 'total_quizzes' times
bbox_source = list(self.generate_bbox_from_A1("F3:F3"))
bbox_dest = 1*bbox_source
bbox_dest[0] += 1
bbox_dest[2] += self.meet_params['total_quizzes'] - 1
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_FORMULA"
)
)
# Copy K3 down 'total_quizzes' times
bbox_source = list(self.generate_bbox_from_A1("K3:K3"))
bbox_dest = 1*bbox_source
bbox_dest[0] += 1
bbox_dest[2] += self.meet_params['total_quizzes'] - 1
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_FORMULA"
)
)
# Update QUIZINDEXLOOKUP to be DrawLookup!L3:(L3+TN+TQN)
bbox = list(self.generate_bbox_from_A1("L3:L3"))
bbox[2] += self.meet_params['total_quizzes'] - 1
bbox[3] += self.meet_params['total_teams'] - 1
requests.append(
self.generate_update_named_range_json(
sheet_id = sheet_id,
named_range_id = self.named_range_ids['QUIZINDEXLOOKUP'],
name = "QUIZINDEXLOOKUP",
bbox = bbox,
fields = "range"
)
)
response = self.batch_update(
file_id = self.id,
request_list = requests
)
return self
def initialize_team_summary(self):#, roster_json):
"""Prepares the TeamSummary tab
The copies down columns E and F
"""
#TN = len(set([quizzer['team'] for quizzer in roster_json]))
sheet_id = self.sheet_ids['TeamSummary']
requests = []
# Set sheet width to 10
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.sheet_properties['TeamSummary'],
column_count = 10
))
# Copy down E3:F3
bbox_source = list(self.generate_bbox_from_A1("E3:F3"))
bbox_dest = 1*bbox_source
bbox_dest[0] += 1
bbox_dest[2] += self.meet_params['total_teams'] - 1
requests.append(self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest
))
response = self.batch_update(
file_id = self.id,
request_list = requests
)
return self
def initialize_schedule(self):#, draw_json):
"""Prepares the schedule tab
"""
#TSN = max([int(quiz['slot_num']) for quiz in draw_json])
#TRN = max([int(quiz['room_num']) for quiz in draw_json])
sheet_id = self.sheet_ids['Schedule']
requests = []
# Set sheet width to 2 + 4*TRN
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.sheet_properties['Schedule'],
column_count = 2 + 4*self.meet_params['total_rooms'],
row_count = 2 + 3*self.meet_params['total_quiz_slots']
))
# Copy down A3:F5
bbox_source = list(self.generate_bbox_from_A1("A3:F5"))
bbox_dest = 1*bbox_source
for i in range(1,self.meet_params['total_quiz_slots']):
# Shift window down 3 rows
bbox_dest[0] += 3
bbox_dest[2] += 3
requests.append(self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest
))
# Copy right C1:F
bbox_source = list(self.generate_bbox_from_A1("C1:F"+str(2+3*self.meet_params['total_quiz_slots'])))
bbox_dest = 1*bbox_source
for i in range(1,self.meet_params['total_rooms']):
# Shift the window right 4 columns
bbox_dest[1] += 4
bbox_dest[3] += 4
requests.append(self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest
))
self.batch_update(
file_id = self.id,
request_list = requests
)
return self
def set_team_parsed(self):#, draw_json, roster_json):
#TN = len(set([quizzer['team'] for quizzer in roster_json]))
#PN = 3 * sum([quiz['type'] == "P" for quiz in draw_json]) // TN
# Update Quiz Total and Average team points formulas
points_cell_string = ", ".join([
self.generate_A1_from_RC(2, 8 + i * 5) for i in range(self.meet_params['prelims_per_team_number'])
])
value_range_list = [
self.generate_value_range_json(
range = "TeamParsed!C3:C3",
values = [["=SUM({})".format(points_cell_string)]]
),
self.generate_value_range_json(
range = "TeamParsed!D3:D3",
values = [['=IFERROR(AVERAGE({}), 0)'.format(points_cell_string)]]
)
]
self.batch_update_value(
file_id = self.id,
value_range_list = value_range_list,
value_input_option = "USER_ENTERED"
)
sheet_id = self.sheet_ids['TeamParsed']
requests = []
# Set sheet width to 4 + 5*PN
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.sheet_properties['TeamParsed'],
column_count = 4 + 5*self.meet_params['prelims_per_team_number']
))
# Copy A3 down TN times
bbox_source = list(self.generate_bbox_from_A1("A3:A3"))
bbox_dest = 1*bbox_source
bbox_dest[0] += 1
bbox_dest[2] += self.meet_params['total_teams'] - 1
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_FORMULA"
)
)
# Copy C3:I3 down TN times
bbox_source = list(self.generate_bbox_from_A1("C3:I3"))
bbox_dest = 1*bbox_source
bbox_dest[0] += 1
bbox_dest[2] += self.meet_params['total_teams'] - 1
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_FORMULA"
)
)
# Copy E1:(I2+TN) right PN times
bbox_source = list(self.generate_bbox_from_A1("E1:I1"))
bbox_source[2] += 1 + self.meet_params['total_teams'] # add header and TN teams
bbox_dest = 1*bbox_source
for i in range(1,self.meet_params['prelims_per_team_number']):
# Shift window right 5
bbox_dest[1] += 5
bbox_dest[3] += 5
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_NORMAL"
)
)
self.batch_update(
file_id = self.id,
request_list = requests
)
return self
def set_individual_parsed(self, roster_json):
#TN = len(set([quizzer['team'] for quizzer in roster_json]))
#PN = 3 * sum([quiz['type'] == "P" for quiz in draw_json]) // TN
#QN = len(roster_json)
value_range_list = []
# Step 1: inject roster
column_names = ["id", "name", "moniker", "team", "bib"]
roster_matrix = [[quizzer[k] for k in column_names] for quizzer in roster_json]
QN = len(roster_matrix)
value_range_list.append(
self.generate_value_range_json(
range = "IndividualParsed!B3:F" + str(QN + 3),
values = roster_matrix
)
)
# Step 2: correct formulas
column_indicies = [8 + 11 + i*12 for i in range(self.meet_params['prelims_per_team_number']+4)] # prelims
points_cell_string = ", ".join([
self.generate_A1_from_RC(2, ci) for ci in column_indicies
])
value_range_list.append(
self.generate_value_range_json(
range = "IndividualParsed!G3:G3",
values = [["=IFERROR(AVERAGE({}), 0)".format(points_cell_string)]]
)
)
value_range_list.append(
self.generate_value_range_json(
range = "IndividualParsed!T3:T3",
values = [['=IF(S3="","",S3*INDEX(WEIGHTSVALUE,MATCH(IF(I$1<={},"P",$H3),WEIGHTSKEY,0),0))'.format(self.meet_params['prelims_per_team_number'])]]
)
)
self.batch_update_value(
file_id = self.id,
value_range_list = value_range_list,
value_input_option = "USER_ENTERED"
)
sheet_id = self.sheet_ids['IndividualParsed']
requests = []
# Set sheet width to 8 + 12*(PN+4)
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.sheet_properties['IndividualParsed'],
column_count = 8 + 12*(self.meet_params['prelims_per_team_number'] + 4)
))
# Copy A3 down QN times
bbox_source = list(self.generate_bbox_from_A1("A3:A3"))
bbox_dest = 1*bbox_source
bbox_dest[0] += 1
bbox_dest[2] += QN - 1
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_FORMULA"
)
)
# Copy G3:T3 down QN times
bbox_source = list(self.generate_bbox_from_A1("G3:T3"))
bbox_dest = 1*bbox_source
bbox_dest[0] += 1
bbox_dest[2] += QN - 1
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_FORMULA"
)
)
# Header, P1, P2, ... , PN , B1 , B2 , B3 , B4
# 8 12 12 ... 12 12 12 12 12
# Copy quiz section over:
bbox_source = list(self.generate_bbox_from_A1("I1:T1"))
bbox_source[2] += 1 + QN # add header and QN quizzers
bbox_dest = 1*bbox_source
for i in range(1, self.meet_params['prelims_per_team_number'] + 4):
# Shift window right 12
bbox_dest[1] += 12
bbox_dest[3] += 12
requests.append(
self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest,
paste_type = "PASTE_NORMAL"
)
)
self.batch_update(
file_id = self.id,
request_list = requests
)
return self
def initialize_viewer(self):
"""Prepares the viewer's schedule tab
"""
# # Clean the viewer
# range_list = []
#
# range_list.append("Schedule!B3:E5")
#
# self.batch_clear_value(
# file_id = self.viewer_id,
# range_list = range_list
# )
sheet_id = self.viewer_sheet_ids['Schedule']
requests = []
# Set Schedule sheet width to 2 + 4*TRN
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.viewer_sheet_properties['Schedule'],
column_count = 2 + 4*self.meet_params['total_rooms'],
row_count = 2 + 3*self.meet_params['total_quiz_slots']
))
# Set DrawLookup sheet to 8 by 2+TQN
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.viewer_sheet_properties['DrawLookup'],
column_count = 8,
row_count = 2 + self.meet_params['total_quizzes']
))
# Set Roster sheet to 4*5 + 1 by 2+TN
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.viewer_sheet_properties['Roster'],
column_count = 1 + 4*5,
row_count = 2 + self.meet_params['total_teams']
))
# Set TeamSummary sheet to rows 2+TN
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.viewer_sheet_properties['TeamSummary'],
row_count = 2 + self.meet_params['total_teams']
))
# Set IndividualSummary sheet to rows 2+QN+5
requests.append(self.generate_update_sheet_dimension_json(
sheet_property_json = self.viewer_sheet_properties['IndividualSummary'],
row_count = 2 + self.meet_params['total_quizzers'] + 5
))
# Copy down A3:F5
bbox_source = list(self.generate_bbox_from_A1("A3:F5"))
bbox_dest = 1*bbox_source
for i in range(1,self.meet_params['total_quiz_slots']):
# Shift window down 3 rows
bbox_dest[0] += 3
bbox_dest[2] += 3
requests.append(self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest
))
# Copy right C1:F
bbox_source = list(self.generate_bbox_from_A1("C1:F"+str(2+3*self.meet_params['total_quiz_slots'])))
bbox_dest = 1*bbox_source
for i in range(1,self.meet_params['total_rooms']):
# Shift the window right 4 columns
bbox_dest[1] += 4
bbox_dest[3] += 4
requests.append(self.generate_copy_paste_json(
sheet_id = sheet_id,
bbox_source = bbox_source,
bbox_dest = bbox_dest
))
self.batch_update(
file_id = self.viewer_id,
request_list = requests
)
return self
# def copy_over_all(self):
# """Copies everything from stats doc to viewer doc
# """
# return self.copy_over_draw()
def copy_over_schedule(self):
"""Copies the schedule tab from stats to viewer
"""
str_temp = "Schedule!{}:{}"
range_list_source = []
range_list_dest = []
# Copy B3:?? to A3:??
range_list_source.append(str_temp.format(
self.generate_A1_from_RC(2,1),
self.generate_A1_from_RC(
2 + 3*self.meet_params['total_quiz_slots'] - 1,
2 + 4*self.meet_params['total_rooms'] - 1
)
))
range_list_dest.append(str_temp.format(
self.generate_A1_from_RC(2,0),
self.generate_A1_from_RC(
2 + 3*self.meet_params['total_quiz_slots'] - 1,
1 + 4*self.meet_params['total_rooms'] - 1
)
))
self.batch_copy_over(
file_id_source = self.id,
range_list_source = range_list_source,
file_id_dest = self.viewer_id,
range_list_dest = range_list_dest,
value_render_option = "FORMATTED_VALUE",
value_input_option = "USER_ENTERED"
)
return self
def copy_over_draw(self):
"""Copies the DrawLookup tab from stats to viewer
"""
str_temp = "DrawLookup!{}:{}"
range_list_source = []
range_list_dest = []
# Copy B3:I(2+TQN) to A3:H(2+TQN)
range_list_source.append(str_temp.format(
"B3",
"I"+str(2+self.meet_params['total_quizzes'])
))
range_list_dest.append(str_temp.format(
"A3",
"H"+str(2+self.meet_params['total_quizzes'])
))
self.batch_copy_over(
file_id_source = self.id,
range_list_source = range_list_source,
file_id_dest = self.viewer_id,
range_list_dest = range_list_dest,
value_render_option = "FORMATTED_VALUE",
value_input_option = "USER_ENTERED"
)
return self
def copy_over_roster(self):
"""Copies the Roster tab from stats to viewer
"""
str_temp = "Roster!{}:{}"
range_list_source = []
range_list_dest = []
# Copy A3:A(2+TN) to A3:A(2+TN)
range_list_source.append(str_temp.format(
"A3",
"A"+str(2+self.meet_params['total_teams'])
))
range_list_dest.append(str_temp.format(
"A3",
"A"+str(2+self.meet_params['total_teams'])
))
# Copy last four columns of each window over
for i in range(5):
range_list_source.append(str_temp.format(
self.generate_A1_from_RC(
2,
1 + 2 + 6*i
),
self.generate_A1_from_RC(
2 + self.meet_params['total_teams'] - 1,
1 + 2 + 6*i + 4 - 1
)
))
range_list_dest.append(str_temp.format(
self.generate_A1_from_RC(
2,
1 + 4*i
),
self.generate_A1_from_RC(
2 + self.meet_params['total_teams'] - 1,
1 + 4*i + 4 - 1
)
))
self.batch_copy_over(
file_id_source = self.id,
range_list_source = range_list_source,
file_id_dest = self.viewer_id,
range_list_dest = range_list_dest,
value_render_option = "FORMATTED_VALUE",
value_input_option = "USER_ENTERED"
)
return self
def copy_over_team_summary(self):
"""Copies the TeamSummary tab from stats to viewer
"""
str_temp = "TeamSummary!{}:{}"
range_list_source = []
range_list_dest = []
# Copy A3:I(2+TN) to A3:I(2+TN)
range_list_source.append(str_temp.format(
"A3",
"I"+str(2+self.meet_params['total_teams'])
))
range_list_dest.append(str_temp.format(
"A3",
"I"+str(2+self.meet_params['total_teams'])
))
self.batch_copy_over(
file_id_source = self.id,
range_list_source = range_list_source,
file_id_dest = self.viewer_id,
range_list_dest = range_list_dest,
value_render_option = "FORMATTED_VALUE",
value_input_option = "USER_ENTERED"
)
return self
def copy_over_individual_summary(self):
"""Copies the IndividualSummary tab from stats to viewer
"""
str_temp = "IndividualSummary!{}:{}"
range_list_source = []
range_list_dest = []
# Copy A3:A(2+QN) to A3:A(2+QN)
range_list_source.append(str_temp.format(
"A3",
"A"+str(2+self.meet_params['total_quizzers'])
))
range_list_dest.append(str_temp.format(
"A3",
"A"+str(2+self.meet_params['total_quizzers'])
))
# Copy D3:H(2+QN) to B3:F(2+QN)
range_list_source.append(str_temp.format(
"D3",
"H"+str(2+self.meet_params['total_quizzers'])
))
range_list_dest.append(str_temp.format(
"B3",
"F"+str(2+self.meet_params['total_quizzers'])
))
self.batch_copy_over(
file_id_source = self.id,
range_list_source = range_list_source,
file_id_dest = self.viewer_id,
range_list_dest = range_list_dest,
value_render_option = "FORMATTED_VALUE",
value_input_option = "USER_ENTERED"
)
return self
# def remove_ss_urls(self):
# """Remove url references to scoresheets from DrawLookup
# """
# self.batch_clear_value(
# file_id = self.viewer_id,
# range_list = ["DrawLookup!J3:J"+str(3+self.meet_params['total_quizzes'])]
# )
# return self
def update_ss_urls(self,draw_json):
"""Updates the url references to scoresheets in DrawLookup
"""
values = [['="{}"'.format(quiz['url'])] for quiz in draw_json]
value_range_list = [
self.generate_value_range_json(
range = "DrawLookup!J3:J"+str(3+self.meet_params['total_quizzes']-1),
values = values
)
]
self.batch_update_value(
file_id = self.id,
value_range_list = value_range_list,
value_input_option = "USER_ENTERED"
)
return self
``` |
{
"source": "JonathanVaughan/project",
"score": 3
} |
#### File: project/application/forms.py
```python
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, BooleanField, validators, IntegerField
from wtforms.validators import DataRequired
from application.models import Orders
class OrderForm(FlaskForm):
first_name = StringField('First Name',validators=[DataRequired()])
last_name = StringField('Last Name',validators=[DataRequired()])
number = StringField('number',validators=[DataRequired()])
address = StringField('Address',validators=[DataRequired()])
pizzaid = IntegerField('Pizza',validators=[DataRequired()])
order_quantity = IntegerField('Quantity',validators=[DataRequired()])
# last_name = StringField('Last Name', [validators.DataRequired()], [validators.Length(min=2, max=30)])
# number = StringField('Phone no.', [validators.DataRequired()], [validators.Length(min=2, max=30)])
# address = StringField('Address', [validators.DataRequired()], [validators.Length(min=2, max=30)])
# pizza = StringField('Pizza', [validators.DataRequired()], [validators.Length(min=2, max=30)])
# order_quantity = StringField('Quantity', [validators.DataRequired()], [validators.Length(min=2, max=30)])
submit = SubmitField("Place Order")
# last_name = StringField('Last Name',
# validators=[
# DataRequired(),
# Length(min=3, max=30)
# ])
# number = StringField('Phone no.',
# validators=[
# DataRequired(),
# Length(min=5, max=20)
# ])
# address = StringField('Address',
# validators=[
# DataRequired(),
# Length(min=5, max=140)
# ])
# pizza = StringField('Pizza',
# validators=[
# DataRequired(),
# Length(min=5, max=140)
# ])
# quantity = StringField('Quantity',
# StringField=[
# DataRequired()
# ])
class StockForm(FlaskForm):
pizza_name = StringField('Name', validators=[DataRequired()])
submit = SubmitField('Add')
#pizzaid = StringField('Pizza',
# validators=[
# DataRequired(),
# Length(min=5, max=140)
# ])
#stock_quantity = StringField('Quantity',
# validators=[
# DataRequired()
# ])
# submitf = SubmitField('Add Stock')
class updateorderform(FlaskForm):
orderstatus = StringField('order', [validators.DataRequired()])
submit = SubmitField('Delivered')
#def order_query():
# return Orders.query
``` |
{
"source": "jonathanverner/brython-jinja2",
"score": 2
} |
#### File: brython-jinja2/management/doc.py
```python
from http import server
import os
from plumbum import local, ProcessExecutionError
import sys
from webbrowser import open_new_tab
from .utils import M
sphinx = local['sphinx-build']
sphinx_args = ["-d", "_build/doctrees"]
apidoc = local['sphinx-apidoc']
@M.command()
def build(format="html"):
if format == "latex":
sphinx_args.extend(["-D", "latex_paper_size=a4"])
apidoc("-o", './doc/en/api/', './src/')
with local.cwd('./doc/en'):
sphinx(".", "_build", "-b", format, *sphinx_args, stdout=sys.stdout, stderr=sys.stderr)
@M.command()
def view(port=7364):
with local.cwd('./doc/en/_build/'):
open_new_tab("http://localhost:{}/".format(port))
server.test(HandlerClass=server.SimpleHTTPRequestHandler, ServerClass=server.HTTPServer, port=port)
```
#### File: brython-jinja2/management/utils.py
```python
from plumbum import cli
class M(cli.Application):
subcommands = {}
@classmethod
def print_commands(cls, root=None, indent=0):
if root is None:
root = cls.subcommands
for name, (app, sub_cmds) in root.items():
print(" "*indent, "Name:", name, "App:", app._NAME)
cls.print_commands(root=sub_cmds, indent=indent+2)
@classmethod
def command(cls, name=None):
postfix = name
def decorator(method):
if postfix is None:
name = method.__name__
else:
name = postfix
mod = method.__module__
if mod.startswith('management'):
mod = mod[len('management'):]
mod = mod.lstrip('.')
if mod == '__main__':
full_name = name
else:
full_name = mod+'.'+name
#print("Registering command", full_name)
app = cls
subcmds = cls.subcommands
for sub in full_name.split('.')[:-1]:
if sub not in subcmds:
#print(" Defining subcommand", sub)
sub_app = type(sub+'App', (cli.Application,),{})
sub_app = app.subcommand(sub)(sub_app)
subcmds[sub] = (sub_app, {})
else:
#print(" Subcommand defined", sub)
pass
app, subcmds = subcmds[sub]
#print("* Defining subcommand", name)
def main(self, *args):
method(*args)
newclass = type(name+'App', (cli.Application,),{"main": main})
newclass = app.subcommand(name)(newclass)
return method
return decorator
```
#### File: jonathanverner/brython-jinja2/snippets.py
```python
from brython_jinja2.utils import htmlparser as p
p.parse_attrs('ahoj')
p.parse_attrs('ahoj="10"')
class InvalidSyntax(Exception):
def __init__(self, message, src, pos):
super().__init__(message)
self.message = message
self.src=src
self.pos=pos
def __str__(self):
return "InvalidSyntax at "+str(self.pos)+": "+self.message+"\nsrc: "+self.src+"\n "+" "*self.pos+"^"
from brython_jinja2.template import Template
from brython_jinja2.context import Context
from brython_jinja2.platform import bs4
from browser import document as doc
ctx = Context()
d = bs4.Tag('<div></div>')
t = Template("""
<div class='{{ " ".join(css_classes) }}' id='{{ id }}' style='border:solid,1px,black'>
Ahoj {{ name }}<br/>
List: [{{ ",".join([str(a) for a in abc]) }}]<br/>
Index: {{ x }}<br/>
</div>
<div>
Name: <input type='text' value='{{ name }}' data-value-source='name' data-update-source-on='input' />
Number: <input type='text' value='{{ x+10 }}' data-value-source='x' data-update-source-on='input' />
List Element: <input type='text' value='{{ abc[x] }}' data-value-source='abc[x]' data-update-source-on='input' />
</div>
""")
ctx.css_classes=['red','green','blue']
ctx.id='test'
ctx.name='Jonathane'
ctx.x=0
ctx.abc=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
doc <= d._elt
f = t.render(ctx, d)
t._rendered_nodes[-2]._children[-2]._value_expr._src.strip('{{').strip('}}')
from brython_jinja2.template import Template
from brython_jinja2.context import Context
from brython_jinja2.platform import bs4
from browser import document as doc
ctx = Context()
d = bs4.Tag('<div></div>')
doc <= d._elt
t = Template("""<input type='text' value='{{ name }}' data-value-source='name' />""")
f = t.render(ctx, d)
from asyncio import coroutine, sleep
@coroutine
def dp(self):
yield sleep(1)
print("Ahoj")
c = dp(1)
from brython_jinja2 import context as ctx
c = ctx.Context()
c.a = 6
c.b = ctx.Immutable(7)
c.d = ctx.Immutable([1,2,3,4])
c.d.append(5)
print(c.d)
print(c)
print(c.immutable_attrs)
from brython_jinja2 import context as ctx
from brython_jinja2 import expression as exp
c = ctx.Context()
e, _ = exp.parse('(1+4)*4+x')
e.simplify()
c.x = 10
e.bind_ctx(c)
e.solve(10, exp.IdentNode('x'))
c.x = ctx.Immutable(6)
e.bind_ctx(c)
exp.simplify(e)
from brython_jinja2 import expression as exp
exp.parse('(10+30+1+10*/)+30')
```
#### File: src/brython_jinja2/rendernodes.py
```python
from asyncio import coroutine
from . import exceptions
from . import templatenodes as nodes
from . import environment
from . import interpolatedstr
from . import expression
from .platform import bs4
from .utils import delayedupdater
class RenderFactory:
AVAILABLE = {}
@classmethod
def register(cls, NodeType, RenderType):
cls.AVAILABLE[NodeType] = RenderType
def __init__(self, env):
self.env = env
self.active = { k:v for k,v in self.AVAILABLE.items() }
def from_node(self, node):
if not type(node) in self.active:
raise exceptions.RenderError("No renderer available for node "+str(node), location=node._location)
return self.active[type(node)](tpl_node=node, factory=self)
default_factory = RenderFactory(environment.default_env)
def register_render_node(Node):
def decorator(cls):
RenderFactory.register(Node, cls)
return cls
return decorator
class RenderNode(delayedupdater.DelayedUpdater):
def __init__(self, tpl_node=None, factory=default_factory):
super().__init__()
self._tpl_node = tpl_node
self._factory = factory
self._children = [self._factory.from_node(ch) for ch in tpl_node.children]
self._parent = None
for ch in self._children:
ch.bind('change', self._child_change_handler)
def clone(self, clone_into=None):
if clone_into is None:
clone_into = type(self)()
clone_into._tpl_node = self._tpl_node
clone_into._children = [ch.clone() for ch in self._children]
return clone_into
@coroutine
def render_into(self, ctx, parent=None):
self._parent = parent
for ch in self._children:
yield ch.render_into(ctx, parent=self._parent)
def destroy(self):
for ch in self._children:
ch.unbind('change')
ch.destroy()
@register_render_node(nodes.Content)
class HTMLElement(RenderNode):
UPDATE_ON = 'blur'
def __init__(self, tpl_node=None, factory=default_factory):
super().__init__(tpl_node, factory)
self._attrs = {}
self._dynamic_attrs = [ exp.clone() for exp in tpl_node._dynamic_attrs ]
for attr, val in tpl_node._args.items():
if isinstance(val, interpolatedstr.InterpolatedStr):
self._attrs[attr] = val.clone()
else:
self._attrs[attr] = val
self._value_expr = None
self._source_expr = None
self._update_on = self.UPDATE_ON
def clone(self, clone_into=None):
clone_into = super().clone(clone_into)
for attr, val in self._attrs.items():
if isinstance(val, interpolatedstr.InterpolatedStr):
clone_into._attrs[attr] = val.clone()
else:
clone_into._attrs[attr] = val
return clone_into
def _setup_value_binding(self, val):
self._value_expr = val.get_ast(0, strip_str=True)
src = self._attrs.get('data-value-source', None)
if isinstance(src, interpolatedstr.InterpolatedStr):
raise exceptions.TemplateSyntaxError('Value source must be a bare expression, not an interpolated string: '+str(src), src=None, location=self._tpl_node._location)
if src is None:
raise NotImplementedError("Guessing value source not supported yet, please provide a data-value-source attribute at "+str(self._tpl_node._location))
self._source_expr, _ = expression.parse(src)
self._update_on = self._attrs.get('data-update-source-on', self.UPDATE_ON)
self._elt._elt.bind(self._update_on, self._update_source)
@coroutine
def render_into(self, ctx, parent):
tn = self._tpl_node
self._elt = bs4.Tag("<"+tn._name+"><"+tn._end_name+">")
for attr, val in self._attrs.items():
if isinstance(val, interpolatedstr.InterpolatedStr):
val.bind_ctx(ctx)
val.bind('change', self._change_handler)
self._elt[attr] = val.value
if attr.lower() == 'value':
self._setup_value_binding(val)
else:
self._elt[attr]=val
for da in self._dynamic_attrs:
da.bind_ctx(ctx)
da.bind('change', self._change_handler)
try:
for attr, val in da.value.items():
self._elt[attr]=val
except:
pass
for ch in self._children:
yield ch.render_into(ctx, self._elt)
parent.append(self._elt)
def destroy(self):
super().destroy()
for val in self._attrs.values():
if isinstance(val, interpolatedstr.InterpolatedStr):
val.unbind('change')
for da in self._dynamic_attrs:
da.unbind('change')
self._elt.decompose()
def _update_source(self, evt):
if self._elt['value'] == self._value_expr.value:
print("VALUE UNCHANGED", self._value_expr.value)
return
else:
try:
self._value_expr.solve(self._elt['value'], self._source_expr)
except Exception as ex:
print("Context:", self._value_expr._ctx)
print("Source equiv to Value:", self._value_expr.equiv(self._source_expr))
print("Value Expr:", self._value_expr)
print("Source Expr:", self._source_expr)
print("Orig value:", self._elt['value'])
print("Exception setting value:", str(ex))
print("Final value", self._value_expr.value)
self._elt['value'] = self._value_expr.value
def _update(self):
for attr, val in self._attrs.items():
if isinstance(val, interpolatedstr.InterpolatedStr):
self._elt[attr] = val.value
for da in self._dynamic_attrs:
try:
for attr, val in da.value.items():
self._elt[attr]=val
except:
pass
@register_render_node(nodes.Content)
class Text(RenderNode):
def __init__(self, tpl_node=None, factory=default_factory):
super().__init__(tpl_node, factory)
self._interpolated = tpl_node._interpolated.clone()
def clone(self, clone_into=None):
clone_into = super().clone(clone_into)
clone_into._interpolated = self._interpolated.clone()
return clone_into
@coroutine
def render_into(self, ctx, parent):
self._interpolated.bind_ctx(ctx)
self._elt = bs4.NavigableString(self._interpolated.value)
parent.append(self._elt)
self._interpolated.bind('change', self._change_handler)
def _update(self):
self._elt.replace_with(self._interpolated.value)
def destroy(self):
super().destroy()
self._interpolated.unbind('change')
self._elt.decompose()
```
#### File: brython_jinja2/utils/__init__.py
```python
class Location:
@classmethod
def location_from_pos(cls, src, pos, name=None, filename=None):
loc = Location(src, name=name, filename=filename, ln=0, col=0, pos=0)
for c in src:
loc._inc_pos()
if c == '\n':
loc._newline()
if loc.pos >= pos:
break
return loc
def __init__(self, src='', name=None, filename=None, ln=0, col=0, pos=0):
self._src = src
self._name = name
self._fname = filename
self._ln = ln
self._col = col
self._pos = pos
@property
def line(self):
return self._ln
@property
def column(self):
return self._col
@property
def pos(self):
return self._pos
def _inc_pos(self, delta=1):
self._pos += delta
self._col += delta
def _newline(self):
self._ln += 1
self._col = 0
def clone(self):
return Location(self._src, name=self._name, filename=self._fname, ln=self._ln, col=self._col, pos=self._pos)
def context(self, num_ctx_lines=4):
ln = self.line
col = self.column
# Get the Context
src_lines = self._src.split('\n')
# If there is just a single line, don't bother with line numbers and context lines
if len(src_lines) < 2:
return ["src: "+self._src," "+" "*col+"^"]
start_ctx = max(ln-num_ctx_lines,0)
end_ctx = min(ln+num_ctx_lines+1,len(src_lines))
prev_lines = src_lines[start_ctx:ln]
post_lines = src_lines[ln+1:end_ctx]
# Get the current line with a caret indicating the column
cur_lines = ['', src_lines[ln], " "*col+"^"]
# Prepend line numbers & current line marker
line_num_len = len(str(end_ctx))
for i in range(len(prev_lines)):
prev_lines[i] = ' '+str(start_ctx+i).ljust(line_num_len+2) + prev_lines[i]
cur_lines[1] = '> '+str(ln).ljust(line_num_len)+ cur_lines[1]
cur_lines[2] = ' '+''.ljust(line_num_len) + cur_lines[2]
for i in range(len(post_lines)):
post_lines[i] = ' '+str(ln+i).ljust(line_num_len+2) + post_lines[i]
return prev_lines+post_lines+cur_lines
def __str__(self):
ret = '{ln}, {col}'.format(ln=self.line, col=self.column)
if self._fname is not None:
ret+="("+self._fname+")"
if self._name is not None:
ret+=self._name
return ret
def __repr__(self):
return str(self)
```
#### File: brython/browser/document.py
```python
from .html import MockElement, MockDomElt
class body(MockElement):
def __init__(self):
super().__init__('body')
def __getitem__(self,id):
if self.id == id:
return self
else:
return self._findChild(id)
def _reset(self):
self.attributes.clear()
self.children = []
self.parent = None
self.elt = MockDomElt(self)
self.text = ''
document = body()
class window:
document = document
```
#### File: brython/browser/timer.py
```python
class Timer:
_timers = {}
@classmethod
def run_deferred(cls,elapsed):
timers = cls._timers.copy()
for timer in timers.keys():
if timer.interval < elapsed:
timer.run()
timer.clear()
def __init__(self,meth,interval):
self._timers[self] = (interval,meth)
self.interval = interval
self.meth = meth
def run(self):
self.meth()
def clear(self):
try:
del self._timers[self]
except:
pass
def set_interval(meth,msec):
meth()
return None
def clear_interval(timer):
return
```
#### File: tests/brython_jinja2/test_expression.py
```python
from unittest.mock import patch
from pytest import raises
from tests.utils import TObserver
import brython_jinja2.expression as exp
from brython_jinja2.context import Context, Immutable
def test_parse_number():
assert exp.parse_number(" 123.5.6", 1) == (123.5, 6)
assert exp.parse_number(" 123.5z", 1) == (123.5, 6)
assert exp.parse_number(" -123.5z", 1) == (-123.5, 7)
def test_parse_string():
assert exp.parse_string("'ahoj\\''", 0) == ("ahoj\'", 8)
assert exp.parse_string('"123456"', 0) == ("123456", 8)
assert exp.parse_string('"\\\\3456"', 0) == ("\\3456", 8)
def test_tokenize():
tl = list(exp.tokenize("a - b"))
assert tl == [
(exp.T_IDENTIFIER, "a", 1),
(exp.T_OPERATOR, "-", 3),
(exp.T_IDENTIFIER, "b", 5),
]
tl = list(exp.tokenize("'123'+123.5==[ahoj]"))
assert tl == [
(exp.T_STRING, "123", 5),
(exp.T_OPERATOR, "+", 6),
(exp.T_NUMBER, 123.5, 11),
(exp.T_OPERATOR, '==', 13),
(exp.T_LBRACKET, '[', 14),
(exp.T_IDENTIFIER, 'ahoj', 18),
(exp.T_RBRACKET, ']', 19)
]
tl = list(exp.tokenize("a is not b"))
assert tl == [
(exp.T_IDENTIFIER, "a", 1),
(exp.T_OPERATOR, "is not", 10),
(exp.T_IDENTIFIER, 'b', 12),
]
tl = list(exp.tokenize("a is b"))
assert tl == [
(exp.T_IDENTIFIER, "a", 1),
(exp.T_OPERATOR, "is", 4),
(exp.T_IDENTIFIER, 'b', 12),
]
tl = list(exp.tokenize("a != b"))
assert tl == [
(exp.T_IDENTIFIER, "a", 1),
(exp.T_OPERATOR, "!=", 4),
(exp.T_IDENTIFIER, 'b', 12),
]
tl = list(exp.tokenize("a <= b"))
assert tl == [
(exp.T_IDENTIFIER, "a", 1),
(exp.T_OPERATOR, "<=", 4),
(exp.T_IDENTIFIER, 'b', 12),
]
tl = list(exp.tokenize("a = b"))
assert tl == [
(exp.T_IDENTIFIER, "a", 1),
(exp.T_EQUAL, "=", 3),
(exp.T_IDENTIFIER, 'b', 11),
]
tl = list(exp.tokenize("a < b"))
assert tl == [
(exp.T_IDENTIFIER, "a", 1),
(exp.T_OPERATOR, "<", 3),
(exp.T_IDENTIFIER, 'b', 11),
]
tl = list(exp.tokenize("for a in lst"))
assert tl == [
(exp.T_KEYWORD, "for", 3),
(exp.T_IDENTIFIER, "a", 5),
(exp.T_KEYWORD, 'in', 8),
(exp.T_IDENTIFIER, 'lst', 12),
]
def parse_mock(token_stream, end_tokens=[]):
if len(token_stream) == 0:
raise Exception("End of stream")
tok, val, pos = token_stream.pop(0)
if tok == exp.T_COLON:
return exp.IdentNode('None'), tok, 0
elif tok == exp.T_RBRACKET:
return None, tok, 0
if tok == exp.T_IDENTIFIER:
return exp.IdentNode(val), token_stream.pop(0)[0], 0
else:
return exp.ConstNode(val), token_stream.pop(0)[0], 0
@patch('brython_jinja2.expression._parse', parse_mock)
def test_parse_args():
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_COMMA, ',', 0),
(exp.T_IDENTIFIER, 'b', 0),
(exp.T_COMMA, ',', 0),
(exp.T_IDENTIFIER, 'c', 0),
(exp.T_EQUAL, '=', 0),
(exp.T_NUMBER, 123, 0),
(exp.T_RPAREN, ')', 0)
]
assert str(exp.parse_args(token_stream)) == "([a, b], {'c': 123})"
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_COMMA, ',', 0),
(exp.T_IDENTIFIER, 'b', 0),
(exp.T_RPAREN, ')', 0)
]
assert str(exp.parse_args(token_stream)) == "([a, b], {})"
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_EQUAL, '=', 0),
(exp.T_IDENTIFIER, 'b', 0),
(exp.T_RPAREN, ')', 0)
]
assert str(exp.parse_args(token_stream)) == "([], {'a': b})"
@patch('brython_jinja2.expression._parse', parse_mock)
def test_parse_lst():
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_COMMA, ',', 0),
(exp.T_NUMBER, 123, 0),
(exp.T_COMMA, ',', 0),
(exp.T_STRING, 'ahoj', 0),
(exp.T_RBRACKET, ']', 0)
]
assert str(exp.parse_lst(token_stream)) == "[a, 123, 'ahoj']"
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_KEYWORD, 'for', 0),
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_KEYWORD, 'in', 0),
(exp.T_IDENTIFIER, 'lst', 0),
(exp.T_RBRACKET, ']', 0)
]
c = exp.parse_lst(token_stream)
assert str(c) == "[a for a in lst]"
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_KEYWORD, 'for', 0),
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_KEYWORD, 'in', 0),
(exp.T_IDENTIFIER, 'lst', 0),
(exp.T_KEYWORD, 'if', 0),
(exp.T_IDENTIFIER, 'True', 0),
(exp.T_RBRACKET, ']', 0)
]
assert str(exp.parse_lst(token_stream)) == "[a for a in lst if True]"
@patch('brython_jinja2.expression._parse', parse_mock)
def test_parse_slice():
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_RBRACKET, ']', 0)
]
assert str(exp.parse_slice(token_stream)) == '(False, a, None, None)'
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_COLON, ':', 0),
(exp.T_RBRACKET, ']', 0)
]
assert str(exp.parse_slice(token_stream)) == '(True, a, None, None)'
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_COLON, ':', 0),
(exp.T_NUMBER, 123, 0),
(exp.T_RBRACKET, ']', 0)
]
assert str(exp.parse_slice(token_stream)) == '(True, a, 123, None)'
token_stream = [
(exp.T_COLON, ':', 0),
(exp.T_NUMBER, 123, 0),
(exp.T_RBRACKET, ']', 0)
]
assert str(exp.parse_slice(token_stream)) == '(True, None, 123, None)'
token_stream = [
(exp.T_IDENTIFIER, 'a', 0),
(exp.T_COLON, ':', 0),
(exp.T_NUMBER, 123, 0),
(exp.T_COLON, ':', 0),
(exp.T_IDENTIFIER, 'b', 0),
(exp.T_RBRACKET, ']', 0)
]
assert str(exp.parse_slice(token_stream)) == '(True, a, 123, b)'
def test_parse_interpolated_string():
ctx = Context()
ctx.name = 'Name'
_, asts = exp.parse_interpolated_str('Test text {{ 1+3 }} other text {{ "ahoj" }} final text.')
val = "".join([ast.evalctx(ctx) for ast in asts])
assert val == 'Test text 4 other text ahoj final text.'
_, asts = exp.parse_interpolated_str('Test text {{ 1+3 }} other text {{ name }} final text.')
val = "".join([ast.evalctx(ctx) for ast in asts])
assert val == 'Test text 4 other text Name final text.'
_, asts = exp.parse_interpolated_str('Test text {{ 1+3 }} other text {{ len(name) }} final text.')
val = "".join([ast.evalctx(ctx) for ast in asts])
assert val == 'Test text 4 other text 4 final text.'
_, asts = exp.parse_interpolated_str('Test text {{ "{{{{}}{}{}}}" }} other }}')
val = "".join([ast.evalctx(ctx) for ast in asts])
assert val == 'Test text {{{{}}{}{}}} other }}'
def test_parse():
ctx = Context()
# Test Simple Arithmetic Expressions
ast, _ = exp.parse('(1+1*8)*9')
assert ast.evalctx(ctx) is 81
# Test Simple Arithmetic Expressions
ast, _ = exp.parse('(1-1)')
assert ast.evalctx(ctx) is 0
# Test Simple Arithmetic Expressions
ast, _ = exp.parse('(-1)')
assert ast.evalctx(ctx) is -1
# Test Boolean Expressions
ast, _ = exp.parse('True and False')
assert ast.evalctx(ctx) is False
ast, _ = exp.parse('True and not False')
assert ast.evalctx(ctx) is True
# Test is
ast, _ = exp.parse("1 is None")
assert ast.evalctx(ctx) is False
ast, _ = exp.parse("None is None")
assert ast.evalctx(ctx) is True
ast, _ = exp.parse("False is not None")
assert ast.evalctx(ctx) is True
# Test Slices
ctx.s = "abcde"
ast, _ = exp.parse('s[-1]')
assert ast.evalctx(ctx) == 'e'
ast, _ = exp.parse('s[0]')
assert ast.evalctx(ctx) == 'a'
ast, _ = exp.parse('s[1:3]')
assert ast.evalctx(ctx) == 'bc'
ast, _ = exp.parse('s[0:-1:2]')
assert ast.evalctx(ctx) == 'ac'
ast, _ = exp.parse('s[1:]')
assert ast.evalctx(ctx) == 'bcde'
ast, _ = exp.parse('s[:-1]')
assert ast.evalctx(ctx) == 'abcd'
# Test Lists
ast, _ = exp.parse('[1,2,3,4]')
assert ast.evalctx(ctx) == [1, 2, 3, 4]
# Test Comprehension
ast, _ = exp.parse('[p+1 for p in [1,2,3,4]]')
assert ast.evalctx(ctx) == [2, 3, 4, 5]
ast, _ = exp.parse('[p+1 for p in [1,2,3,4] if p%2==0]')
assert ast.evalctx(ctx) == [3, 5]
# Test Builtins
ast, _ = exp.parse("str(10)")
assert ast.evalctx(ctx) == "10"
ast, _ = exp.parse("int('21')")
assert ast.evalctx(ctx) == 21
ast, _ = exp.parse("len([1,2,3])")
assert ast.evalctx(ctx) == 3
ctx.str = lambda x: "str("+str(x)+")"
ast, _ = exp.parse("str(10)")
assert str(ast) == "str(10)"
del ctx.str
# Test Object Access
ctx.obj = Context()
ctx.obj.a = 10
ctx.obj.b = Context()
ctx.obj.b.c = 20
ctx.obj.d = [Context({'a': 30})]
ast, _ = exp.parse('obj.a')
assert ast.evalctx(ctx) == 10
ast, _ = exp.parse('obj.b.c')
assert ast.evalctx(ctx) == 20
ast, _ = exp.parse('obj.d[0].a')
assert ast.evalctx(ctx) == 30
# Test Array Access
ast, _ = exp.parse('mylst[0][1][2]')
ctx.mylst = [[None, [None, None, "Ahoj"]]]
assert ast.evalctx(ctx) == "Ahoj"
# Test String slices
ast, _ = exp.parse('"ahoj"[1:]')
assert ast.evalctx(ctx) == "hoj"
ast, _ = exp.parse('"ahoj"[:1]')
assert ast.evalctx(ctx) == "a"
ast, _ = exp.parse('"ahoj"[-1]')
assert ast.evalctx(ctx) == "j"
# Test array concatenation
ast, _ = exp.parse('([0]+["mixin"])[1]')
assert ast.evalctx(ctx) == "mixin"
# Test Function Calls
ast, _ = exp.parse('"a,b,c,d".split(",")')
assert ast.evalctx(ctx) == ['a', 'b', 'c', 'd']
ctx.func = lambda x, ev: str(x+10)+ev
ctx.ch = 20
ctx.s = 'Hi'
ast, _ = exp.parse("func(ch,ev=s)")
ast.bind_ctx(ctx)
ctx.s = 'Hello'
assert ast.eval() == '30Hello'
assert ast.evalctx(ctx) == '30Hello'
# Test Complex Expressions
expr = '(1+2*obj.a - 10)'
ast, _ = exp.parse(expr)
assert ast.evalctx(ctx) == 11
expr = '[(1+2*a[1+3] - 10) for a in [[2,1,2,3,4,5],[1,2],[2,2,2,2,2,2,2]] if a[0] % 2 == 0]'
ast, _ = exp.parse(expr)
assert ast.evalctx(ctx) == [-1, -5]
# Test parse cache
for i in range(10):
expr = '[(1+2*a[1+3] - 10) for a in [[2,1,2,3,4,5],[1,2],[2,2,2,2,2,2,2]] if a[0] % 2 == 0]'
ast, _ = exp.parse(expr)
assert ast.evalctx(ctx) == [-1, -5]
def test_is_func():
ast, _ = exp.parse('(1+1*x)*9')
assert ast.is_function_call() is False
ast, _ = exp.parse('x')
assert ast.is_function_call() is False
ast, _ = exp.parse('f(1+1*x)')
assert ast.is_function_call() is True
ast, _ = exp.parse('a.b[10].f(1+1*x)')
assert ast.is_function_call() is True
def test_is_ident():
ctx = Context()
ast, _ = exp.parse('(1+1*x)*9')
ast.bind_ctx(ctx)
assert ast.is_assignable() is False
ast, _ = exp.parse('f(1+1*x)')
ast.bind_ctx(ctx)
assert ast.is_assignable() is False
ast, _ = exp.parse('None')
ast.bind_ctx(ctx)
assert ast.is_assignable() is False
ast, _ = exp.parse('1')
ast.bind_ctx(ctx)
assert ast.is_assignable() is False
ast, _ = exp.parse('"ahoj"')
ast.bind_ctx(ctx)
assert ast.is_assignable() is False
ast, _ = exp.parse('[1,2,3]')
ast.bind_ctx(ctx)
assert ast.is_assignable() is False
ast, _ = exp.parse('x[1:2:3]')
ast.bind_ctx(ctx)
assert ast.is_assignable() is False
ast, _ = exp.parse('a.b[10].f')
ast.bind_ctx(ctx)
assert ast.is_assignable() is True
ast, _ = exp.parse('a.b[x].f')
ast.bind_ctx(ctx)
assert ast.is_assignable() is True
ast, _ = exp.parse('a.b[x]')
ast.bind_ctx(ctx)
assert ast.is_assignable() is True
ast, _ = exp.parse('a.b')
ast.bind_ctx(ctx)
assert ast.is_assignable() is True
ast, _ = exp.parse('x')
ast.bind_ctx(ctx)
assert ast.is_assignable() is True
class TestCall(object):
def setup_method(self, method):
self.called = False
self.ctx = Context()
def test_call(self):
self.obj = None
self.event = None
def handler(x, event):
self.obj = x
self.event = event
self.called = True
self.ctx.handler = handler
self.ctx.ch = 10
ast, _ = exp.parse("handler(ch)")
ast.bind_ctx(self.ctx)
assert self.called is False
ast.call(event='Event')
assert self.obj == 10
assert self.event == 'Event'
assert self.called is True
self.called = False
a = ast.clone()
assert a.is_function_call()
assert self.called is False
def test_eval_assignment():
ctx = Context()
# Do not allow assigning to non-trivial expressions
ast, _ = exp.parse('(1+1*x)*9')
with raises(exp.ExpressionError):
ast.value = 10
# Do not allow assigning to built-in constants
ast, _ = exp.parse('True')
with raises(exp.ExpressionError):
ast.value = 10
# Do not allow assigning to function calls
ast, _ = exp.parse('f(1)')
with raises(exp.ExpressionError):
ast.value = 10
# Do not allow assigning to constant lists
ast, _ = exp.parse("[1,2,3,4]")
with raises(exp.ExpressionError):
ast.value = 10
# Do not allow assigning to constants
ast, _ = exp.parse("'ahoj'")
with raises(exp.ExpressionError):
ast.value = 10
# Allow assigning to non-existing variables
ast, _ = exp.parse('x')
ast.bind_ctx(ctx)
ast.value = 10
assert ctx.x == 10
# Allow assigning to existing variables
ast.value = 20
assert ctx.x == 20
# Allow assigning to list elements
ctx.lst = [1, 2, 3]
ctx.x = 0
ast, _ = exp.parse("lst[x]")
ast.bind_ctx(ctx)
ast.value = 20
assert ctx.lst[0] == 20
# Allow assigning to non-existing object attributes
ctx.obj = Context()
ast, _ = exp.parse('obj.test')
ast.bind_ctx(ctx)
ast.value = 30987
assert ctx.obj.test == 30987
# Allow assigning to existing object attributes
ast.value = 40
assert ctx.obj.test == 40
def test_simplify():
ctx = Context()
ctx.y = Immutable(10)
ast, _ = exp.parse('(1+4)*4+x')
assert str(ast.simplify()) == '20 + x'
ast, _ = exp.parse('(1+y)*4+x')
ast.bind_ctx(ctx)
assert str(ast.simplify(assume_const=[exp.IdentNode('y')])) == '44 + x'
def test_solve():
ctx = Context()
ast, _ = exp.parse('(1+4)*4+x')
ctx.x = 0
ast.bind_ctx(ctx)
ast.solve(10, exp.IdentNode('x'))
assert ctx.x == -10
class MockObject(object):
def __init__(self, depth=0):
if depth > 0:
self.child = MockObject(depth-1)
else:
self.leaf = True
class TestExpressionChanges(object):
def setup_method(self, method):
self.ctx = Context()
self.ctx._clear()
def prepare(self, expr):
self.obs, _ = exp.parse(expr)
self.obs.bind_ctx(self.ctx)
try:
self.obs.eval()
except:
pass
self.t = TObserver(self.obs)
def exec_test(self, new):
data = self.t.events.pop().data
if new is not None:
assert self.obs.value == new
else:
assert self.obs.cache_status is False
try:
self.obs.eval()
except:
pass
assert self.obs.defined is False
assert 'value' not in data
def test_clone(self):
self.prepare("x**2 + x")
clone = self.obs.clone()
ctx = Context()
ctx.x = 0
clone.bind_ctx(ctx)
self.ctx.x = 1
assert clone.value == 0
assert self.obs.value == 2
def test_arithmetic_exp(self):
self.ctx.a = 1
self.ctx.b = -2
self.ctx.c = 0.5
self.prepare("a*x**2 + b*x + c*x")
assert self.obs.cache_status is False
assert self.obs.defined is False
self.ctx.d = 10
assert len(self.t.events) == 0
self.ctx.x = 0
self.exec_test(0)
self.ctx.x = 1
self.exec_test(-0.5)
def test_comprehension(self):
self.ctx.lst = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
self.prepare("[p+1 for p in lst if p%2 == 0]")
assert self.obs.cache_status is True
self.ctx.lst.append(4)
assert self.obs.cache_status is False
self.exec_test([-3, -1, 1, 3, 5, 5])
self.ctx.lst.remove(4)
self.exec_test([-3, -1, 1, 3, 5])
self.ctx.lst.clear()
self.exec_test([])
def test_attr_acces(self):
self.ctx.root = MockObject(depth=3)
self.prepare("root.child.child.child.leaf and True")
assert self.obs.value is True
assert self.obs.cache_status is True
self.ctx.root.child.child.child.leaf = False
assert self.obs.cache_status is False
self.exec_test(False)
self.ctx.root.child = None
self.exec_test(None)
def test_func(self):
self.ctx.func = lambda x, y: x+y
self.prepare("func(a,b)")
assert self.obs.cache_status is False
self.ctx.a = 10
assert self.obs.cache_status is False
self.ctx.b = 20
assert self.obs.cache_status is False
self.exec_test(30)
self.ctx.b = 30
self.exec_test(40)
self.ctx.func = lambda x, y: x*y
self.exec_test(300)
del self.ctx.a
self.exec_test(None)
def test_array_index(self):
self.ctx.lst = [[1, 2, 3], 2, 3, 4, 5]
self.ctx.a = 0
self.prepare("lst[0][a]")
assert self.obs.cache_status is True
assert self.obs.value == 1
self.ctx.lst[1] = 2
self.exec_test(1)
self.ctx.a = 2
self.exec_test(3)
self.ctx.a = 3
self.exec_test(None)
self.ctx.lst[0].append(4)
self.exec_test(4)
self.ctx.lst.pop()
self.exec_test(4)
```
#### File: tests/brython_jinja2/test_location.py
```python
from brython_jinja2.utils import Location
src = """
from brython_jinja2.utils import Location
def a(test):
print("AHOJ")
def test_ctx():
loc = Location("abc")
assert
""".strip()
def test_location_context_single_line():
loc = Location(src="ahoj")
assert len(loc.context(num_ctx_lines=20)) == 2
def test_location_from_pos():
loc = Location.location_from_pos(src=src, pos=src.find("AHOJ"))
assert loc.line == 3
assert loc.column == 11
assert loc.pos == src.find("AHOJ")
def test_location_context():
loc = Location.location_from_pos(src=src, pos=src.find("AHOJ"))
ctx = loc.context(num_ctx_lines = 1)
assert len(ctx) == 5
loc = Location.location_from_pos(src=src, pos=src.find("from"))
ctx = loc.context(num_ctx_lines = 1)
assert len(ctx) == 4
loc = Location.location_from_pos(src=src, pos=src.find("assert"))
ctx = loc.context(num_ctx_lines = 1)
assert len(ctx) == 4
loc = Location.location_from_pos(src=src, pos=src.find("def"))
ctx = loc.context(num_ctx_lines = 20)
assert len(ctx) == 2+len(src.split('\n'))
ctx = loc.context(num_ctx_lines = 2)
assert len(ctx) == 7
```
#### File: brython-jinja2/tests/utils.py
```python
class TObserver(object):
def __init__(self, observer):
self.events = []
observer.bind('change', self.handler)
def handler(self, event):
self.events.append(event)
```
#### File: web/lib/console.py
```python
import sys
import traceback
from browser import window
class Console:
"""
A class providing a console widget. The constructor accepts
a domnode which should be a textarea and it takes it over
and turns it into a python interactive console.
"""
_credits = """ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.
"""
_copyright = """Copyright (c) 2012, <NAME> <EMAIL>
All Rights Reserved.
Copyright (c) 2001-2013 Python Software Foundation.
All Rights Reserved.
Copyright (c) 2000 BeOpen.com.
All Rights Reserved.
Copyright (c) 1995-2001 Corporation for National Research Initiatives.
All Rights Reserved.
Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.
All Rights Reserved.
"""
_license = """Copyright (c) 2012, <NAME> <EMAIL>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer. Redistributions in binary
form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided
with the distribution.
Neither the name of the <ORGANIZATION> nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
def __init__(self, elem):
self._elem = elem
self.credits.__repr__ = lambda: Console._credits
self.copyright.__repr__ = lambda: Console._copyright
self.license.__repr__ = lambda: Console._license
self._redirected = False
self._oldstdout = None
self._oldstderr = None
self.history = []
self.current = 0
self._status = "main" # or "block" if typing inside a block
self.current_line = ""
# execution namespace
self.editor_ns = {
'credits': self.credits,
'copyright': self.copyright,
'license': self.license,
'__name__': '__console__',
}
self._elem.bind('keypress', self.my_key_press)
self._elem.bind('keydown', self.my_key_down)
self._elem.bind('click', self.cursor_to_end)
version = sys.implementation.version
self._elem.value = "Brython %s.%s.%s on %s %s\n%s\n>>> " % (version[0],
version[1],
version[2],
window.navigator.appName,
window.navigator.appVersion,
'Type "copyright()", "credits()" or "license()" for more information.')
self._elem.focus()
self.cursor_to_end()
def add_to_ns(self, key, value):
"""
Adds key to the console's local scope. Think:
```
key=value
```
"""
self.editor_ns[key] = value
def _redirect_out(self):
if self._redirected:
sys.__console__ = False
sys.stdout = self._oldstdout
sys.stderr = self._oldstderr
self._redirected = False
else:
sys.__console__ = True
self._oldstdout = sys.stdout
self._oldstderr = sys.stderr
sys.stdout = self
sys.stderr = self
self._redirected = True
def credits(self):
self.write(self._credits)
def copyright(self):
self.write(self._copyright)
def license(self):
self.write(self._license)
def write(self, data):
self._elem.value += str(data)
def cursor_to_end(self, *_args):
pos = len(self._elem.value)
self._elem.setSelectionRange(pos, pos)
self._elem.scrollTop = self._elem.scrollHeight
def get_col(self, _area):
"""
returns the column position of the cursor
"""
sel = self._elem.selectionStart
lines = self._elem.value.split('\n')
for line in lines[:-1]:
sel -= len(line) + 1
return sel
def my_key_press(self, event):
if event.keyCode == 9: # tab key
event.preventDefault()
self._elem.value += " "
elif event.keyCode == 13: # return
src = self._elem.value
if self._status == "main":
self.current_line = src[src.rfind('>>>') + 4:]
elif self._status == "3string":
self.current_line = src[src.rfind('>>>') + 4:]
self.current_line = self.current_line.replace('\n... ', '\n')
else:
self.current_line = src[src.rfind('...') + 4:]
if self._status == 'main' and not self.current_line.strip():
self._elem.value += '\n>>> '
event.preventDefault()
return
self._elem.value += '\n'
self.history.append(self.current_line)
self.current = len(self.history)
if self._status == "main" or self._status == "3string":
try:
self._redirect_out()
_ = self.editor_ns['_'] = eval(self.current_line, self.editor_ns)
if _ is not None:
self.write(repr(_) + '\n')
self._elem.value += '>>> '
self._status = "main"
except IndentationError:
self._elem.value += '... '
self._status = "block"
except SyntaxError as msg:
if str(msg) == 'invalid syntax : triple string end not found' or \
str(msg).startswith('Unbalanced bracket'):
self._elem.value += '... '
self._status = "3string"
elif str(msg) == 'eval() argument must be an expression':
try:
self._redirect_out()
exec(self.current_line, self.editor_ns)
except:
# pylint: disable=bare-except; any exception can happen here
traceback.print_exc(self)
finally:
self._redirect_out()
self._elem.value += '>>> '
self._status = "main"
elif str(msg) == 'decorator expects function':
self._elem.value += '... '
self._status = "block"
else:
traceback.print_exc(self)
self._elem.value += '>>> '
self._status = "main"
# pylint: disable=bare-except; any exception can happen here
except:
traceback.print_exc(self)
self._elem.value += '>>> '
self._status = "main"
finally:
self._redirect_out()
elif self.current_line == "": # end of block
block = src[src.rfind('>>>') + 4:].splitlines()
block = [block[0]] + [b[4:] for b in block[1:]]
block_src = '\n'.join(block)
# status must be set before executing code in globals()
self._status = "main"
try:
self._redirect_out()
_ = exec(block_src, self.editor_ns)
if _ is not None:
print(repr(_))
# pylint: disable=bare-except; any exception can happen here
except:
traceback.print_exc(self)
finally:
self._redirect_out()
self._elem.value += '>>> '
else:
self._elem.value += '... '
self.cursor_to_end()
event.preventDefault()
def my_key_down(self, event):
if event.keyCode == 37: # left arrow
sel = self.get_col(self._elem)
if sel < 5:
event.preventDefault()
event.stopPropagation()
elif event.keyCode == 36: # line start
pos = self._elem.selectionStart
col = self.get_col(self._elem)
self._elem.setSelectionRange(pos - col + 4, pos - col + 4)
event.preventDefault()
elif event.keyCode == 38: # up
if self.current > 0:
pos = self._elem.selectionStart
col = self.get_col(self._elem)
# remove self.current line
self._elem.value = self._elem.value[:pos - col + 4]
self.current -= 1
self._elem.value += self.history[self.current]
event.preventDefault()
elif event.keyCode == 40: # down
if self.current < len(self.history) - 1:
pos = self._elem.selectionStart
col = self.get_col(self._elem)
# remove self.current line
self._elem.value = self._elem.value[:pos - col + 4]
self.current += 1
self._elem.value += self.history[self.current]
event.preventDefault()
elif event.keyCode == 8: # backspace
src = self._elem.value
lstart = src.rfind('\n')
if (lstart == -1 and len(src) < 5) or (len(src) - lstart < 6):
event.preventDefault()
event.stopPropagation()
``` |
{
"source": "jonathanverner/makebib",
"score": 3
} |
#### File: makebib/makebib/__init__.py
```python
from datetime import datetime
import logging
import os
import sys
import argparse
from pybtex.__main__ import main as run_bibtex
from pybtex import auxfile
from pybtex import database
from pybtex.database.output.bibtex import Writer as BibTeXWriter
from .about import __version__, __summary__, __title__
logger = logging.getLogger(__name__)
DEFAULT_CFG = {
'db': '~/.makebib/db.bib'
}
CFG_FILES = ['/etc/makebib', '~/.makebib', './.makebib']
def construct_argparser():
parser = argparse.ArgumentParser(
description=__summary__,
epilog="""
CONFIGURATION
The program reads its configuration from """+', '.join(CFG_FILES)+""".
Each configuration option is given on a single line in the form of
key = val
Spaces around '=' are ignored as is everything following the first '#'.
Lines not containing '=' are also ignored. The options are case-insensitive.
Currently the following options (and their default values) are available:
"""+'\n'.join([" " + k + " = " + v for k, v in DEFAULT_CFG.items()])
)
parser.add_argument('--db', help='Path to the central bib dbase')
parser.add_argument('--config', help='Path to the configuration file')
parser.add_argument('--version', help='Print the version and exit', action='store_true', default=False)
parser.add_argument('--license', help='Print the license text and exit', action='store_true', default=False)
command_parsers = parser.add_subparsers(dest='action')
compile_parser = command_parsers.add_parser('compile', help='Create a local bib file for the given TeX-file and run BibTeX')
compile_parser.add_argument('document', help='base filename of the TeX source')
compile_parser.add_argument('bibargs', help='arguments passed to BibTeX', nargs='*')
compile_parser.add_argument('--nobibtex', help='do not run bibtex', action='store_true', default=False)
compile_parser.add_argument('--force-overwrite', help='force overwriting a local bib file', action='store_true', default=False, dest='force_overwrite')
show_parser = command_parsers.add_parser('show', help='Show various information')
showcommand_parsers = show_parser.add_subparsers(help='Information types', dest='info_type')
cited_parser = showcommand_parsers.add_parser('cited', help='Show all the keys cited by the specified TeX document')
cited_parser.add_argument('document', help='base filename of the TeX source')
missing_parser = showcommand_parsers.add_parser('missing', help='Show all the keys cited by the specified TeX document & missing from the central dbase')
missing_parser.add_argument('document', help='base filename of the TeX source')
all_parser = showcommand_parsers.add_parser('all', help='Show all the cite keys in the central dbase')
entry_parser = showcommand_parsers.add_parser('bibentry', help='Show the database entry with the given key')
entry_parser.add_argument('key', help='The citekey of the entry to show')
cfg_parser = showcommand_parsers.add_parser('cfg', help='Show configuration')
return parser
def extract_alt_keys(entry):
alt_keys = []
for field, value in entry.fields.items():
if field.lower() == 'altkeys':
alt_keys.append(value)
return alt_keys
def create_alt_keys_map(db):
map = {}
for key, entry in db.entries.items():
map[key] = entry
for altk in extract_alt_keys(entry):
map[altk] = entry
return map
def make_bib(basename, bib_dbase, force_overwrite=False):
aux_data = auxfile.parse_file(basename + '.aux')
if force_overwrite or not os.path.exists(aux_data.data[0]+'.bib') or os.path.exists('.generated_bib'):
with open('.generated_bib', 'w') as OUT:
OUT.write("The bib file '"+aux_data.data[0]+".bib' was generated by makebib on "+str(datetime.now())+'\n')
OUT.write("Source TeX file: "+basename+'.tex')
OUT.write("BibTeX dbase: "+bib_dbase)
db = database.parse_file(os.path.expanduser(bib_dbase))
alt_map = create_alt_keys_map(db)
outdb = database.BibliographyData({key: alt_map[key] for key in aux_data.citations if key in alt_map})
# The following is copied from pybtex source to work around
# the fact that bib entries are output in random order...
# outdb.to_file(aux_data.data[0] + '.bib', bib_format='bibtex')
writer = BibTeXWriter()
with open(aux_data.data[0] + '.bib', 'w') as BIB:
writer._write_preamble(BIB, outdb.preamble)
first = True
for key, entry in sorted(outdb.entries.items(), key=lambda x: x[0]):
if not first:
BIB.write(u'\n')
first = False
BIB.write(u'@%s' % entry.original_type)
BIB.write(u'{%s' % key)
for role, persons in entry.persons.items():
writer._write_persons(BIB, persons, role)
for type, value in entry.fields.items():
writer._write_field(BIB, type, value)
BIB.write(u'\n}\n')
else:
logger.warn("An existing local bib database found and .generated_bib is not present. Refusing to overwrite.")
logger.warn("(use --force-overwrite to overwrite to ignore)")
def list_cited_keys(basename):
aux_data = auxfile.parse_file(basename + '.aux')
print('\n'.join(aux_data.citations))
def list_db_keys(bib_dbase):
db = database.parse_file(os.path.expanduser(bib_dbase))
print('\n'.join(db.entries.keys()))
def list_missing_keys(basename, bib_dbase):
aux_data = auxfile.parse_file(basename + '.aux')
db = database.parse_file(os.path.expanduser(bib_dbase))
alt_map = create_alt_keys_map(db)
missing = [key for key in aux_data.citations if key not in alt_map]
print('\n'.join(missing))
def show_bibentry(key, bib_dbase):
db = database.parse_file(os.path.expanduser(bib_dbase))
if key in db.entries:
data = database.BibliographyData(entries={key: db.entries[key]})
print(data.to_string(bib_format='bibtex'))
def load_cfg(cfg_file=None):
global CFG_FILES, DEFAULT_CFG
cfg = {}
for k, v in DEFAULT_CFG.items():
cfg[k] = v
if cfg_file is not None:
CFG_FILES.append(cfg_file)
for f in CFG_FILES:
f = os.path.expanduser(f)
if os.path.exists(f):
with open(f, 'r') as IN:
for ln in IN.readlines():
comment_pos = ln.find('#')
if comment_pos > -1:
ln = ln[:comment_pos]
try:
key, val = ln.split('=')
key = key.strip().lower()
val = val.strip()
if len(key) > 0:
cfg[key] = val
except:
pass
return cfg
def main():
args = construct_argparser().parse_args()
CFG = load_cfg(args.config)
if args.db:
CFG['db'] = args.db
if args.version:
print(__title__+' version '+__version__)
exit(1)
if args.license:
license_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'LICENSE.txt')
print(open(license_path).read())
exit(1)
if args.action == 'compile':
make_bib(args.document, CFG['db'], force_overwrite=args.force_overwrite)
if not args.nobibtex:
sys.argv = [sys.argv[0], args.document]+args.bibargs
run_bibtex()
elif args.action == 'show':
if args.info_type == 'cited':
list_cited_keys(args.document)
elif args.info_type == 'missing':
list_missing_keys(args.document, CFG['db'])
elif args.info_type == 'all':
list_db_keys(CFG['db'])
elif args.info_type == 'bibentry':
show_bibentry(args.key, CFG['db'])
elif args.info_type == 'cfg':
for k, v in CFG.items():
print(k, '=', v)
``` |
{
"source": "jonathanvevance/predicting_fgroups_ddp",
"score": 3
} |
#### File: src/utils/train_utils.py
```python
import torch
import torch.nn as nn
from sklearn.metrics import precision_recall_fscore_support
from tqdm import tqdm
class weightedLoss(nn.Module):
def __init__(self, torch_loss, weight, device):
"""
Args:
torch_loss : torch criterion class (NOT OBJECT)
weight : torch tensor dealing with class imbalance
"""
super(weightedLoss, self).__init__()
self.weight = weight.to(device)
self.criterion = torch_loss(reduction = 'none')
def forward(self, output, labels):
"""Forward function."""
weight_ = self.weight[labels.data.view(-1).long()].view_as(labels)
loss = self.criterion(output, labels)
loss_class_weighted = loss * weight_
loss_class_weighted = loss_class_weighted.mean()
return loss_class_weighted
class EarlyStopping():
"""A simple Early Stopping implementation."""
def __init__(self, patience = 10, delta = 0):
self.patience = patience
self.delta = delta
self.val_loss_min = None
self.saved_state_dict = None
self.counter = 0
def __call__(self, val_loss, model):
"""Call function."""
if self.val_loss_min is None:
self.val_loss_min = val_loss
self.saved_state_dict = model.state_dict()
return False
change = (self.val_loss_min - val_loss) / self.val_loss_min
if change >= self.delta:
self.counter = 0
self.val_loss_min = val_loss
self.saved_state_dict = model.state_dict()
return False
else:
self.counter += 1
if self.counter > self.patience:
return True
else:
return False
def evaluate_model(model, criterion, val_loader, device, epoch):
"""Function to evaluate a model."""
model.eval()
val_losses_total = 0
with tqdm(val_loader, unit = "batch", leave = True) as tqdm_progressbar:
for idx, (inputs, labels) in enumerate(tqdm_progressbar):
tqdm_progressbar.set_description(f"Epoch {epoch} (validating)")
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs)
loss = criterion(outputs, labels)
val_losses_total += loss.item()
val_losses_avg = val_losses_total / (idx + 1)
tqdm_progressbar.set_postfix(val_loss = val_losses_avg)
print_model_accuracy(outputs, labels, loss, threshold = 0.5, mode = "val")
return val_losses_avg
def get_classification_metrics(bin_outputs, labels):
bin_outputs = torch.flatten(bin_outputs).cpu()
labels = torch.flatten(labels).cpu()
precision, recall, fscore, __ = precision_recall_fscore_support(labels, bin_outputs)
accuracy = torch.sum(bin_outputs == labels) / labels.nelement()
return precision, recall, fscore, accuracy
def print_model_accuracy(outputs, labels, loss, threshold = 0.5, mode = 'train'):
bin_outputs = (outputs > threshold).float()
precision, recall, fscore, accuracy = get_classification_metrics(bin_outputs, labels)
print(
f"{mode} minibatch :: accuracy =", accuracy.item(),
f"loss =", loss.item(), f"f1 score = {sum(fscore) / len(fscore)}"
)
def save_model(model, save_path):
"""Save torch model."""
torch.save(model.state_dict(), save_path)
def load_model(model, load_path, device):
"""Load torch model."""
model.load_state_dict(torch.load(load_path, map_location = device))
return model
``` |
{
"source": "jonathan-vidal/daily-questions",
"score": 4
} |
#### File: Solutions/jonathan-vidal/solution.py
```python
def my_max(num_list):
curr_max = float('-inf')
for num in num_list:
if num > curr_max:
curr_max = num
return curr_max
```
#### File: Solutions/jonathan-vidal/solution.py
```python
def is_happy(num_to_check):
curr_num = num_to_check
while True:
if curr_num == 1:
return True
square_sum = 0
while curr_num > 0:
square_sum += (curr_num % 10)**2
curr_num = curr_num // 10
curr_num = square_sum
```
#### File: Solutions/malcolm-smith/solution.py
```python
def mikuPrint(n):
for i in range(n + 1):
if i % 9 == 0:
print('Miku')
elif i % 3 == 0:
print('Mi')
else:
print(i)
mikuPrint(18)
```
#### File: Solutions/DamourYouKnow/solution.py
```python
def mode(arr):
if not arr:
return set()
counts = {}
for num in arr:
if num not in counts:
counts[num] = 1
else:
counts[num] += 1
maximum = max(counts.values())
return {num for num in arr if counts[num] == maximum}
print(mode([])) # {}
print(mode([1,2,3,3,4])) # {3}
print(mode([1,2,3,4,3,4])) # {3,4}
```
#### File: Solutions/malcolm-smith/solution.py
```python
input = [
('<NAME>', ['ABBA']),
('Ghost Rule', ['DECO*27', '<NAME>']),
('Animals', ['<NAME>']),
('Remember The Name', ['<NAME>', 'Eminem', '50 Cent']),
('404 Not Found', [])
]
def songTitle(song):
artists = ''
if len(song[1]) > 1:
for artist in range(len(song[1])):
artists += (song[1][artist] + ', ')
artists += ('and ' + song[1][-1] + ' - ')
elif len(song[1]) == 1:
artists = song[1][0] + ' - '
else:
artists = ''
return artists + song[0]
for song in input:
print(songTitle(song))
```
#### File: Solutions/malcolm-smith/solution.py
```python
def isEvilNumber(n):
count = 0
for char in str(bin(n)[2:]):
if char == '1':
count += 1
if count % 2 == 0:
return True
else:
return False
print(isEvilNumber(4))
``` |
{
"source": "jonathanvijayakumar/artopninja",
"score": 2
} |
#### File: autosarfactorymain/autosarfactory/autosar_ui.py
```python
from enum import Enum
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.font as tkFont
import re
import os,itertools
from ttkthemes import ThemedStyle
from .autosarfactory import Referrable
from tkinter import Menu
__resourcesDir__ = os.path.join(os.path.dirname(__file__), 'resources')
__PAD_X__ = 5 # For some additional padding in the column width
class Application(tk.Frame):
def __init__(self, root, autosar_root):
self.__root = root
self.__asr_explorer = None
self.__property_view = None
self.__referred_by_view = None
self.__search_dropdown = None
self.__search_field = None
self.__search_view = None
self.__go_to_menu = None
self.__asr_explorer_menu = None
self.__current_theme = 'scidgreen'
self.__asr_img = tk.PhotoImage(file=os.path.join(__resourcesDir__, 'autosar.png'))
self.__initialize_ui()
self.__asr_explorer_id_to_node_dict = {}
self.__asr_explorer_node_to_id_dict = {} # reverse of the above dict for a faster lookup.
self.__referred_by_view_id_to_node_dict = {}
self.__property_view_id_to_node_dict = {}
self.__search_view_id_to_node_dict = {}
self.__go_to_node_id_in_asr_explorer = None
self.__font__ = tkFont.nametofont('TkHeadingFont')
self.__populate_tree(autosar_root)
def __selectTheme(self, themeName):
style = ThemedStyle(self.__root)
if(themeName == 'scidgreen'):
self.__current_theme='scidgreen'
elif(themeName == 'ubuntu'):
self.__current_theme='ubuntu'
elif(themeName == 'alt'):
self.__current_theme='alt'
elif(themeName == 'equilux'):
self.__current_theme='equilux'
elif(themeName == 'classic'):
self.__current_theme='classic'
elif(themeName == 'vista'):
self.__current_theme='vista'
elif(themeName == 'default'):
self.__current_theme='default'
else:
self.__current_theme='scidgreen'
style.theme_use(self.__current_theme)
def __initialize_ui(self):
# Configure the root object for the Application
self.__root.iconphoto(True, self.__asr_img)
self.__root.title("Autosar Visualizer")
self.__root.minsize(width=800, height=600)
# set theme
style = ThemedStyle(self.__root)
#print(style.themes)
# ['yaru', 'default', 'vista', 'classic', 'scidgreen', 'equilux', 'scidgrey', 'adapta', 'scidpink', 'scidmint', 'plastik', 'alt', 'clearlooks', 'itft1', 'smog', 'clam', 'scidsand', 'kroc', 'radiance', 'black', 'blue', 'arc', 'winxpblue', 'scidblue', 'ubuntu', 'keramik', 'winnative', 'elegance', 'aquativo', 'scidpurple', 'xpnative', 'breeze']
#style.theme_use("equilux") #- very slow for some reason
style.theme_use(self.__current_theme)
# create ui components
menubar = Menu(self.__root)
filemenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Select Theme", menu=filemenu)
filemenu.add_command(label="Default", command=lambda:self.__selectTheme('default'))
filemenu.add_command(label="ScidGreen", command=lambda:self.selectTheme('scidgreen'))
filemenu.add_command(label="Ubuntu", command=lambda:self.__selectTheme('ubuntu'))
filemenu.add_command(label="Classic", command=lambda:self.__selectTheme('classic'))
filemenu.add_command(label="Vista", command=lambda:self.__selectTheme('vista'))
filemenu.add_command(label="Alt", command=lambda:self.__selectTheme('alt'))
filemenu.add_command(label="Equilux", command=lambda:self.__selectTheme('equilux'))
self.__root.config(menu=menubar)
menubar.add_command(label="Exit", command=lambda:self.__client_exit(self.__root))
splitter = tk.PanedWindow(orient=tk.VERTICAL)
top_frame = tk.Frame(splitter)
# Create the autosar explorer
self.__asr_explorer = ttk.Treeview(top_frame, columns=('Type'))
# Set the heading (Attribute Names)
self.__asr_explorer.heading('#0', text='Element')
self.__asr_explorer.heading('#1', text='Type')
# Specify attributes of the columns (We want to stretch it!)
self.__asr_explorer.column('#0', stretch=tk.YES, minwidth=100, width = 0)
self.__asr_explorer.column('#1', stretch=tk.YES, minwidth=100, width = 0)
# Add scroll bars
vsb = ttk.Scrollbar(top_frame, orient="vertical", command=self.__asr_explorer.yview)
hsb = ttk.Scrollbar(top_frame, orient="horizontal", command=self.__asr_explorer.xview)
bottom_frame = tk.Frame(splitter)
# Create the properties tree
self.__property_view = ttk.Treeview(bottom_frame, columns=('Value'))
# Set the heading (Attribute Names)
self.__property_view.heading('#0', text='Property')
self.__property_view.heading('#1', text='Value')
self.__property_view.column('#0', stretch=tk.YES, minwidth=150)
self.__property_view.column('#1', stretch=tk.YES, minwidth=150)
# Add scroll bars
vsb1 = ttk.Scrollbar(bottom_frame, orient="vertical", command=self.__property_view.yview)
hsb1 = ttk.Scrollbar(bottom_frame, orient="horizontal", command=self.__property_view.xview)
# Create the referred_by tree
referenced_by_frame = ttk.Frame(bottom_frame)
referenced_by_label = ttk.Label(referenced_by_frame, text="Referenced By")
self.__referred_by_view = ttk.Treeview(referenced_by_frame, show="tree")
self.__referred_by_view.column('#0', stretch=tk.YES, minwidth=50)
# Add scroll bars
vsb2 = ttk.Scrollbar(referenced_by_frame, orient="vertical", command=self.__referred_by_view.yview)
hsb2 = ttk.Scrollbar(referenced_by_frame, orient="horizontal", command=self.__referred_by_view.xview)
# create the search view
search_frame = ttk.Frame(bottom_frame)
self.__search_type = ttk.Label(search_frame, text="Search Type")
self.__search_dropdown = ttk.Combobox(search_frame, state="readonly", values=["Short Name","Autosar Type","Regular Expression"])
self.__search_field = ttk.Entry(search_frame)
self.__search_field.insert(0, 'search')
search_results_label = ttk.Label(search_frame, text="Results")
self.__search_view = ttk.Treeview(search_frame, show="tree")
self.__search_view.column('#0', stretch=tk.YES, minwidth=50)
# Add scroll bars
vsb3 = ttk.Scrollbar(search_frame, orient="vertical", command=self.__search_view.yview)
hsb3 = ttk.Scrollbar(search_frame, orient="horizontal", command=self.__search_view.xview)
# configure the explorer
self.__search_field.config(foreground='grey')
self.__asr_explorer.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)
self.__property_view.configure(yscrollcommand=vsb1.set, xscrollcommand=hsb1.set)
self.__referred_by_view.configure(yscrollcommand=vsb2.set, xscrollcommand=hsb2.set)
self.__search_view.configure(yscrollcommand=vsb3.set, xscrollcommand=hsb3.set)
# layout
splitter.add(top_frame)
splitter.add(bottom_frame)
splitter.pack(fill=tk.BOTH, expand=1)
# top layout
self.__asr_explorer.grid(row=0, column=0, sticky='nsew')
vsb.grid(row=0, column=1, sticky='ns')
hsb.grid(row=1, column=0, sticky='ew')
# top_frame.rowconfigure(1, weight=1)
top_frame.rowconfigure(0, weight=1)
top_frame.columnconfigure(0, weight=1)
# bottom layout
self.__property_view.grid(row=0, column=0, sticky='nsew')
vsb1.grid(row=0, column=1, sticky='ns')
hsb1.grid(row=1, column=0, sticky='ew')
# referenced_by layout
referenced_by_frame.grid(row=0, column=2, sticky='nsew')
referenced_by_label.grid(row=0, column=2, sticky='ew')
self.__referred_by_view.grid(row=1, column=2, sticky='nsew')
vsb2.grid(row=1, column=3, sticky='ns')
hsb2.grid(row=2, column=2, sticky='ew')
referenced_by_frame.rowconfigure(1, weight=1)
referenced_by_frame.columnconfigure(2, weight=1)
# search frame layout
search_frame.grid(row=0, column=4, sticky='nsew')
self.__search_type.grid(row=0, column=3, sticky='ew')
self.__search_dropdown.grid(row=0, column=4, sticky='ew')
self.__search_dropdown.current(0)
self.__search_field.grid(row=1, column=3,columnspan=2, sticky='ew')
search_results_label.grid(row=2, column=3, sticky='ew', columnspan=2)
self.__search_view.grid(row=3, column=3, sticky='nsew',columnspan=2)
vsb3.grid(row=3, column=5, sticky='ns')
hsb3.grid(row=5, column=3, sticky='ew',columnspan=2)
search_frame.rowconfigure(3, weight=1)
search_frame.columnconfigure(5, weight=1)
bottom_frame.rowconfigure(0, weight=1)
bottom_frame.columnconfigure(0, weight=1)
bottom_frame.columnconfigure(2, weight=1)
# create menu items
self.__go_to_menu = tk.Menu(self.__root, tearoff=0)
self.__go_to_menu.add_command(label='Go to item', command=self.__go_to_node_in_asr_explorer)
self.__asr_explorer_menu = tk.Menu(self.__root, tearoff=0)
self.__asr_explorer_menu.add_command(label='Copy Name', command=self.__copy_name_to_clip_board)
self.__asr_explorer_menu.add_command(label='Copy Path', command=self.__copy_path_to_clip_board)
# bind search entry
self.__search_field.bind('<FocusIn>', self.__on_search_entry_click)
self.__search_field.bind('<FocusOut>', self.__on_search_entry_focusout)
self.__search_field.bind('<Return>', self.__on_search_entry_click)
# bind tree for:
# selection
self.__asr_explorer.bind("<Button-1>", self.__on_asr_explorer_selection)
self.__asr_explorer.bind("<KeyRelease>", self.__on_asr_explorer_key_released)
self.__search_view.bind("<Button-1>", self.__on_search_view_selection)
self.__search_view.bind("<KeyRelease>", self.__on_search_view_key_released)
# right-click
self.__referred_by_view.bind("<Button-3>", self.__on_referred_by_view_right_click)
self.__property_view.bind("<Button-3>", self.__on__properties_view_right_click)
self.__asr_explorer.bind("<Button-3>", self.__on__asr_explorer_right_click)
def __on_search_entry_click(self, event):
search_string = self.__search_field.get()
search_nodes = []
search_type = self.__search_dropdown.get()
if search_string == 'search':
self.__search_field.delete(0, "end") # delete all the text in the entry
self.__search_field.insert(0, '') #Insert blank for user input
self.__search_field.config(foreground='black')
elif search_string != '':
if search_type == "Short Name":
for node in self.__asr_explorer_id_to_node_dict.values():
if node.name is not None and search_string.lower() in node.name.lower():
search_nodes.append(node)
elif search_type == "Autosar Type":
for node in self.__asr_explorer_id_to_node_dict.values():
node_type = node.__class__.__name__
if node_type is not None and search_string.lower() in node_type.lower():
search_nodes.append(node)
elif search_type == "Regular Expression":
for node in self.__asr_explorer_id_to_node_dict.values():
if node.name is not None:
if search_string=='*' or search_string=='+':
search_string="\{0}".format(search_string)
matched_re = re.search(search_string, node.name)
if matched_re is not None and matched_re.group()!='':
search_nodes.append(node)
self.__update_search_view(search_nodes)
def __on_search_entry_focusout(self,event):
if self.__search_field.get() == '':
self.__search_field.insert(0, 'search')
self.__search_field.config(foreground='grey')
def __on_referred_by_view_right_click(self,event):
# find entry
item = self.__referred_by_view.identify('item',event.x,event.y)
if item != '':
self.__go_to_node_id_in_asr_explorer = self.__asr_explorer_node_to_id_dict[self.__referred_by_view_id_to_node_dict[int(item)]]
self.__go_to_menu.tk_popup(event.x_root, event.y_root, 0)
def __on__properties_view_right_click(self,event):
# find entry
item = self.__property_view.identify('item',event.x,event.y)
if item != '' and int(item) in self.__property_view_id_to_node_dict:
self.__go_to_node_id_in_asr_explorer = self.__asr_explorer_node_to_id_dict[self.__property_view_id_to_node_dict[int(item)]]
self.__go_to_menu.tk_popup(event.x_root, event.y_root, 0)
def __on__asr_explorer_right_click(self,event):
# find entry
self.__go_to_node_id_in_asr_explorer = self.__asr_explorer.identify('item',event.x,event.y)
self.__asr_explorer_menu.tk_popup(event.x_root, event.y_root, 0)
def __copy_name_to_clip_board(self):
item = self.__go_to_node_id_in_asr_explorer
if item != '' and int(item) in self.__asr_explorer_id_to_node_dict:
self.__root.clipboard_clear()
self.__root.clipboard_append(self.__asr_explorer_id_to_node_dict[int(item)].name)
def __copy_path_to_clip_board(self):
item = self.__go_to_node_id_in_asr_explorer
if item != '' and int(item) in self.__asr_explorer_id_to_node_dict:
self.__root.clipboard_clear()
self.__root.clipboard_append(self.__asr_explorer_id_to_node_dict[int(item)].path)
def __go_to_node_in_asr_explorer(self):
if self.__go_to_node_id_in_asr_explorer is not None:
self.__open_node(self.__asr_explorer.parent(self.__go_to_node_id_in_asr_explorer))
self.__asr_explorer.selection_set(self.__go_to_node_id_in_asr_explorer)
self.__asr_explorer.focus(self.__go_to_node_id_in_asr_explorer)
self.__asr_explorer.see(self.__go_to_node_id_in_asr_explorer)
# Update the views
node = self.__asr_explorer_id_to_node_dict[int(self.__go_to_node_id_in_asr_explorer)]
self.__update_properties_view(node)
self.__update_referred_by_view(node)
def __open_node(self, id):
parent_id = self.__asr_explorer.parent(id)
if parent_id != '':
self.__open_node(parent_id)
self.__asr_explorer.item(id, open=True)
def __on_asr_explorer_selection(self, event):
# find entry
self.__select_node(self.__asr_explorer.identify('item',event.x,event.y))
def __select_node(self, item):
if item != '':
node = self.__asr_explorer_id_to_node_dict[int(item)]
self.__update_properties_view(node)
self.__update_referred_by_view(node)
def __on_asr_explorer_key_released(self, event):
self.__select_node(self.__asr_explorer.focus())
def __on_search_view_selection(self, event):
self.__search_view_select_node(self.__search_view.identify('item',event.x,event.y))
def __on_search_view_key_released(self, event):
self.__search_view_select_node(self.__search_view.focus())
def __search_view_select_node(self, item):
if item != '':
self.__go_to_node_id_in_asr_explorer = self.__asr_explorer_node_to_id_dict[self.__search_view_id_to_node_dict[int(item)]]
self.__go_to_node_in_asr_explorer()
def __update_properties_view(self, node):
# clear old properties tree values first
self.__property_view.delete(*self.__property_view.get_children())
self.__property_view_id_to_node_dict.clear()
# Add new values
id = 1
for name,value in node.get_property_values().items():
propertyValue = ''
if isinstance(value, set):
if len(value) > 0:
propertyValue = '['
for v in value:
propertyValue = propertyValue + str(value) if value is not None else '' + ','
propertyValue = propertyValue + ']'
elif isinstance(value,Enum):
propertyValue = value.literal if value is not None else ''
else:
propertyValue = str(value) if value is not None else ''
if isinstance(value, Referrable):
self.__property_view_id_to_node_dict[id] = value
# adjust column's width if necessary to fit each value
col_w = self.__get_padded_text_width(propertyValue)
if self.__property_view.column('#1',width=None) < col_w:
self.__property_view.column('#1', width=col_w)
self.__property_view.insert('',
"end",
iid=id,
text=name,
values=(propertyValue))
id +=1
def __update_search_view(self, nodes):
# clear old search tree values first
self.__search_view.delete(*self.__search_view.get_children())
self.__search_view_id_to_node_dict.clear()
# Add new values
id = 1
for node in nodes:
text = str(node)
self.__search_view.insert('',
"end",
iid=id,
text=text)
# adjust column's width if necessary to fit each value
col_w = self.__get_padded_text_width(text)
if self.__search_view.column('#0',width=None) < col_w:
self.__search_view.column('#0', width=col_w)
# Add to dict for later usage
self.__search_view_id_to_node_dict[id] = node
id +=1
def __update_referred_by_view(self, node):
# clear old tree values first
self.__referred_by_view.delete(*self.__referred_by_view.get_children())
self.__referred_by_view_id_to_node_dict.clear()
id = 1
for ref in node.referenced_by:
text = str(ref)
self.__referred_by_view.insert('',
"end",
iid=id,
text=text)
# adjust column's width if necessary to fit each value
col_w = self.__get_padded_text_width(text)
if self.__referred_by_view.column('#0',width=None) < col_w:
self.__referred_by_view.column('#0', width=col_w)
# Add to dict for later usage
self.__referred_by_view_id_to_node_dict[id] = ref
id += 1
def __populate_tree(self, autosar_root):
idCounter = itertools.count()
id = next(idCounter)
root_tree = self.__asr_explorer.insert('', 'end', iid=id, text="AutosarRoot", values=('AUTOSAR'))
self.__asr_explorer_id_to_node_dict[id] = autosar_root
self.__asr_explorer_node_to_id_dict[autosar_root] = id
self.__add_child(autosar_root, root_tree, idCounter)
def __add_child(self, node, parentItem, idCounter):
for child in node.get_children():
childTree = self.__create_tree_item(child, parentItem, idCounter)
# add child nodes
self.__add_child(child, childTree, idCounter)
def __create_tree_item(self, node, parentItem, idCounter):
id = next(idCounter)
self.__asr_explorer_id_to_node_dict[id] = node
self.__asr_explorer_node_to_id_dict[node] = id
element_text = node.name if node.name is not None else node.__class__.__name__
type_text = str(node)
"""
# adjust column's width if necessary to fit each value
col_w = self.__get_padded_text_width(element_text)
if self.__asr_explorer.column('#0',width=None) < col_w:
self.__asr_explorer.column('#0', width=col_w)
col_w = self.__get_padded_text_width(type_text)
if self.__asr_explorer.column('#1',width=None) < col_w:
self.__asr_explorer.column('#1', width=col_w)
"""
return self.__asr_explorer.insert(parentItem,
"end",
iid=id,
text=element_text,
values=type_text)
def __get_padded_text_width(self, text):
return self.__font__.measure(text + '__') + (2 * __PAD_X__)
def __client_exit(self, root):
root.destroy()
def show_in_ui(autosarRoot):
win = tk.Tk()
Application(win, autosarRoot)
win.mainloop()
``` |
{
"source": "jonathanvoelkle/GroupEm",
"score": 3
} |
#### File: GroupEm/groupem/combinations.py
```python
def combinations_fixed_sum(fixed_sum, length_of_list, lst=[]):
if length_of_list == 1:
lst += [fixed_sum]
yield lst
else:
for i in range(fixed_sum+1):
yield from combinations_fixed_sum(i, length_of_list-1, lst + [fixed_sum-i])
def combinations_fixed_sum_limits(fixed_sum, length_of_list, minimum, maximum, lst=[]):
if length_of_list == 1:
lst += [fixed_sum]
if fixed_sum >= minimum[-length_of_list] and fixed_sum <= maximum[-length_of_list]:
yield lst
else:
for i in range(min(fixed_sum, maximum[-length_of_list]), minimum[-length_of_list]-1, -1):
yield from combinations_fixed_sum_limits(fixed_sum-i, length_of_list-1, minimum, maximum, lst + [i])
``` |
{
"source": "jonathanvoelkle/twemoji",
"score": 3
} |
#### File: jonathanvoelkle/twemoji/main.py
```python
import os
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib import image as mpimg
import statistics
# 2194
def getColors(img):
pixels = img.reshape((72*72, 4)) .tolist()
# filter pixels for all with A > .9 (almost nontransparent)
pixels = list(filter(lambda x: x[3] >= .9, pixels))
# turn in [R, G, B, A]
img_reshaped = np.array(pixels).T
mean = list(map(lambda x: np.mean(x), img_reshaped))
median = list(map(lambda x: np.median(x), img_reshaped))
mode = list(map(lambda x: (Counter(x).most_common(1))[0][0], img_reshaped))
print('hi')
print(mode)
# tupleify image to remove duplicates
pixels = [tuple(i) for i in pixels]
count = [[x, pixels.count(x)] for x in set(pixels)]
def getFrequency(val):
return val[1]
count.sort(key=getFrequency, reverse=True)
most = list((count[0])[0])
fortyforty = img[40, 40]
dings = [mean, median, mode, most, fortyforty]
return dings
def plotWithColors(img, colors):
fig, ax = plt.subplots()
ax.imshow(img)
for d in range(len(colors)):
print(colors[d])
rect = Rectangle((d * 20.0, 80.0), 20, 20,
alpha=1, facecolor=colors[d])
ax.add_patch(rect)
plt.xlim((0, 120))
plt.ylim((100, 0))
plt.show()
pngs = os.listdir("assets/72x72/")
# pngs = ["1f1e6-1f1ea.png"]
for png in pngs:
print(png)
img = mpimg.imread(str('assets/72x72/' + png))
colors = getColors(img)
plotWithColors(img, colors)
``` |
{
"source": "JonathanVose/CMPT-120L-910-20F",
"score": 4
} |
#### File: Assignments/Assignment 4/Assgnment 4.py
```python
class Summation(object):
# Initializes an instance of the Summation class
def __init__ (self,number_stop):
self.number = number_stop
# Function that sums a list of numbers from 1 to (including) the users input
def sum (self):
total = 0
for num in range(self.number):
total += num
total = total + (num + 1)
return total
# User input that is stored in the Summation class
if __name__ == "__main__":
user_number = Summation(int(input("Please enter a number to which you would like to sum: ")))
print(user_number.sum())
```
#### File: Assignments/Assignment 5/dry_code_two.py
```python
def saturdays_bank_transactions(transatcions) -> (float, float):
savings = 1096.25
checking = 1590.80
num = 0
for value in transactions:
temp_value = (transactions[num])
if temp_value >= 0:
checking += (temp_value * 0.85)
savings += (temp_value * 0.15)
else:
checking += temp_value
num += 1
return checking, savings
if __name__ == "__main__":
transactions = [300.00, -50.00, -5.00, -20, 15.72, 2083.93, -1034.00, -420.00, -5.23, -15.93, -72.90]
new_balance = saturdays_bank_transactions(transactions)
print("Your new checking balance is:", '${:.2f}'.format(round(new_balance[0], 2)), "\n", "Your new savings balance is:", '${:.2f}'.format(round(new_balance[1], 2)))
``` |
{
"source": "JonathanVusich/pydb",
"score": 3
} |
#### File: litedb/index/index.py
```python
from typing import Union, Optional, Set
from sortedcontainers import SortedDict
NoneType = type(None)
class Index:
"""
This class stores maps valid index values to their corresponding list index.
"""
def __init__(self, index_type=None):
self.indexes: SortedDict[object, Union[int, Set[int]]] = SortedDict()
self.none_indexes: Set[int] = set()
self._index_type = index_type
@property
def index_type(self):
return self._index_type
def __eq__(self, other):
if isinstance(other, Index):
return self.none_indexes == other.none_indexes and self.indexes == other.indexes and self.index_type == other.index_type
else:
raise NotImplementedError
def __len__(self):
return len(self.indexes) + len(self.none_indexes)
def add(self, value, index: int) -> None:
"""This method adds an index for a given value."""
# If the index type is None, check the value for a type and assign the index type to it if it is not None
if self._index_type is None:
value_type = type(value)
if value_type is not NoneType:
self._index_type = value_type
if value is None:
self.none_indexes.add(index)
return
if value in self.indexes:
indexes = self.indexes[value]
if isinstance(indexes, set):
self.indexes[value].add(index)
else:
self.indexes[value] = {indexes, index}
else:
self.indexes.update({value: index})
def retrieve(self, value) -> Set[int]:
"""Return a set that contains the indexes that match the specified value."""
if value is None:
if len(self.none_indexes) > 0:
return self.none_indexes
elif value in self.indexes:
indexes = self.indexes[value]
if isinstance(indexes, int):
return {indexes}
return self.indexes[value]
else:
return set()
def retrieve_range(self, low, high) -> Optional[Set[int]]:
"""This function retrieves a range of values depending on the high and low indexes given."""
if low is None:
return_set = self.none_indexes.copy()
min_index = 0
else:
min_index = self.indexes.bisect_left(low)
return_set = set()
max_index = self.indexes.bisect_right(high) if high is not None else len(self.indexes)
index_sets = self.indexes.values()[min_index:max_index]
if len(index_sets) == 0 and len(return_set) == 0:
return
for index in index_sets:
if isinstance(index, set):
return_set.update(index)
else:
return_set.add(index)
return return_set
def destroy(self, value, index: int) -> None:
"""
Removes an index from the database depending on the value of the tracked value.
:param value:
:param index:
:return:
"""
if value is None:
self.none_indexes.remove(index)
return
entry = self.indexes[value] if value in self.indexes else None
if entry is not None:
if isinstance(entry, set):
if index not in entry:
raise KeyError
entry.remove(index)
if len(entry) == 1:
self.indexes[value] = entry.pop()
else:
if index == entry:
self.indexes.pop(value)
else:
raise KeyError
else:
raise KeyError
```
#### File: litedb/index/memory_index.py
```python
from typing import Optional, Set
from litedb.abc import IndexManager
from .index import Index
from ..errors import InvalidRange
from ..utils.index import retrieve_possible_object_indexes
NoneType = type(None)
class MemoryIndex(IndexManager):
"""This is a index manager that handles indexes for all of the different types
in the database."""
def __init__(self):
self.index_map = {}
self.index_blacklist = set()
def index_item(self, item: object, index: int) -> None:
"""Inserts/creates index tables based on the given object."""
indexes = retrieve_possible_object_indexes(item)
for var_name, value in indexes.items():
if var_name in self.index_blacklist:
continue
if var_name not in self.index_map:
# if the first item value is None, create the index without assigning type
value_type = type(value)
if value_type is NoneType:
self.index_map.update({var_name: Index()})
else:
self.index_map.update({var_name: Index(type(value))})
try:
self.index_map[var_name].add(value, index)
except TypeError:
self.index_map.pop(var_name)
self.index_blacklist.add(var_name)
def unindex_item(self, item: object, index: int) -> None:
"""Removes indexes for the given object."""
indexes = retrieve_possible_object_indexes(item)
for var_name, value in indexes.items():
if var_name not in self.index_blacklist:
self.index_map[var_name].destroy(value, index)
def retrieve(self, **kwargs) -> Optional[Set[int]]:
"""Retrieves indexes that match the given parameters."""
indexes: Set[int] = set()
for x, key in enumerate(kwargs.keys()):
if key in self.index_blacklist or key not in self.index_map:
raise IndexError(f"{key} is not a valid index!")
index = self.index_map[key]
if len(index) == 0:
continue
value = kwargs[key]
if isinstance(value, tuple):
if len(value) != 2:
raise InvalidRange
low, high = value
if low is not None and not isinstance(low, index.index_type):
raise ValueError(f"The low value of \"{key}\" must be of type {index.index_type}")
if high is not None and not isinstance(high, index.index_type):
raise ValueError(f"The high value of \"{key}\" must be of type {index.index_type}")
if x == 0:
results = index.retrieve_range(low, high)
if results is not None:
indexes.update(results)
else:
results = index.retrieve_range(low, high)
if results is not None:
indexes.intersection_update(results)
else:
if value is not None and not isinstance(value, index.index_type):
raise ValueError(f"\"{key}\" must be of type {index.index_type}")
results = index.retrieve(value)
if results is not None:
if x == 0:
indexes.update(results)
else:
indexes.intersection_update(results)
if len(indexes) > 0:
return indexes
```
#### File: litedb/index/persistent_index.py
```python
import os
from .memory_index import MemoryIndex
from ..utils.serialization import dump_object, load_object
class PersistentIndex(MemoryIndex):
"""An extension of the in-memory index class that commits index
changes to disk."""
def __init__(self, index_path: str) -> None:
super().__init__()
self.index_path = index_path
self.blacklist_path = os.path.join(self.index_path, "blacklist")
self.map_path = os.path.join(self.index_path, "map")
self.load()
def load(self) -> None:
"""Loads the index from disk."""
index_map = load_object(self.map_path)
if index_map is not None:
self.index_map = index_map
blacklist = load_object(self.blacklist_path)
if blacklist is not None:
self.index_blacklist = blacklist
def commit(self) -> None:
"""Persists the index to disk."""
dump_object(self.blacklist_path, self.index_blacklist)
dump_object(self.map_path, self.index_map)
```
#### File: litedb/shard/buffer.py
```python
import os
from typing import Dict
from .shard import Shard
from .shardlru import ShardLRU
from ..utils.serialization import load_shard, dump_shard, get_checksum
from ..database.config import Config
class ShardBuffer:
"""
Manages serializing and deserializing shards on the fly.
Also allows iteration for easy object collection.
"""
def __init__(self, table_dir: str, shard_paths: Dict[int, str], config: Config) -> None:
self.table_dir = table_dir
self.loaded_shards: Dict[int, Shard] = {}
self.shard_paths = shard_paths
self.current_shard_index: int = -1
self.lru = ShardLRU(max_len=config.page_cache)
self.config = config
def __iter__(self):
self.current_shard_index = -1
return self
def __next__(self) -> Shard:
self.current_shard_index += 1
if self.current_shard_index in self.shard_paths:
self._ensure_shard_loaded(self.current_shard_index)
return self.loaded_shards[self.current_shard_index]
else:
raise StopIteration
def __getitem__(self, shard_index: int) -> Shard:
self._ensure_shard_loaded(shard_index)
return self.loaded_shards[shard_index]
def _create_new_shard_path(self) -> str:
"""Creates a new shard path that will not collide with any others."""
shard_name = f"shard{len(self.shard_paths)}"
return os.path.join(self.table_dir, shard_name)
def _ensure_shard_loaded(self, shard_index: int) -> None:
"""
Ensures that the given shard index has been loaded.
:param shard_index:
:return:
"""
shard_to_persist = self.lru.update(shard_index)
if shard_to_persist is not None:
self._free_shard(shard_to_persist)
if shard_index not in self.loaded_shards:
if shard_index in self.shard_paths:
self.loaded_shards.update(
{shard_index: Shard.from_bytes(load_shard(self.shard_paths[shard_index]), self.config.page_size)})
else:
self.loaded_shards.update({shard_index: Shard()})
self.shard_paths.update({shard_index: self._create_new_shard_path()})
def _free_shard(self, shard: int) -> None:
"""Clears a shard from memory and saves it to disk."""
self._persist_shard(shard)
if shard in self.loaded_shards:
self.loaded_shards.pop(shard)
def _persist_shard(self, shard: int) -> None:
"""Saves a shard to disk."""
if not self._shard_has_changes(shard):
return
if shard in self.loaded_shards:
shard_path = self.shard_paths[shard]
shard_data = self.loaded_shards[shard]
dump_shard(shard_path, shard_data.to_bytes())
def _shard_has_changes(self, shard: int) -> bool:
"""Uses checksums to calculate if a shard has changed."""
if os.path.exists(self.shard_paths[shard]):
saved_checksum = get_checksum(self.shard_paths[shard])
return not saved_checksum == self.loaded_shards[shard].checksum
return True
def commit(self) -> None:
"""Persists all shards."""
for shard in self.loaded_shards:
self._persist_shard(shard)
```
#### File: tests/test_shard/test_buffer.py
```python
from collections import deque
import pytest
from litedb.shard.buffer import ShardBuffer
from litedb.shard.shard import Shard
from litedb.utils.serialization import dump_shard, load_shard
from litedb import Config
@pytest.fixture()
def buffer(tmpdir):
temp_directory = tmpdir.mkdir("table")
table_dir = str(temp_directory)
paths = {0: str(temp_directory.join("shard0"))}
shard = Shard()
dump_shard(temp_directory.join("shard0"), shard.to_bytes())
buffer = ShardBuffer(table_dir, paths, Config())
return buffer
@pytest.fixture()
def empty_buffer():
return ShardBuffer("tabledir", {}, Config())
def test_buffer_init(buffer):
assert buffer.current_shard_index == -1
assert isinstance(buffer.table_dir, str)
assert 0 in buffer.shard_paths
assert buffer.lru.mru == deque([])
def test_empty_buffer_iter(empty_buffer):
for _ in empty_buffer:
raise AssertionError
def test_buffer_iter(buffer):
for x, shard in enumerate(buffer):
assert x < 1
empty_shard = Shard()
assert shard.binary_blobs == empty_shard.binary_blobs
assert shard.checksum == empty_shard.checksum
assert shard.none_constant == empty_shard.none_constant
def test_buffer_get_item(buffer):
blank_shard = Shard()
empty_shard = buffer[0]
assert len(buffer.shard_paths) == 1
assert len(buffer.loaded_shards) == 1
assert empty_shard.binary_blobs == blank_shard.binary_blobs
assert empty_shard.checksum == blank_shard.checksum
second_shard = buffer[1]
assert len(buffer.shard_paths) == 2
assert len(buffer.loaded_shards) == 2
assert second_shard.binary_blobs == blank_shard.binary_blobs
assert second_shard.checksum == blank_shard.checksum
def test_buffer_create_new_path(buffer, tmpdir):
assert buffer._create_new_shard_path() == str(tmpdir.join("table").join("shard1"))
# add new shard
buffer[1]
assert buffer._create_new_shard_path() == str(tmpdir.join("table").join("shard2"))
def test_buffer_ensure_shard_loaded(buffer):
default_config = Config()
first_shard = buffer[0]
first_shard[0] = b"test"
# fill up the buffer
for i in range(1, default_config.page_cache + 1):
buffer._ensure_shard_loaded(i)
# shard should be evicted
assert 0 not in buffer.loaded_shards
assert 0 in buffer.shard_paths
# get first shard
first_shard = buffer[0]
assert first_shard[0] == b"test"
def test_buffer_persist_shard(buffer, tmpdir):
empty_shard = buffer[0]
assert 0 in buffer.loaded_shards
buffer._persist_shard(0)
shard_dir = buffer.shard_paths[0]
file_shard = Shard.from_bytes(load_shard(shard_dir), 512)
assert empty_shard.binary_blobs == file_shard.binary_blobs
assert empty_shard.checksum == file_shard.checksum
empty_shard[0] = b"test"
buffer._persist_shard(0)
file_shard = Shard.from_bytes(load_shard(shard_dir), 512)
assert file_shard[0] == b"test"
def test_buffer_free_shard(buffer):
empty_shard = buffer[0]
empty_shard[0] = b"test"
buffer._free_shard(0)
shard_dir = buffer.shard_paths[0]
file_shard = Shard.from_bytes(load_shard(shard_dir), 512)
assert file_shard.checksum == empty_shard.checksum
assert file_shard.binary_blobs == empty_shard.binary_blobs
assert 0 not in buffer.loaded_shards
assert len(buffer.loaded_shards) == 0
``` |
{
"source": "JonathanWamsley/typers_terminal",
"score": 3
} |
#### File: typers_terminal/application/app.py
```python
import curses
import sys
import os
import types
from application.text_displayer import TextDisplayer
from application.typing_drills import TypingDrills
from application.speed_reading_displayer import SpeedReadingDisplayer
from application.sponge_typing_displayer import SpongeTypingDisplayer
from application.windows import SelectionWindow
from application.typing_app import TypingApp
'''The application contains only a selection menus with a corresponding functionalities.
Each application menu inherits from menu
Each menu calls on another menu, class or func
'''
class Start(SelectionWindow):
'''The opening menu where the program starts
as well as a often being used as an end screen'''
def __init__(self, stdscr):
SelectionWindow.__init__(self, stdscr)
self.package_functions()
self.display_screen()
def package_functions(self):
def exit(self):
sys.exit(0)
func = {
'Typing V2': Typing2,
'Sponge Typing': SpongeTyping,
'Typing': Typing,
'Speed Reading': SpeedReading,
'View Statistics': exit,
'Settings': Settings,
'Exit': exit,
}
self.set_selection_functionality(func)
class Typing(SelectionWindow):
def __init__(self, stdscr):
SelectionWindow.__init__(self, stdscr)
self.package_functions()
self.display_screen()
def package_functions(self):
def about(self):
pass
func = {
'Drills': Drills,
'Submit Text': SubmitText,
'Return To Menu': Start,
}
self.set_selection_functionality(func)
class Typing2(SelectionWindow):
def __init__(self, stdscr):
SelectionWindow.__init__(self, stdscr)
self.package_functions()
self.display_screen()
def package_functions(self):
def about(self):
pass
func = {
'Drills': (TypingApp, 'clipboard'),
'Submit Text': SubmitText,
'Return To Menu': Start,
}
self.set_selection_functionality(func)
class Drills(SelectionWindow):
def __init__(self, stdscr):
SelectionWindow.__init__(self, stdscr)
self.stdscr = stdscr
self.package_functions()
self.display_screen()
def package_functions(self):
func = {
'Bigraphs': (DrillsWordList, 'bigraphs'),
'Trigraphs': (DrillsWordList, 'trigraphs'),
'Words': (DrillsWordList, 'words'),
'Return To Typing': Typing,
}
self.set_selection_functionality(func)
class DrillsWordList(SelectionWindow):
def __init__(self, stdscr, drill_type = 'words'):
SelectionWindow.__init__(self, stdscr)
self.stdscr = stdscr
self.drill_type = drill_type
self.package_functions()
self.display_screen()
def package_functions(self):
# TODO codesmell, should put functions in separate file to be called
# TODO codesmell and UI smell, SelectionWindows are called to get a single parameter
def select_word_list(self):
file_mappings = {}
for file in os.listdir("../data/"):
if file.endswith(".txt"):
file_mappings[file] = (DrillsWordAmount, (self.drill_type, file))
file_mappings['Return To Menu'] = Drills
return file_mappings
func = select_word_list(self)
self.set_selection_functionality(func)
class DrillsWordAmount(SelectionWindow):
def __init__(self, stdscr, drill_type = 'words'):
SelectionWindow.__init__(self, stdscr)
self.stdscr = stdscr
self.drill_type = drill_type
self.package_functions()
self.display_screen()
def __init__(self, stdscr, args):
SelectionWindow.__init__(self, stdscr)
self.stdscr = stdscr
self.drill_type, self.file = args
self.package_functions()
self.display_screen()
def package_functions(self):
def prompt_word_amount(self):
amount = get_word_amount(self)
return (DrillsWordFilter, (self.drill_type, self.file, amount))
def get_word_amount(self):
self.stdscr.clear()
amount = ''
curses.curs_set(2)
while True:
self.stdscr.addstr(0, 0, 'Enter The Amount Of Words To Type: ')
self.stdscr.clrtoeol()
self.stdscr.addstr(amount)
char = self.stdscr.get_wch()
if char.isprintable():
amount += char
elif char == curses.KEY_BACKSPACE or char == '\x7f':
amount = amount[:-1]
elif char == curses.KEY_ENTER or char == '\n':
try:
amount = int(amount)
except:
amount = ''
else:
break
self.stdscr.clear()
return int(amount)
func = {
'Words Amount': prompt_word_amount(self),
'Return To Typing': Typing,
}
self.set_selection_functionality(func)
class DrillsWordFilter(SelectionWindow):
def __init__(self, stdscr, args):
SelectionWindow.__init__(self, stdscr)
self.stdscr = stdscr
self.drill_type, self.file, self.word_amount = args
self.package_functions()
self.display_screen()
def package_functions(self):
def display_words(self):
filtered_letters = get_filter_letters(self)
displayed_words = TypingDrills(self.drill_type, self.file, self.word_amount, filtered_letters)
return (TextDisplayer, ' '.join(displayed_words.words))
def get_filter_letters(self):
self.stdscr.clear()
filter_letters = ''
curses.curs_set(2)
while True:
self.stdscr.addstr(0, 0, 'Enter The Starting Letters To Use Or Enter Blank For All: ')
self.stdscr.clrtoeol()
self.stdscr.addstr(filter_letters)
char = self.stdscr.get_wch()
if char.isprintable():
filter_letters += char
elif char == curses.KEY_BACKSPACE or char == '\x7f':
filter_letters = filter_letters[:-1]
elif char == curses.KEY_ENTER or char == '\n':
break
self.stdscr.clear()
return filter_letters
def about(self):
pass
func = {
'Filtered Words': display_words(self),
'Return To Typing': Typing,
}
self.set_new_screen(Start)
self.set_selection_functionality(func)
class SubmitText(SelectionWindow):
def __init__(self, stdscr):
SelectionWindow.__init__(self, stdscr)
self.stdscr = stdscr
self.package_functions()
self.display_screen()
def package_functions(self):
def about(self):
pass
func = {
'Enter URL': (TextDisplayer, 'url'),
'Paste Clipboard': (TextDisplayer, 'clipboard'),
'Return To Typing': Typing,
}
self.set_new_screen(Start)
self.set_selection_functionality(func)
class SpeedReading(SelectionWindow):
def __init__(self, stdscr):
SelectionWindow.__init__(self, stdscr)
self.stdscr = stdscr
self.package_functions()
self.display_screen()
def package_functions(self):
def about(self):
pass
func = {
'Enter URL': (SpeedReadingDisplayer, 'url'),
'Paste Clipboard': (SpeedReadingDisplayer, 'clipboard'),
'Return To Menu': Start,
}
self.set_new_screen(Start)
self.set_selection_functionality(func)
class SpongeTyping(SelectionWindow):
def __init__(self, stdscr):
SelectionWindow.__init__(self, stdscr)
self.stdscr = stdscr
self.package_functions()
self.display_screen()
def package_functions(self):
def about(self):
pass
func = {
'Enter URL': (SpongeTypingDisplayer, 'url'),
'Paste Clipboard': (SpongeTypingDisplayer, 'clipboard'),
'Return To Menu': Start,
}
self.set_new_screen(Start)
self.set_selection_functionality(func)
class Settings(SelectionWindow):
def __init__(self, stdscr):
SelectionWindow.__init__(self, stdscr)
self.package_functions()
self.display_screen()
def package_functions(self):
def about(self):
pass
func = {
'Change Key Configurations': about,
'Restore Default Key Configurations': about,
'Change Screen Colors': about,
'Return To Menu': Start,
}
self.set_selection_functionality(func)
```
#### File: typers_terminal/application/utilities.py
```python
import curses
from curses import wrapper
from bs4 import BeautifulSoup
import requests
from application.windows import TextWindow
'''This utilities.py holds functions used in the *app.py files
Main functions are:
get_text
process_text
analyze_text
'''
def get_text(stdscr, input_type):
'''opens either url or clipboard text prompt'''
def get_text_from_url(stdscr):
url = TextWindow(stdscr, message = 'Enter a URL and F4 when done: ').get_output()
text = scrape_url(url)
return text
def scrape_url(url):
text = ''
if url:
headers = requests.utils.default_headers()
headers.update({ 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'})
req = requests.get(url, headers)
soup = BeautifulSoup(req.content, 'html.parser')
wanted_tags = ['p', 'li', 'ul']
for header in soup.find_all(['h1','h2','h3']):
# a \h is used to indicate header
text += header.get_text() + '\h' + '\n'
for elem in header.next_elements:
if elem.name and elem.name.startswith('h'):
break
if any([True for tag in wanted_tags if tag == elem.name]):
text += elem.get_text() + '\n'
return text
def get_text_from_clipboard(stdscr):
return TextWindow(stdscr, message = 'Paste Clipboard and F4 when done: ').get_output()
if input_type == 'url':
text = get_text_from_url(stdscr)
elif input_type == 'clipboard':
text = get_text_from_clipboard(stdscr)
return text
def fit_text(doc, max_line_height, max_char_width):
'''Takes in a raw text and applies transforms so the text can be displayed
- format text to screen
- format width
- format hight
- output
- screens of lines or words: screens[lines[words]]
'''
def divide_chunks_by_width(paragraph, max_char_width):
line = ''
words = paragraph.split()
header = False
if paragraph[-2:] == '\h':
header = True
for idx, word in enumerate(words):
if len(line) + len(word) + 1 < max_char_width:
line += word + ' '
else:
if header:
line += '\h'
yield line
line = word + ' '
if idx == len(words) - 1:
yield line
def divide_chunks_by_height(paragraphs, max_line_height):
if len(paragraphs) < max_line_height:
yield paragraphs
else:
for idx in range(0, len(paragraphs), max_line_height):
yield paragraphs[idx:idx + max_line_height]
paragraphs = doc.split('\n')
paragraph_fitted_on_screen = []
for paragraph in paragraphs:
paragraph_by_width = [*divide_chunks_by_width(paragraph, max_char_width)]
paragraph_fitted_on_screen.append([*divide_chunks_by_height(paragraph_by_width, (max_line_height))])
return paragraph_fitted_on_screen
def apply_nlp(doc):
pass
def analyze_text():
pass
def main(stdscr):
text = TextWindow(stdscr, 'clipboard')
if __name__ == "__main__":
wrapper(main)
``` |
{
"source": "jonathanwang017/Pyweek19_team",
"score": 3
} |
#### File: Pyweek19_team/windows_dist/audio.py
```python
import pygame
pygame.mixer.pre_init(frequency=44100, size=-16, channels=1, buffer=4096)
directory = ''
def play_sound(sound, time):
sound = pygame.mixer.Sound(sound)
if time == 0:
sound.play()
else:
sound.play(maxtime = time)
def stop_sound(sound):
sound = pygame.mixer.Sound(sound)
sound.stop()
def hit_wall():
play_sound(directory + 'hitwall_sfx.wav', 0)
def level_end():
play_sound(directory + 'levelend_sfx.wav', 0)
def hit_switch():
play_sound(directory + 'switchstep_sfx.wav', 1000)
def step_spike():
play_sound(directory + 'spikestep_sfx.wav', 0)
def bg_music():
bgm = pygame.mixer.Sound('Pyweek_BG_1.wav')
bgm.set_volume(0.3)
bgm.play()
def bg_music_stop():
bgm = pygame.mixer.Sound('Pyweek_BG_1.wav')
bgm.stop()
``` |
{
"source": "JonathanWenger/probabilistic-linear-solvers-for-ml",
"score": 3
} |
#### File: experiments/scripts/pde_interpolate_sol.py
```python
import argparse
import os
import sys
import numpy as np
from fenics import *
from dolfin import *
from mshr import *
def main(args):
"""
Main entry point allowing external calls
Parameters
----------
args : list
command line parameter list
"""
args = parse_args(args)
# Filepaths
data_path = args.data_path
# Settings
parameters["reorder_dofs_serial"] = False # same mesh and linear system order
# Mesh sizes
mesh_res_coarse = args.resolutions[0]
mesh_res_fine = args.resolutions[1]
# -- Load Mesh and create Function Space --
mesh_coarse = Mesh(data_path + "mesh_res{}.xml".format(mesh_res_coarse))
mesh_fine = Mesh(data_path + "mesh_res{}.xml".format(mesh_res_fine))
V_coarse = FunctionSpace(mesh_coarse, "P", 1)
V_fine = FunctionSpace(mesh_fine, "P", 1)
# -- Load Solution --
u_vec = np.load(file=data_path + "solution_res{}.npy".format(mesh_res_fine))
u = Function(V_fine)
u.vector()[:] = u_vec
# -- Interpolate Solution --
u.set_allow_extrapolation(True) # fine and coarse mesh might not coincide
u_interpol = interpolate(u, V_coarse)
# -- Save Interpolation --
u_interpol_vec = np.array(u_interpol.vector().get_local())
np.save(
"{}solution_interpol_res{}tores{}".format(
data_path, mesh_res_fine, mesh_res_coarse
),
u_interpol_vec,
)
def parse_args(args):
"""
Parse command line parameters
Parameters
----------
args : list
command line parameters as list of strings
Returns
-------
argparse.Namespace : obj
command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Create mesh and linear system of a PDE via Galerkins method."
)
parser.add_argument(
"-f",
"--file",
dest="data_path",
help="filepath to save data at",
default="../../data/Galerkins_method/",
type=str,
)
parser.add_argument(
"-r",
"--resolutions",
dest="resolutions",
help="Mesh resolutions.",
default=[6, 128],
type=list,
)
return parser.parse_args(args)
def run():
"""
Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
``` |
{
"source": "JonathanWenger/pycalib",
"score": 2
} |
#### File: pycalib/pycalib/gp_classes.py
```python
import numpy as np
import tensorflow as tf
import gpflow
# Turn off tensorflow deprecation warnings
try:
from tensorflow.python.util import module_wrapper as deprecation
except ImportError:
from tensorflow.python.util import deprecation_wrapper as deprecation
deprecation._PER_MODULE_WARNING_LIMIT = 0
# gpflow imports
from gpflow.mean_functions import MeanFunction
from gpflow import features
from gpflow.conditionals import conditional, Kuu
from gpflow import settings
from gpflow.decors import params_as_tensors
from gpflow.quadrature import ndiag_mc
from gpflow.params import Parameter, DataHolder, Parameterized
from gpflow.models.model import GPModel
from gpflow import transforms, kullback_leiblers
from gpflow.models.svgp import Minibatch
############################
# Mean Functions
############################
class Log(MeanFunction):
"""
Natural logarithm prior mean function.
:math:`y_i = \log(x_i)`
"""
def __init__(self):
MeanFunction.__init__(self)
@params_as_tensors
def __call__(self, X):
# Avoid -inf = log(0)
tiny = np.finfo(np.float).tiny
X = tf.clip_by_value(X, clip_value_min=tiny, clip_value_max=np.inf)
# Returns the natural logarithm of the input
return tf.log(X)
class ScalarMult(MeanFunction):
"""
Scalar multiplication mean function.
:math:`y_i = \\alpha x_i`
"""
def __init__(self, alpha=1):
MeanFunction.__init__(self)
self.alpha = Parameter(alpha, dtype=settings.float_type)
@params_as_tensors
def __call__(self, X):
# Scalar multiplication
return tf.multiply(self.alpha, X)
############################
# Models
############################
class SVGPcal(gpflow.models.GPModel):
"""
Probability calibration using a sparse variational latent Gaussian process.
This is the Sparse Variational GP [1]_ calibration model. It has a single one-dimensional GP as a latent function
which is applied to all inputs individually.
.. [1] <NAME>., <NAME>. & <NAME>. Scalable Variational Gaussian Process Classification in
Proceedings of AISTATS (2015)
"""
def __init__(self, X, Y, kern, likelihood, feat=None,
mean_function=None,
num_latent=None,
q_diag=False,
whiten=True,
minibatch_size=None,
Z=None,
num_data=None,
q_mu=None,
q_sqrt=None,
**kwargs):
"""
- X is a data matrix, size N x D
- Y is a data matrix, size N x P
- kern, likelihood, mean_function are appropriate GPflow objects
- Z is a matrix of pseudo inputs, size M x D
- num_latent is the number of latent process to use, defaults to one.
- q_diag is a boolean. If True, the covariance is approximated by a
diagonal matrix.
- whiten is a boolean. If True, we use the whitened representation of
the inducing points.
- minibatch_size, if not None, turns on mini-batching with that size.
- num_data is the total number of observations, default to X.shape[0]
(relevant when feeding in external minibatches)
"""
# sort out the X, Y into MiniBatch objects if required.
if minibatch_size is None:
X = DataHolder(X)
Y = DataHolder(Y)
else:
X = Minibatch(X, batch_size=minibatch_size, seed=0)
Y = Minibatch(Y, batch_size=minibatch_size, seed=0)
# init the super class, accept args
if num_latent is None:
num_latent = 1
GPModel.__init__(self, X, Y, kern, likelihood, mean_function, num_latent, **kwargs)
self.num_data = num_data or X.shape[0]
self.num_classes = X.shape[1]
self.q_diag, self.whiten = q_diag, whiten
self.feature = features.inducingpoint_wrapper(feat, Z)
# init variational parameters
num_inducing = len(self.feature)
self._init_variational_parameters(num_inducing, q_mu, q_sqrt, q_diag)
def _init_variational_parameters(self, num_inducing, q_mu, q_sqrt, q_diag):
"""
Constructs the mean and cholesky of the covariance of the variational Gaussian posterior.
If a user passes values for `q_mu` and `q_sqrt` the routine checks if they have consistent
and correct shapes. If a user does not specify any values for `q_mu` and `q_sqrt`, the routine
initializes them, their shape depends on `num_inducing` and `q_diag`.
Note: most often the comments refer to the number of observations (=output dimensions) with P,
number of latent GPs with L, and number of inducing points M. Typically P equals L,
but when certain multi-output kernels are used, this can change.
Parameters
----------
:param num_inducing: int
Number of inducing variables, typically referred to as M.
:param q_mu: np.array or None
Mean of the variational Gaussian posterior. If None the function will initialise
the mean with zeros. If not None, the shape of `q_mu` is checked.
:param q_sqrt: np.array or None
Cholesky of the covariance of the variational Gaussian posterior.
If None the function will initialise `q_sqrt` with identity matrix.
If not None, the shape of `q_sqrt` is checked, depending on `q_diag`.
:param q_diag: bool
Used to check if `q_mu` and `q_sqrt` have the correct shape or to
construct them with the correct shape. If `q_diag` is true,
`q_sqrt` is two dimensional and only holds the square root of the
covariance diagonal elements. If False, `q_sqrt` is three dimensional.
"""
q_mu = np.zeros((num_inducing, self.num_latent)) if q_mu is None else q_mu
self.q_mu = Parameter(q_mu, dtype=settings.float_type) # M x P
if q_sqrt is None:
if self.q_diag:
self.q_sqrt = Parameter(np.ones((num_inducing, self.num_latent), dtype=settings.float_type),
transform=transforms.positive) # M x P
else:
q_sqrt = np.array([np.eye(num_inducing, dtype=settings.float_type) for _ in range(self.num_latent)])
self.q_sqrt = Parameter(q_sqrt, transform=transforms.LowerTriangular(num_inducing,
self.num_latent)) # P x M x M
else:
if q_diag:
assert q_sqrt.ndim == 2
self.num_latent = q_sqrt.shape[1]
self.q_sqrt = Parameter(q_sqrt, transform=transforms.positive) # M x L/P
else:
assert q_sqrt.ndim == 3
self.num_latent = q_sqrt.shape[0]
num_inducing = q_sqrt.shape[1]
self.q_sqrt = Parameter(q_sqrt, transform=transforms.LowerTriangular(num_inducing,
self.num_latent)) # L/P x M x M
@params_as_tensors
def build_prior_KL(self):
if self.whiten:
K = None
else:
K = Kuu(self.feature, self.kern, jitter=settings.numerics.jitter_level) # (P x) x M x M
return kullback_leiblers.gauss_kl(self.q_mu, self.q_sqrt, K)
@params_as_tensors
def _build_likelihood(self):
"""
This gives a variational bound on the model likelihood.
"""
# Get prior KL
KL = self.build_prior_KL()
# Get conditionals
# TODO: allow for block-diagonal covariance
fmeans, fvars = self._build_predict(self.X, full_cov=False, full_output_cov=False)
# Get variational expectations
var_exp = self.likelihood.variational_expectations(fmeans, fvars, self.Y, full_cov=False)
# re-scale for minibatch size
scale = tf.cast(self.num_data, settings.float_type) / tf.cast(tf.shape(self.X)[0], settings.float_type)
return tf.reduce_sum(var_exp) * scale - KL
@params_as_tensors
def _build_predict(self, Xnew, full_cov=False, full_output_cov=False):
"""
Compute the mean and variance of :math:`p(f_* \\mid y)`.
Parameters
----------
Xnew : np.array, shape=(N, K)
full_cov : bool
full_output_cov : bool
Returns
-------
mus, vars :
Mean and covariances of the variational approximation to the GP applied to the K input dimensions of Xnew.
Dimensions: mus= N x K and vars= N x K (x K)
"""
# Reshape to obtain correct covariance
num_data_new = tf.shape(Xnew)[0]
Xnew = tf.reshape(Xnew, [-1, 1])
# Compute conditional
mu_tmp, var_tmp = conditional(Xnew, self.feature, self.kern, self.q_mu, q_sqrt=self.q_sqrt,
full_cov=full_cov,
white=self.whiten, full_output_cov=full_output_cov)
# Reshape to N x K
mu = tf.reshape(mu_tmp + self.mean_function(Xnew), [num_data_new, self.num_classes])
var = tf.reshape(var_tmp, [num_data_new, self.num_classes])
return mu, var
@params_as_tensors
def predict_f(self, X_onedim, full_cov=False, full_output_cov=False):
"""
Predict the one-dimensional latent function
Parameters
----------
X_onedim
Returns
-------
"""
# Compute conditional
mu, var = conditional(X_onedim, self.feature, self.kern, self.q_mu, q_sqrt=self.q_sqrt,
full_cov=full_cov,
white=self.whiten, full_output_cov=full_output_cov)
return mu + self.mean_function(X_onedim), var
@params_as_tensors
def predict_full_density(self, Xnew):
pred_f_mean, pred_f_var = self._build_predict(Xnew)
return self.likelihood.predict_full_density(pred_f_mean, pred_f_var)
############################
# Inverse link functions
############################
class SoftArgMax(Parameterized):
"""
This class implements the multi-class softargmax inverse-link function. Given a vector :math:`f=[f_1, f_2, ... f_k]`,
then result of the mapping is :math:`y = [y_1 ... y_k]`, where
:math:`y_i = \\frac{\\exp(f_i)}{\\sum_{j=1}^k\\exp(f_j)}`.
"""
def __init__(self, num_classes, **kwargs):
super().__init__(**kwargs)
self.num_classes = num_classes
@params_as_tensors
def __call__(self, F):
return tf.nn.softmax(F)
############################
# Likelihoods
############################
class MultiCal(gpflow.likelihoods.Likelihood):
def __init__(self, num_classes, invlink=None, num_monte_carlo_points=100, **kwargs):
"""
A likelihood that performs multiclass calibration using the softargmax link function and a single latent
process.
Parameters
----------
num_classes : int
Number of classes.
invlink : default=None
Inverse link function :math:`p(y \mid f)`.
num_monte_carlo_points : int, default=100
Number of Monte-Carlo points for prediction, i.e. for the integral :math:`\int p(y=Y|f)q(f) df`, where
:math:`q(f)` is a Gaussian.
kwargs
"""
super().__init__(**kwargs)
self.num_classes = num_classes
if invlink is None:
invlink = SoftArgMax(self.num_classes)
elif not isinstance(invlink, SoftArgMax):
raise NotImplementedError
self.invlink = invlink
self.num_monte_carlo_points = num_monte_carlo_points
def logp(self, F, Y):
"""
Computes the log softargmax at indices from Y.
:math:`\\sigma(F)_y = \\frac{exp(F_y)}{\\sum_{k=1}^K \\exp(F_k)}`
Parameters
----------
F : tf.tensor, shape=(N, K)
Inputs to softargmax.
Y : tf.tensor, shape=(N, 1)
Indices of softargmax output.
Returns
-------
log_sigma_y : tf.tensor, shape=()
log softargmax at y
"""
if isinstance(self.invlink, SoftArgMax):
with tf.control_dependencies(
[
tf.assert_equal(tf.shape(Y)[1], 1),
tf.assert_equal(tf.cast(tf.shape(F)[1], settings.int_type),
tf.cast(self.num_classes, settings.int_type))
]):
return -tf.nn.sparse_softmax_cross_entropy_with_logits(logits=F, labels=Y[:, 0])[:, None]
else:
raise NotImplementedError
def variational_expectations(self, Fmus, Fvars, Y, full_cov=False):
"""
Computes an approximation to the expectation terms in the variational objective.
This function approximates the :math:`n` expectation terms :math:`\\mathbb{E}_{q(f_n)}[\\log p(y_n \\mid f_n)]`
in the variational objective function.
Parameters
----------
Fmus : tf.tensor, shape=(N, K)
Means of the latent GP at input locations X. Dimension N x K.
Fvars : tf.tensor, shape=(N, K(, K))
Variances of the latent GP at input locations X. Dimension N x K (x K).
Y : tf.tensor, shape=(N,)
Output vector.
Returns
-------
ve : tf.tensor, shape=(N,)
The variational expectation assuming a Gaussian approximation q.
"""
if isinstance(self.invlink, SoftArgMax):
# Compute variational expectations by 2nd order Taylor approximation
sigma_mu = tf.nn.softmax(Fmus, axis=1)
if full_cov:
sigSsig = tf.einsum("nk, nkl, nl -> n", sigma_mu, Fvars, sigma_mu)
else:
sigSsig = tf.reduce_sum(tf.multiply(tf.multiply(sigma_mu, Fvars), sigma_mu), axis=1)
diagSsig = tf.reduce_sum(tf.multiply(sigma_mu, Fvars), axis=1)
logsoftargmax_y = tf.squeeze(self.logp(Fmus, Y))
# Objective function
return logsoftargmax_y + 0.5 * (sigSsig - diagSsig)
else:
raise NotImplementedError
def predict_mean_and_var(self, Fmus, Fvars):
"""
Given a Normal distribution for the latent function, return the mean of :math:`Y`, if
:math:`q(f) = N(Fmu, Fvar)` and this object represents :math:`p(y|f)`, then this method computes the predictive
mean :math:`\\int\\int y p(y|f)q(f) df dy` and the predictive variance
:math:`\\int\\int y^2 p(y|f)q(f) df dy - [ \\int\\int y p(y|f)q(f) df dy ]^2`.
Parameters
----------
Fmus : array/tensor, shape=(N, D)
Mean(s) of Gaussian density.
Fvars : array/tensor, shape=(N, D(, D))
Covariance(s) of Gaussian density.
"""
raise NotImplementedError
def predict_density(self, Fmus, Fvars, Y):
"""
Given a Normal distribution for the latent function, and a datum Y, compute the log predictive density of Y.
i.e. if :math:`p(f_* | y) = \\mathcal{N}(Fmu, Fvar)` and :math:`p(y_*|f_*)` is the likelihood, then this
method computes the log predictive density :math:`\\log \\int p(y_*|f)p(f_* | y) df`. Here, we implement a
Monte-Carlo routine.
Parameters
----------
Fmus : array/tensor, shape=(N, K)
Mean(s) of Gaussian density.
Fvars : array/tensor, shape=(N, K(, K))
Covariance(s) of Gaussian density.
Y : arrays/tensors, shape=(N(, K))
Deterministic arguments to be passed by name to funcs.
Returns
-------
log_density : array/tensor, shape=(N(, K))
Log predictive density.
"""
if isinstance(self.invlink, SoftArgMax):
return ndiag_mc(self.logp, self.num_monte_carlo_points, Fmus, Fvars,
logspace=True, epsilon=None, Y=Y)
else:
raise NotImplementedError
def predict_full_density(self, Fmus, Fvars):
if isinstance(self.invlink, SoftArgMax):
# Sample from standard normal
N = tf.shape(Fmus)[0]
epsilon = tf.random_normal((self.num_monte_carlo_points, N, self.num_classes),
dtype=settings.float_type)
# Transform to correct mean and covariance
f_star = Fmus[None, :, :] + tf.sqrt(Fvars[None, :, :]) * epsilon # S x N x K
# Compute Softmax
p_y_f_star = tf.nn.softmax(f_star, axis=2)
# Average to obtain log Monte-Carlo estimate
return tf.log(tf.reduce_mean(p_y_f_star, axis=0))
else:
raise NotImplementedError
``` |
{
"source": "JonathanWillnow/CompuStatsClustering",
"score": 3
} |
#### File: CompuStatsClustering/auxiliary/auxiliary.py
```python
import numpy as np
import pandas as pd
import pandas.io.formats.style
import seaborn as sns
import matplotlib.pyplot as plt
import itertools as tools
import scipy.stats as scipy
import sklearn.datasets as sk_data
import sklearn.cluster as sk_cluster
import sklearn_extra.cluster as skx_cluster
import sklearn.preprocessing as sk_preprocessing
import sklearn.metrics as sk_metrics
import random
import kneed
import time
random.seed(10)
def generate_blobs(n_samples, centers, cluster_std):
features, true_labels = sk_data.make_blobs(n_samples=n_samples, centers=centers, cluster_std=cluster_std, random_state = 10)
# Standartization
scaler = sk_preprocessing.StandardScaler()
features_scaled = scaler.fit_transform(features)
blobs_df = pd.DataFrame(features_scaled, columns = ["x1", "x2"])
blobs_df["label"] = true_labels
return(blobs_df)
def generate_spiraldata(n, plotting):
n_samples = n
t = 1.25 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
if plotting == True:
fig, ax = plt.subplots(figsize = (10,10))
ax = sns.scatterplot(X[:, 0], X[:, 1],cmap=plt.cm.nipy_spectral)
ax.set_title("complex dataset")
return(X)
###############################################################################
def plot_generated_datasets(complex_data,blobs_df):
fig, ax = plt.subplots(1,2,figsize = (22,10))
plt.subplot(121)
ax = sns.scatterplot(x= blobs_df.x1, y=blobs_df.x2, hue=blobs_df.label)
ax.set_title("Isotropic dataset");
plt.subplot(122)
ax = sns.scatterplot(x= complex_data[:, 0], y= complex_data[:, 1],cmap=plt.cm.nipy_spectral)
ax.set_title("Complex spiral dataset")
### https://hdbscan.readthedocs.io/en/latest/comparing_clustering_algorithms.html
def plot_clusters(data, data_c, algorithm, args, kwds):
fig, ax = plt.subplots(1,2, figsize = (20,10))
start_time = time.time()
labels = algorithm(*args, **kwds).fit_predict(data[["x1", "x2"]])
end_time = time.time()
palette = sns.color_palette('deep', np.unique(labels).max() + 1)
#colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels]
results = pd.DataFrame(data, columns = ["x1", "x2"])
results["labels"] = labels
plt.subplot(121)
ax = sns.scatterplot(x=results.x1, y=results.x2, hue = labels)
ax.set_title('Clusters found by {}'.format(str(algorithm.__name__)), fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
#########
start_time = time.time()
labels_c = algorithm(*args, **kwds).fit_predict(data_c)
end_time = time.time()
palette = sns.color_palette('deep', np.unique(labels).max() + 1)
#colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels]
results_c = pd.DataFrame(data_c, columns = ["x1", "x2"])
results_c["labels_c"] = labels_c
plt.subplot(122)
ax = sns.scatterplot(x=results_c.x1, y=results_c.x2, hue = labels_c)
ax.set_title('Clusters found by {}'.format(str(algorithm.__name__)), fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
return(labels, labels_c)
####################################################################
def plot_Agglomerative_clusters(data,data_c, n, args):
kwds = {'n_clusters':n, 'linkage':'ward'}
fig, ax = plt.subplots(4,2, figsize = (20,40))
#start_time = time.time()
labels = sk_cluster.AgglomerativeClustering(*args, **kwds).fit_predict(data)
#end_time = time.time()
results = pd.DataFrame(data, columns = ["x1", "x2"])
results["labels"] = labels
plt.subplot(421)
ax = sns.scatterplot(x=results.x1, y=results.x2, hue = labels)
ax.set_title('Clusters found using ward' , fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
##
kwds = {'n_clusters':n, 'linkage':'ward'}
start_time = time.time()
labels_c = sk_cluster.AgglomerativeClustering(*args, **kwds).fit_predict(data_c)
end_time = time.time()
results_c = pd.DataFrame(data_c, columns = ["x1", "x2"])
results_c["labels_c"] = labels_c
plt.subplot(422)
ax = sns.scatterplot(x=results_c.x1, y=results_c.x2, hue = labels_c)
ax.set_title('Clusters found using ward' , fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
###########
kwds = {'n_clusters':n, 'linkage':'complete'}
start_time = time.time()
labels = sk_cluster.AgglomerativeClustering(*args, **kwds).fit_predict(data)
end_time = time.time()
results = pd.DataFrame(data, columns = ["x1", "x2"])
results["labels"] = labels
plt.subplot(423)
ax = sns.scatterplot(x=results.x1, y=results.x2, hue = labels)
ax.set_title('Clusters found using complete linkage' , fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
##
kwds = {'n_clusters':n, 'linkage':'complete'}
start_time = time.time()
labels_c = sk_cluster.AgglomerativeClustering(*args, **kwds).fit_predict(data_c)
end_time = time.time()
results_c = pd.DataFrame(data_c, columns = ["x1", "x2"])
results_c["labels_c"] = labels_c
plt.subplot(424)
ax = sns.scatterplot(x=results_c.x1, y=results_c.x2, hue = labels_c)
ax.set_title('Clusters found using complete linkage' , fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
###########
kwds = {'n_clusters':n, 'linkage':'average'}
start_time = time.time()
labels = sk_cluster.AgglomerativeClustering(*args, **kwds).fit_predict(data)
end_time = time.time()
results = pd.DataFrame(data, columns = ["x1", "x2"])
results["labels"] = labels
plt.subplot(425)
ax = sns.scatterplot(x=results.x1, y=results.x2, hue = labels)
ax.set_title('Clusters found using average linkage' , fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
##
kwds = {'n_clusters':n, 'linkage':'average'}
start_time = time.time()
labels_c = sk_cluster.AgglomerativeClustering(*args, **kwds).fit_predict(data_c)
end_time = time.time()
results_c = pd.DataFrame(data_c, columns = ["x1", "x2"])
results_c["labels_c"] = labels_c
plt.subplot(426)
ax = sns.scatterplot(x=results_c.x1, y=results_c.x2, hue = labels_c)
ax.set_title('Clusters found using average linkage' , fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
###########
kwds = {'n_clusters':n, 'linkage':'single'}
start_time = time.time()
labels = sk_cluster.AgglomerativeClustering(*args, **kwds).fit_predict(data)
end_time = time.time()
results = pd.DataFrame(data, columns = ["x1", "x2"])
results["labels"] = labels
plt.subplot(427)
ax = sns.scatterplot(x=results.x1, y=results.x2, hue = labels)
ax.set_title('Clusters found using single linkage' , fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
##
kwds = {'n_clusters':n, 'linkage':'single'}
start_time = time.time()
labels_c = sk_cluster.AgglomerativeClustering(*args, **kwds).fit_predict(data_c)
end_time = time.time()
results_c = pd.DataFrame(data_c, columns = ["x1", "x2"])
results_c["labels_c"] = labels_c
plt.subplot(428)
ax = sns.scatterplot(x=results_c.x1, y=results_c.x2, hue = labels_c)
ax.set_title('Clusters found using single linkage' , fontsize=24)
#plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)
###########
#############################################################################
def kmeans_validation_example(kmeans_kwargs, kmax, data):
sse = []
for k in range(1, kmax):
kmeans = sk_cluster.KMeans(n_clusters=k, **kmeans_kwargs, random_state = 10)
kmeans.fit(data[["x1", "x2"]])
sse.append(kmeans.inertia_)
kl = kneed.KneeLocator(range(1, kmax), sse, curve="convex", direction="decreasing")
# kl.elbow
silhouette_coef = []
for k in range(2, kmax):
kmeans = sk_cluster.KMeans(n_clusters=k, **kmeans_kwargs, random_state = 10)
kmeans.fit(data[["x1", "x2"]])
score = round(sk_metrics.silhouette_score(data, kmeans.labels_),4)
silhouette_coef.append(score)
max_score = max(silhouette_coef)
best_k = 2+silhouette_coef.index(max_score)
fig, ax = plt.subplots(2,2, figsize = (20,20))
plt.subplot(221)
ax = sns.scatterplot(x= data.x1, y=data.x2, hue=data.label)
#ax.set_xlabel("number of clusters")
ax.set_title("Original dataset");
labels_optimal = sk_cluster.KMeans(n_clusters=best_k, **kmeans_kwargs).fit_predict(data[["x1", "x2"]])
results_optimal = pd.DataFrame(data, columns = ["x1", "x2"])
results_optimal["labels"] = labels_optimal
plt.subplot(222)
ax = sns.scatterplot(x= results_optimal.x1, y=results_optimal.x2, hue=results_optimal.labels)
ax.set_title("Predicted clusters");
#fig.suptitle(f"k-means, Specifications: Centers: {centers}, Cluster std.: {cluster_std}", fontsize=16)
plt.subplot(223)
ax = sns.lineplot(x= range(1,kmax),y=sse)
ax1 = plt.axvline(x=kl.elbow, animated = True, ls = "--", c = "red")
ax.set_ylabel("SSE")
ax.set_xlabel("number of clusters")
ax.set_title("Ellbow Method");
plt.subplot(224)
ax = sns.lineplot(x=range(2,kmax), y=silhouette_coef)
ax1 = plt.axvline(x=best_k, animated = True, ls = "--", c = "red")
ax.set_ylabel("SSE")
ax.set_xlabel("number of clusters")
ax.set_title("Silhouette Coefficient");
fig.suptitle("K-Means", fontsize=20)
plt.tight_layout(pad=2.5);
#####################################################################################
def kmedoids_validation_example(kmedoids_kwargs, kmax, data):
sse = []
for k in range(1, kmax):
kmedoids = skx_cluster.KMedoids(n_clusters=k, **kmedoids_kwargs, random_state = 10 )
kmedoids.fit(data[["x1", "x2"]])
sse.append(kmedoids.inertia_)
kl = kneed.KneeLocator(range(1, kmax), sse, curve="convex", direction="decreasing")
kl.elbow
silhouette_coef = []
for k in range(2, kmax):
kmedoids = skx_cluster.KMedoids(n_clusters=k, **kmedoids_kwargs, random_state = 10 )
kmedoids.fit(data[["x1", "x2"]])
score = round(sk_metrics.silhouette_score(data, kmedoids.labels_),4)
silhouette_coef.append(score)
max_score = max(silhouette_coef)
best_k = 2+silhouette_coef.index(max_score)
fig, ax = plt.subplots(2,2, figsize = (20,20))
plt.subplot(221)
ax = sns.scatterplot(x= data.x1, y=data.x2, hue=data.label)
#ax.set_xlabel("number of clusters")
ax.set_title("Original dataset");
labels_optimal = skx_cluster.KMedoids(n_clusters=best_k, **kmedoids_kwargs ).fit_predict(data[["x1", "x2"]])
results_optimal = pd.DataFrame(data, columns = ["x1", "x2"])
results_optimal["labels"] = labels_optimal
plt.subplot(222)
ax = sns.scatterplot(x= results_optimal.x1, y=results_optimal.x2, hue=results_optimal.labels)
#ax1 = plt.axvline(x=best_k, animated = True, ls = "--", c = "red")
ax.set_title("Predicted clusters");
#fig.suptitle(f"k-means, Specifications: Centers: {centers}, Cluster std.: {cluster_std}", fontsize=16)
plt.subplot(223)
ax = sns.lineplot(x= range(1,kmax),y=sse)
ax1 = plt.axvline(x=kl.elbow, animated = True, ls = "--", c = "red")
ax.set_ylabel("SSE")
ax.set_xlabel("number of clusters")
ax.set_title("Ellbow Method");
plt.subplot(224)
ax = sns.lineplot(x=range(2,kmax), y=silhouette_coef)
ax1 = plt.axvline(x=best_k, animated = True, ls = "--", c = "red")
ax.set_ylabel("SSE")
ax.set_xlabel("number of clusters")
ax.set_title("Silhouette Coefficient");
fig.suptitle("K-Medoids", fontsize=20)
plt.tight_layout(pad=2.5);
#########################################################################
def benchmark_algorithm(dataset_sizes, cluster_function, function_args, function_kwds,
dataset_dimension=10, dataset_n_clusters=10, max_time=45, sample_size=2):
# Initialize the result with NaNs so that any unfilled entries
# will be considered NULL when we convert to a pandas dataframe at the end
result = pd.DataFrame(np.nan * np.ones((len(dataset_sizes), sample_size)), columns = ["nobs","time"])
for index, size in enumerate(dataset_sizes):
for s in range(sample_size):
# Use sklearns make_blobs to generate a random dataset with specified size
# dimension and number of clusters
data, labels = sk_data.make_blobs(n_samples=size,
n_features=dataset_dimension,
centers=dataset_n_clusters)
# Start the clustering with a timer
start_time = time.time()
cluster_function(data, *function_args, **function_kwds)
time_taken = time.time() - start_time
# create list to temporarily store results
h_result = []
h_result.append(time_taken)
# calculate mean of time taken and add to result DataFRame
result.loc[index, "time"] = (sum(h_result)/len(h_result))
result.loc[index, "nobs"] = size
# Return the result as a dataframe for easier handling with seaborn afterwards
#return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size),
# result.flatten()]).T, columns=['x','y'])
return(result)
#########################################################################
def benchmark():
dataset_sizes = np.hstack([np.arange(1, 4) * 500, np.arange(3,7) * 1000, np.arange(4,6) * 2000])
#########
k_means = sk_cluster.KMeans(10, random_state = 10)
k_means_data = benchmark_algorithm(dataset_sizes, k_means.fit, (), {})
k_medoids = skx_cluster.KMedoids(random_state = 10)
k_medoids_data = benchmark_algorithm(dataset_sizes, k_medoids.fit, (), {})
#{'n_clusters':3, "init": "random", "max_iter": 300, "random_state": 42})
mean_shift = sk_cluster.MeanShift(10)
mean_shift_data = benchmark_algorithm(dataset_sizes, mean_shift.fit, (), {})
affinity_propagation = sk_cluster.AffinityPropagation(random_state = 10);
affinity_propagation_data = benchmark_algorithm(dataset_sizes, affinity_propagation.fit, (), {});
agglomarative_clustering = sk_cluster.AgglomerativeClustering();
agglomarative_clustering_data = benchmark_algorithm(dataset_sizes, agglomarative_clustering.fit, (), {});
##########
fig,ax = plt.subplots(figsize = (10,10))
ax = sns.lineplot(x= 'nobs', y='time', data=k_means_data, label='Sklearn K-Means')
ax = sns.lineplot(x= 'nobs', y='time', data=k_medoids_data, label='Sklearn K-Medoids')
ax = sns.lineplot(x= 'nobs', y='time', data=mean_shift_data, label='Sklearn Meanshift')
ax = sns.lineplot(x= 'nobs', y='time', data=affinity_propagation_data, label='Sklearn Affinity Propagation')
ax = sns.lineplot(x= 'nobs', y='time', data=agglomarative_clustering_data, label='Sklearn Agglomerative Clustering')
ax.set_xlabel("Size of dataset")
ax.set_ylabel("Time Taken per run in sec")
plt.plot();
#########################################################################
def simulate_data(n_samples, centers, cluster_std, cov):
data, true_labels = sk_data.make_blobs(n_samples=n_samples, centers = centers, cluster_std = cluster_std, random_state = 10)
data = np.dot(data, cov)
scaler = sk_preprocessing.StandardScaler()
data_s = scaler.fit_transform(data)
data_df = pd.DataFrame(data_s, columns = ["x1", "x2"])
data_df["label"] = true_labels
return(data_df)
#########################################################################
def simulation_study(reps, data, algorithm, args, kwds):
# non spiral data
metrics_df = pd.DataFrame(columns = ["FMI", "DBI", "SC", "time"])
for rep in range(reps):
start_time = time.time()
algo_fitted = algorithm(*args, **kwds).fit(data[["x1", "x2"]])
end_time = time.time()
results = pd.DataFrame(data, columns = ["x1", "x2"])
results["label"] = algo_fitted.labels_
FMS = sk_metrics.fowlkes_mallows_score(data.label, algo_fitted.labels_)
DBI = sk_metrics.davies_bouldin_score(data[["x1","x2"]], algo_fitted.labels_)
SC = sk_metrics.silhouette_score(data[["x1","x2"]], algo_fitted.labels_)
metrics_df.loc[rep] = [FMS, DBI,SC, (end_time - start_time)]
FMS_avg = round(metrics_df.FMI.mean(),4)
DBI_avg = round(metrics_df.DBI.mean(),4)
SC_avg = round(metrics_df.SC.mean(),4)
fin_metrics = pd.DataFrame(columns = ["Fowlkes Mallows Index", "Davies Bouldin Index", "Silhouette Score", "time", "reps"])
fin_metrics.loc['{}'.format(str(algorithm.__name__))] = [FMS_avg,DBI_avg, SC_avg, metrics_df.time.mean(), reps]
fig, ax = plt.subplots(1,2, figsize = (14,7))
plt.subplot(121)
ax = sns.scatterplot(x=results.x1, y=results.x2, hue = data.label)
ax.set_title('Original dataset', fontsize=10)
plt.subplot(122)
ax = sns.scatterplot(x=results.x1, y=results.x2, hue = results.label)
ax.set_title('Clusters found by {}'.format(str(algorithm.__name__)), fontsize=10)
return(algo_fitted, fin_metrics)
#########################################################################
def simulation_results(nreps, data):
kmeans, metrics1 = simulation_study(nreps, data, sk_cluster.KMeans, (), {'n_clusters':3, "init": "random", "n_init": 10, "max_iter": 300, "random_state" : 10})
kmedoids, metrics2 = simulation_study(nreps, data, skx_cluster.KMedoids, (), {'n_clusters':3, "init": "random", "max_iter": 300, "random_state" : 10})
affinity, metrics8 = simulation_study(nreps, data, sk_cluster.AffinityPropagation, (), {'preference':-5.0, 'damping':0.95,"max_iter": 300, "random_state" : 10})
meanshift, metrics3 = simulation_study(1, data, sk_cluster.MeanShift, (), {'cluster_all':False, "max_iter": 300})
agglo_complete, metrics4 = simulation_study(1, data, sk_cluster.AgglomerativeClustering, (), {'n_clusters':3, 'linkage':'ward'})
agglo_complete, metrics5 = simulation_study(1, data, sk_cluster.AgglomerativeClustering, (), {'n_clusters':3, 'linkage':'complete'})
agglo_complete, metrics6 = simulation_study(1, data, sk_cluster.AgglomerativeClustering, (), {'n_clusters':3, 'linkage':'average'})
agglo_complete, metrics7 = simulation_study(1, data, sk_cluster.AgglomerativeClustering, (), {'n_clusters':3, 'linkage':'single'})
i = ["-","-","-","-","ward","complete","average","single"]
frames = [metrics1, metrics2,metrics8, metrics3, metrics4, metrics5, metrics6, metrics7]
result_df = pd.DataFrame(pd.concat(frames), columns = ["Fowlkes Mallows Index", "Davies Bouldin Index", "Silhouette Score", "time","reps"])
result_df["further spec."] = i
return(result_df)
#########################################################################
``` |
{
"source": "JonathanWillnow/ose-course-data-science",
"score": 3
} |
#### File: Angrist_Krueger_1991/auxiliary/data_helper.py
```python
import numpy as np
import pandas as pd
import patsy
FILE_PATH_CENSUS80_EXTRACT = "data/QOB.txt"
FILE_PATH_FULL_CENSUS7080 = "data/NEW7080.dta"
def get_df_census80():
cols = [0, 1, 3, 4, 5, 8, 9, 10, 11, 12, 15, 16, 17, 18, 19, 20, 23, 24, 26]
cols_names = [
"AGE",
"AGEQ",
"EDUC",
"ENOCENT",
"ESOCENT",
"LWKLYWGE",
"MARRIED",
"MIDATL",
"MT",
"NEWENG",
"CENSUS",
"STATE",
"QOB",
"RACE",
"SMSA",
"SOATL",
"WNOCENT",
"WSOCENT",
"YOB",
]
df = pd.read_csv(FILE_PATH_CENSUS80_EXTRACT, sep=" ", usecols=cols, names=cols_names)
# correct AGEQ
df.loc[df["CENSUS"] == 80, "AGEQ"] = df["AGEQ"] - 1900
return df
def get_df_census70():
cols = [
"v1",
"v2",
"v4",
"v5",
"v6",
"v9",
"v10",
"v11",
"v12",
"v13",
"v16",
"v17",
"v18",
"v19",
"v20",
"v21",
"v24",
"v25",
"v27",
]
cols_names = [
"AGE",
"AGEQ",
"EDUC",
"ENOCENT",
"ESOCENT",
"LWKLYWGE",
"MARRIED",
"MIDATL",
"MT",
"NEWENG",
"CENSUS",
"STATE",
"QOB",
"RACE",
"SMSA",
"SOATL",
"WNOCENT",
"WSOCENT",
"YOB",
]
df = pd.read_stata(FILE_PATH_FULL_CENSUS7080, columns=cols)
df = df.rename(columns=dict(zip(cols, cols_names)))
return df.loc[df["CENSUS"] == 70]
def get_df_census70_census_80():
cols = [
"v1",
"v2",
"v4",
"v5",
"v6",
"v9",
"v10",
"v11",
"v12",
"v13",
"v16",
"v17",
"v18",
"v19",
"v20",
"v21",
"v24",
"v25",
"v27",
]
cols_names = [
"AGE",
"AGEQ",
"EDUC",
"ENOCENT",
"ESOCENT",
"LWKLYWGE",
"MARRIED",
"MIDATL",
"MT",
"NEWENG",
"CENSUS",
"STATE",
"QOB",
"RACE",
"SMSA",
"SOATL",
"WNOCENT",
"WSOCENT",
"YOB",
]
df = pd.read_stata(FILE_PATH_FULL_CENSUS7080, columns=cols)
df = df.rename(columns=dict(zip(cols, cols_names)))
return df
def prepare_census_data(
df,
const=True,
qob=True,
yob=True,
age=True,
state_of_birth=False,
qob_x_yob=False,
qob_x_state=False,
):
if const:
df = add_constant(df)
if qob or qob_x_yob or qob_x_state:
df = add_quarter_of_birth_dummies(df)
if yob or qob_x_yob:
df = add_year_of_birth_dummies(df)
if age:
df = add_age_squared(df)
if state_of_birth or qob_x_state:
df = add_state_of_birth_dummies(df)
if qob_x_yob:
df = add_qob_yob_interactions(df)
if qob_x_state:
df = add_qob_state_interactions(df, qob_x_state)
return df
def add_constant(df):
df["CONST"] = 1
df["CONST"] = df["CONST"].astype(np.uint8)
return df
def get_constant_name():
return ["CONST"]
def add_quarter_of_birth_dummies(df):
return pd.concat((df, pd.get_dummies(df["QOB"], prefix="DUMMY_QOB")), axis=1)
def get_quarter_of_birth_dummy_names(start=1, end=3):
return [f"DUMMY_QOB_{j}" for j in range(start, end + 1)]
def add_year_of_birth_dummies(df):
return pd.concat((df, pd.get_dummies(df["YOB"] % 10, prefix="DUMMY_YOB")), axis=1)
def get_year_of_birth_dummy_names(start=0, end=8):
return [f"DUMMY_YOB_{i}" for i in range(start, end + 1)]
def add_age_squared(df):
df["AGESQ"] = df["AGEQ"].pow(2)
return df
def get_age_control_names(ageq=True, agesq=True):
lst = []
if ageq:
lst.append("AGEQ")
if agesq:
lst.append("AGESQ")
return lst
def add_state_of_birth_dummies(df):
return pd.concat((df, pd.get_dummies(df["STATE"], prefix="DUMMY_STATE")), axis=1)
def get_state_of_birth_dummy_names(state_list):
return [f"DUMMY_STATE_{i}" for i in state_list]
def get_state_list(df, rm_state=1):
state_list = set(df["STATE"])
state_list.remove(rm_state)
return state_list
def add_qob_yob_interactions(df):
interact_qob_yob = patsy.dmatrix(
" + ".join(get_qob_yob_interaction_names()), df, return_type="dataframe"
)
interact_qob_yob.drop("Intercept", axis=1, inplace=True)
return pd.concat((df, interact_qob_yob.astype(np.uint8)), axis=1)
def get_qob_yob_interaction_names(qob_start=1, qob_end=3, yob_start=0, yob_end=9):
return [
f"DUMMY_YOB_{i}:DUMMY_QOB_{j}"
for j in range(qob_start, qob_end + 1)
for i in range(yob_start, yob_end + 1)
]
def add_qob_state_interactions(df, state_list):
interact_qob_state = patsy.dmatrix(
" + ".join(get_qob_state_of_birth_interaction_names(state_list)),
df,
return_type="dataframe",
)
interact_qob_state.drop("Intercept", axis=1, inplace=True)
return pd.concat((df, interact_qob_state.astype(np.uint8)), axis=1)
def get_qob_state_of_birth_interaction_names(state_list):
return [f"DUMMY_STATE_{i}:DUMMY_QOB_{j}" for j in range(1, 4) for i in state_list]
def get_further_exogenous_regressors(race=True, smsa=True, married=True):
lst = []
if race:
lst.append("RACE")
if smsa:
lst.append("SMSA")
if married:
lst.append("MARRIED")
return lst
def get_region_of_residence_dummies():
return ["NEWENG", "MIDATL", "ENOCENT", "WNOCENT", "SOATL", "ESOCENT", "WSOCENT", "MT"]
def get_education_name():
return ["EDUC"]
def get_log_weekly_wage_name():
return ["LWKLYWGE"]
def add_education_dummies(df):
# dummy variable high school degree (12 or more years of education)
df["DUMMY_HIGH_SCHOOL"] = [1 if x >= 12 else 0 for x in df["EDUC"]]
# dummy variable college degree (16 or more years of education)
df["DUMMY_COLLEGE"] = [1 if x >= 16 else 0 for x in df["EDUC"]]
# dummy variable master's degree (18 or more years of education)
df["DUMMY_MASTER"] = [1 if x >= 18 else 0 for x in df["EDUC"]]
# dummy variable doctoral degree (20 or more years of education)
df["DUMMY_DOCTOR"] = [1 if x >= 20 else 0 for x in df["EDUC"]]
return df
def add_detrended_educational_variables(df, educ_vars=("EDUC")):
for ev in educ_vars:
mean_ev = df.groupby(["YOB", "QOB"])[ev].mean().to_frame()
mean_ev["MV_AVG"] = two_sided_moving_average(mean_ev.values)
for yob in set(df["YOB"]):
for qob in set(df["QOB"]):
df.loc[(df["YOB"] == yob) & (df["QOB"] == qob), f"MV_AVG_{ev}"] = mean_ev.loc[
(yob, qob), "MV_AVG"
]
df[f"DTRND_{ev}"] = df[ev] - df[f"MV_AVG_{ev}"]
return df
def two_sided_moving_average(x):
ma = np.full_like(x, np.nan)
for i in range(2, len(x) - 2):
ma[i] = (x[i - 2] + x[i - 1] + x[i + 1] + x[i + 2]) / 4
return ma
``` |
{
"source": "jonathan-winn-geo/cmatools",
"score": 3
} |
#### File: cmatools/examplesubpackage/example_numpy.py
```python
DEBUG = True
CONSTANT = 21
module_level_variable1 = 12345
module_level_variable2 = 98765
"""int: Module level variable documented inline.
The docstring may span multiple lines. The type may optionally be specified
on the first line, separated by a colon.
"""
def function_with_types_in_docstring(param1, param2):
"""Compare if param1 is greater than param2.
Example function with types documented in the docstring.
Function tests if param1 is greater than param2 (True) otherwise
returns False.
`PEP 484`_ type annotations are supported. If attribute, parameter, and
return types are annotated according to `PEP 484`_, they do not need to be
included in the docstring:
Parameters
----------
param1 : int
The first parameter.
param2 : str
The second parameter.
Returns
-------
bool
True if successful, False otherwise.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484/
"""
result = None
try:
converted_param2 = int(param2)
if param1 > converted_param2:
result = True
else:
result = False
except ValueError:
print("Parameter 2 must be a string representing a number using digits [0-10]")
raise ValueError
except TypeError:
print("Parameter 1 must be an integer")
raise TypeError
print(f"Function called with: {param1} and {param2}")
print(f"Function returns: {result}")
return result
def function_with_pep484_type_annotations(param1: int, param2: str) -> bool:
"""Compare if param1 is greater than param2.
Example function with PEP 484 type annotations.
The return type must be duplicated in the docstring to comply
with the NumPy docstring style.
Parameters
----------
param1
The first parameter.
param2
The second parameter.
Returns
-------
bool
True if successful, False otherwise.
"""
result = None
try:
converted_param2 = int(param2)
if param1 > converted_param2:
result = True
else:
result = False
except ValueError:
print("Parameter 2 must be a string representing a number using digits [0-10]")
raise ValueError
except TypeError:
print("Parameter 1 must be an integer")
raise TypeError
print(f"Function called with: {param1} and {param2}")
print(f"Function returns: {result}")
return result
def module_level_function(param1, param2=None, *args, **kwargs):
"""Evaluate to true if any paramaters are greater than 100.
This is an example of a module level function.
Function parameters should be documented in the ``Parameters`` section.
The name of each parameter is required. The type and description of each
parameter is optional, but should be included if not obvious.
This example function calculates if any of the params are greater than
a target value of 100, and if so returns True
If *args or **kwargs are accepted,
they should be listed as ``*args`` and ``**kwargs``.
The format for a parameter is::
name : type
description
The description may span multiple lines. Following lines
should be indented to match the first line of the description.
The ": type" is optional.
Multiple paragraphs are supported in parameter
descriptions.
Parameters
----------
param1 : int
The first parameter.
param2 : :obj:`str`, optional
The second parameter.
*args
Variable length argument list.
**kwargs
Arbitrary keyword arguments.
Returns
-------
bool
True if successful, False otherwise.
The return type is not optional. The ``Returns`` section may span
multiple lines and paragraphs. Following lines should be indented to
match the first line of the description.
The ``Returns`` section supports any reStructuredText formatting,
including literal blocks::
{
'param1': param1,
'param2': param2
}
Raises
------
AttributeError
The ``Raises`` section is a list of all exceptions
that are relevant to the interface.
ValueError
If `param2` is equal to `param1`.
ValueError
If `param2` is not a string
"""
if param1 == param2:
print(f"param1: {param1}, param2: {param2}")
error_message = "param1 may not be equal to param2"
print(error_message)
raise ValueError(error_message)
# Collect the params and find the max value
value_list = []
value_list.append(param1)
if param2:
if not isinstance(param2, str):
error_message = "param2 must be a string"
print(error_message)
raise ValueError(error_message)
else:
converted_param2 = int(param2)
value_list.append(converted_param2)
if args:
for x in args:
if not isinstance(x, int):
error_message = "args values must be integers"
print(error_message)
raise ValueError(error_message)
value_list.append(x)
if kwargs:
print("Metadata content")
for key, value in kwargs.items():
print(f"{key}: {value}")
if key == "verbose" and value is True:
print("Additional verbose output: ......................")
# Find max value from the compiled list
max_value = max(value_list)
print(
f"param1: {param1}, param2: {param2}, args: {args}, "
f"kwargs: {kwargs}. Max value: {max_value}"
)
# Function returns True if any of the params are greater than 100
target_value = 100
if max_value > target_value:
return True
else:
return False
def example_generator(n):
"""Yield next number.
Generators have a ``Yields`` section instead of a ``Returns`` section.
Parameters
----------
n : int
The upper limit of the range to generate, from 0 to `n` - 1.
Yields
------
int
The next number in the range of 0 to `n` - 1.
Raises
------
The ``Raises`` section is a list of all exceptions
that are relevant to the interface.
TypeError
If `n` is not an integer
Examples
--------
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> print([i for i in example_generator(4)])
[0, 1, 2, 3]
"""
try:
for i in range(n):
yield i
except TypeError as err:
print("n must be an integer")
raise err
class ExampleError(Exception):
"""Exceptions are documented in the same way as classes.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Parameters
----------
msg : str
Human readable string describing the exception.
code : :obj:`int`, optional
Numeric error code.
Attributes
----------
msg : str
Human readable string describing the exception.
code : int
Numeric error code.
"""
def __init__(self, msg, code=None):
self.msg = msg
self.code = code
# def __str__(self):
# return f'{self.msg}, {self.code}'
class ExampleClass(object):
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
in an ``Attributes`` section and follow the same formatting as a
function's ``Args`` section. Alternatively, attributes may be documented
inline with the attribute's declaration (see __init__ method below).
Properties created with the ``@property`` decorator should be documented
in the property's getter method.
Attributes
----------
attribute_string : :obj:`str`
Description of `attribute_string`.
attribute_list : :obj:`int`, optional
Description of `attr2`.
"""
# TODO - decide which init documentation method to follow
# params in class docstring, or under init?
def __init__(self, param1, param2, param3):
"""Docstring on the __init__ method.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Parameters
----------
param1 : str
Description of `param1`.
param2 : :obj:`list` of :obj:`str`
Description of `param2`. Multiple
lines are supported.
param3 : :obj:`int`, optional
Description of `param3`.
"""
self.attribute_string = param1
self.attribute_list = param2
self.attribute_integer = param3 #: Doc comment *inline* with attribute
#: list of str: Doc comment *before* attribute, with type specified
self.attr4 = ["attr4"]
self.attr5 = None
"""str: Docstring *after* attribute, with type specified."""
@property
def attribute_string(self):
"""Get the attribute string.
Getting or setting the attribute string value will verify the value
is a string.
"""
return self._attribute_string
@attribute_string.setter
def attribute_string(self, value):
if not isinstance(value, str):
raise TypeError("param1 must be a string")
self._attribute_string = value
@property
def attribute_list(self):
"""Get the attribute list.
Getting or setting the attribute list value will verify the value
is a list of strings.
"""
return self._attribute_list
@attribute_list.setter
def attribute_list(self, value):
if not isinstance(value, list):
raise TypeError("param2 must be a list of strings")
else:
for element in value:
if not isinstance(element, str):
raise TypeError("param2 must be a list of strings")
self._attribute_list = value
@property
def attribute_integer(self):
"""Get the attribute integer.
Getting or setting the attribute integer value will verify the value
is an integer.
"""
return self._attribute_integer
@attribute_integer.setter
def attribute_integer(self, value):
if not isinstance(value, int):
raise TypeError("param3 must be an integer")
self._attribute_integer = value
@property
def readonly_property(self):
"""str: Get readonly_property.
Properties should be documented in their getter method.
"""
return "readonly_property"
# pylint: disable=R0201
def example_method(self, param1, param2):
"""Class methods are similar to regular functions.
Parameters
----------
param1
The first parameter.
param2
The second parameter.
Returns
-------
bool
True if successful, False otherwise.
"""
return True
def __special__(self):
"""By default special members with docstrings are not included.
Special members are any methods or attributes that start with and
end with a double underscore. Any special member with a docstring
will be included in the output, if
``napoleon_include_special_with_doc`` is set to True.
This behavior can be enabled by changing the following setting in
Sphinx's conf.py::
napoleon_include_special_with_doc = True
"""
return True
def __special_without_docstring__(self): # noqa: D105
pass
def _private(self):
"""By default private members are not included.
Private members are any methods or attributes that start with an
underscore and are *not* special. By default they are not included
in the output.
This behavior can be changed such that private members *are* included
by changing the following setting in Sphinx's conf.py::
napoleon_include_private_with_doc = True
"""
pass
def _private_without_docstring(self):
pass
if __name__ == "__main__":
print("Running example numpy module")
print(f"DEBUG constant set to: {DEBUG}")
function_with_types_in_docstring(1, 2)
function_with_types_in_docstring(1, "test")
```
#### File: cmatools/helloworld/hello_world.py
```python
def hello_world():
"""Hello world."""
return "hello cma"
def hello_all():
"""Hello world."""
return "hello all"
```
#### File: cmatools/io/io_common.py
```python
import configparser
import os
import tarfile
from pathlib import Path
import requests
from cmatools.definitions import CONFIGFILE, CONFIGLOGS, ROOT_DIR
# Read from user-editable config file
config = configparser.ConfigParser()
config.read(CONFIGFILE)
# Get directory for analysis output files for download
datadir_outputs = config.get("DATADIR", "OUTPUTS")
# Get location where local input data will be saved, after download
datadir_root = config.get("DATADIR", "ROOT")
datadir_inputs = config.get("DATADIR", "INPUTS")
datadir_archives = config.get("DATADIR", "ARCHIVES")
# TODO check and add equiv func for scratchdir, or refactor to deal with both dirs
DEBUG = True
def return_datadir_root_dir(datadir_root: str):
"""Set datadir root directory, based on user config input.
Parameters
----------
datadir_root
The value set within user editable config file.
"""
# TODO add link to config file
# Deal with optional use of home dir
if datadir_root == "repo":
root_dir = ROOT_DIR
# Deal with optional use of home dir
elif datadir_root == "~":
root_dir = os.path.expanduser("~")
# Set path based on user selected config value
else:
root_dir = datadir_root
if DEBUG:
print(f"Root data directory user config setting: {datadir_root}")
print(f"Root data directory path set as: {root_dir}")
# Validate the user selected datadir is a directory and files are writeable
if check_access(root_dir):
return root_dir
else:
raise Exception(
"Datadir root directory not accessible: check value of "
"DATADIR / ROOT in config.ini"
)
def return_datadir_archives_dir() -> Path:
"""Get and create datadir archives directory within datadir."""
archivedir = Path(return_datadir_root_dir(datadir_root)) / datadir_archives
archivedir.mkdir(parents=True, exist_ok=True)
if DEBUG:
print(f"Archive data directory path: {archivedir}")
return archivedir
def return_datadir_inputs_dir() -> Path:
"""Get and create datadir input directory within datadir."""
inputdir = Path(return_datadir_root_dir(datadir_root)) / datadir_inputs
inputdir.mkdir(parents=True, exist_ok=True)
if DEBUG:
print(f"Input data directory path: {inputdir}")
return inputdir
def return_datadir_outputs_dir() -> Path:
"""Get and create datadir output directory within datadir."""
outputdir = Path(return_datadir_root_dir(datadir_root)) / datadir_outputs
outputdir.mkdir(parents=True, exist_ok=True)
if DEBUG:
print(f"Outputs data directory: {datadir_outputs}")
return outputdir
def write_source_config(archivename, extractname) -> None:
"""Write archive content names from data input source files."""
# TODO refactor
config = configparser.ConfigParser()
config.read(CONFIGLOGS)
# config.add_section('SECTION_NAME')
config["SOURCES"]["COP_ARCHIVENAME"] = archivename
config["SOURCES"]["COP_FILENAME"] = extractname
with open(CONFIGLOGS, "w") as configfile:
config.write(configfile)
def extract_archive_singlefile(archivedir: Path, inputsdir: Path, filename: str) -> str:
"""Extract files from tarfile.
Parameters
----------
archivedir
The directory holding tarfiles.
inputsdir
The directory where tarfile contents will be extracted to.
filename
The filename of the tarfile archive.
Returns
-------
str
The content filename within the tarfile archive .
"""
# Set the full path to the archive file
archivefilepath = archivedir / filename
with tarfile.open(archivefilepath, "r") as archive:
if DEBUG:
print(archive.getmembers())
print(archive.getnames()[0])
content_filename = archive.getnames()[0]
print("---")
# extract all files
archive.extract(archive.getnames()[0], path=inputsdir)
print(f"Tar file extracted to: {inputsdir}/{content_filename}")
return content_filename
def extract_archive_multi(filename: str) -> None:
"""Extract all files from archive.
Parameters
----------
filename
The filename of the tarfile archive.
"""
# TODO refactor
outputfilepath = Path(return_datadir_inputs_dir()) / filename
with tarfile.open(outputfilepath, "r") as archive:
print(archive.getmembers())
print(archive.getnames())
# Extract all files
archive.extractall(return_datadir_inputs_dir())
def download(url) -> None:
"""Download requested file from URL.
Parameters
----------
url
The full url path to the file to download.
"""
# Set as path object
urlpath = Path(url)
# Get filename from url
urlfilename = urlpath.name
# Set full path + name to downloaded location
file_path = return_datadir_inputs_dir() / urlfilename
# Open in binary mode
with open(file_path, "wb") as file:
# Get response request
response = requests.get(url)
# Check if an error has occurred
response.raise_for_status()
# Deal with potentially missing encoding header
encoding = "None listed"
if "Content-Encoding" in response.headers:
encoding = response.headers["Content-Encoding"]
if DEBUG:
print(f"Request sent: {response.request.headers}")
print(
f'Request response Content-Type: {response.headers["content-type"]} '
f"with Content-Encoding: {encoding}"
)
print(f"Request response status code: {response.status_code}")
print(f"Request response headers: {response.headers}")
print(f"Response response filename: {urlfilename}")
print(f"Download destination: {file_path}")
# Write downloaded content to file (raw response bytes)
file.write(response.content)
def check_access(directory: str) -> bool:
"""Check if the directory is accessible and writeable.
Parameters
----------
directory
The directory to be checked
Returns
-------
bool
Returns True if directory is accessible
"""
path = Path(directory)
file = path / "test.txt"
try:
file.touch(exist_ok=True)
file.unlink() # Remove file
return True
except FileNotFoundError as error:
print(error)
print("Check that root dir has been correctly set in config.ini")
raise FileNotFoundError
```
#### File: cmatools/subpackage/example.py
```python
def this_one():
"""Print this."""
print("this")
def another_one():
"""Print that."""
print("that")
```
#### File: a_unit/examplesubpackage/test_example_cmatools.py
```python
import types
import pytest
from cmatools.examplesubpackage.example_cmatools import (
ExampleClassAnother,
ExampleCMAClass,
ExampleCMAError,
example_generator,
function_with_pep484_type_annotations,
module_level_function,
)
# TODO check, refactor and fix, currently based on example_numpy.py
def test_function_with_pep484_type_annotations():
"""Test function_with_pep484_type_annotations."""
# Expect return True if param1 is > param2
assert function_with_pep484_type_annotations(12, "10") is True
# Expect return False if param1 < param2
assert function_with_pep484_type_annotations(2, "100") is False
def test_function_with_pep484_type_annotations_raises_error():
"""Test raises error: function_with_pep484_type_annotations."""
# Expect error raised if param2 is a word not digits
with pytest.raises(ValueError):
function_with_pep484_type_annotations(12, "ten")
# Expect error raised if param1 is not an integer
with pytest.raises(TypeError):
function_with_pep484_type_annotations("fifteen", "10")
def test_module_level_function_positional():
"""Test module_level_function with positional arguments."""
# Expect returns False, as value is less than target(100)
assert module_level_function(5) is False
# Expect returns False, as value is less than target(100)
assert module_level_function(5, "1") is False
# Expect return True, as value greater than target(100)
assert module_level_function(500, "1") is True
# Expect return True, as value greater than target(100)
assert module_level_function(500) is True
def test_module_level_function_positional_and_args():
"""Test module_level_function with positional arguments and *args list."""
# Expect returns False, as value is less than target(100)
assert module_level_function(5, "15", 5, 7, 8, 9) is False
# Expect returns False, as value is less than target(100)
assert module_level_function(5, "1", 5, 12, 78) is False
# Expect return True, as value greater than target(100)
assert module_level_function(500, "1", 45, 67, 888) is True
# Expect return True, as value greater than target(100)
assert module_level_function(500, "111", 12, 24, 35, 777) is True
def test_module_level_function_positional_args_and_kwargs():
"""Test module_level_function with positional arguments, *args and **kwargs."""
# Expect returns False, as value is less than target(100)
assert (
module_level_function(5, "15", 5, 7, 8, 9, QC="High", source="HadObs", CF=1.7)
is False
)
# Expect returns False, as value is less than target(100)
assert module_level_function(5, "1", 5, 12, 78, QC="Low") is False
# Expect return True, as value greater than target(100)
assert module_level_function(500, "1", 45, 67, 888, source="CEDA") is True
# Expect return True, as value greater than target(100)
assert (
module_level_function(500, "111", 12, 24, 35, 777, CF=1.6, source="CEDA")
is True
)
# Expect return True, as value greater than target(100)
# optional verbose param triggers verbose output
assert (
module_level_function(
500, "111", 12, 24, 35, 777, CF=1.6, source="CEDA", verbose=True
)
is True
)
def test_module_level_function_raises_error():
"""Test module_level_function raises error."""
# Expect raises error as param1 and param2 cannot be equal
with pytest.raises(ValueError):
module_level_function(1, 1)
# Expect raises error as param 2 is not a string
with pytest.raises(ValueError):
module_level_function(5, 1)
def test_example_generator():
"""Test example_generator function."""
# Confirm instance type
assert isinstance(example_generator(7), types.GeneratorType)
# Confirm expected output from use of generator in list comprehension
assert [i for i in example_generator(4)] == [0, 1, 2, 3]
# Confirm yield next values from generator
example_generator_value = example_generator(4)
assert next(example_generator_value) == 0
assert next(example_generator_value) == 1
assert next(example_generator_value) == 2
def test_example_generator_raises_error():
"""Test example_generator function raises error."""
# Confirm raises error if n is a string
with pytest.raises(TypeError):
result = [i for i in example_generator("12")]
print(result)
def test_example_error():
"""Test example_error class."""
# Confirm error message is raised correctly
with pytest.raises(ExampleCMAError, match="must be 0 or None"):
raise ExampleCMAError("value must be 0 or None")
# Confirm error message is raised correctly, when called with message and err code
with pytest.raises(ExampleCMAError, match="must be 0 or None"):
raise ExampleCMAError("value must be 0 or None", 24)
# pylint: disable=no-value-for-parameter
def test_example_class():
"""Test for example_class."""
# Instantiate class instance object
example = ExampleCMAClass("12", ["10", "20", "30"], 8888)
assert isinstance(example, ExampleCMAClass)
# pylint: enable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
def test_example_class_properties():
"""Test example_class properties."""
# ExampleClass('1', ['10', '29'], 3).attribute_string.func(11)
# Create a valid ExampleClass object
example = ExampleCMAClass("1", ["10", "29"], 3)
# test readonly attribute
assert example.readonly_property == "readonly_property"
# pylint: disable=no-value-for-parameter
# TODO - check here - integration vs unit test?
def test_example_class_raises_error():
"""Test example_class errors."""
# Check error is raised if param1 is not a string
with pytest.raises(TypeError, match="must be a string"):
ExampleCMAClass(1, ["10", "29"], 3)
# Check error is raised if param2 is not a list of strings
with pytest.raises(TypeError, match="must be a list of strings"):
ExampleCMAClass("1", "param2", "param3")
# Check error is raised if param2 is not list of strings
with pytest.raises(TypeError, match="must be a list of strings"):
ExampleCMAClass("1", [12, 13, 4], 3)
# Check error is raised if param3 is not an integer
with pytest.raises(TypeError, match="must be an integer"):
ExampleCMAClass("1", ["12", "23", "345"], "param3")
# Check that error is raised if a valid attribute is re-assigned an invalid value
example = ExampleCMAClass("12", ["10", "20", "30"], 8888) # Initiated as valid
example.attribute_integer = 9999 # Valid re-assignment
with pytest.raises(TypeError, match="must be an integer"):
example.attribute_integer = "8888" # Invalid, as is string, not integer
# pylint: enable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
def test_example_class_example_method():
"""Test example_class.example_method."""
# Create a valid ExampleClass object
example = ExampleCMAClass("1", ["10", "29"], 3)
# Test class method, expect return True
# 3 + 50 + 40 = 92, 92 > 21 , therefore True
assert example.example_method(50, 40) is True
# pylint: enable=no-value-for-parameter
def test_example_class_another():
"""Test example_class_another."""
# Instantiate class instance object and check class attributes
example = ExampleClassAnother("param1value", "param2value", 3)
assert example.reference_period == "1990-2020"
assert example.QC_LEVEL == "High"
# Access and check class attributes directly from the class
assert ExampleClassAnother.reference_period == "1990-2020"
assert ExampleClassAnother.QC_LEVEL == "High"
# Test class method, expect return False
# 3 !> 21
assert example.example_method() is False
# Test class method, expect return True
# (3 + 30 + 2) > 21
assert example.another_example_method(30, 2) is True
```
#### File: a_unit/io/test_io_common.py
```python
import os
from unittest.mock import patch
import pytest
from cmatools.definitions import ROOT_DIR
from cmatools.io.io_common import check_access, return_datadir_root_dir
# extract_archive_singlefile,; return_datadir_inputs_dir,; write_source_config,
# from pathlib import Path
DEBUG = True
# TODO refactor
# def test_extract_archive_singlefile():
#
# filename = "eobs.tgz"
# extractedfile = extract_archive_singlefile(filename)
# file = Path(return_datadir_inputs_dir() / extractedfile)
# print("Test ouput")
# print(extractedfile)
# print(file)
#
# assert file.is_file()
#
#
# def test_write_source_config():
#
# archivename = 'arcfile'
# extractfilename = 'extfile'
#
# write_source_config(archivename,extractfilename)
def test_return_datadir_root_dir_repo_input():
"""Test datadir root value with arg: input."""
assert return_datadir_root_dir("repo") == ROOT_DIR
def test_return_datadir_root_dir_temp_input(tmp_path):
"""Test datadir root value with arg: custom path."""
root_dir = tmp_path / "fake_sub_dir"
root_dir.mkdir()
assert return_datadir_root_dir(root_dir) == root_dir
# Can't know value of home ~ , so use mock
# Mocked home dir will not be accessible, so also need to mock check_access()
@patch("cmatools.io.io_common.check_access")
def test_return_datadir_root_dir_home_input(function_mock, monkeypatch):
"""Test datadir root value with arg: home ~."""
monkeypatch.setattr(os.path, "expanduser", lambda home: "/home/name/datadir")
function_mock.return_value = True
assert return_datadir_root_dir("~") == "/home/name/datadir"
def test_return_datadir_root_dir_bad_inputs():
"""Test exception raised."""
with pytest.raises(Exception):
return_datadir_root_dir("epor")
def test_check_access_raises_exception():
"""Test exception raised."""
root_dir = "/home/name/not_a_subdir"
with pytest.raises(FileNotFoundError):
check_access(root_dir)
def test_check_access():
"""Test check access function."""
# Root repo dir should be accessible
assert check_access(ROOT_DIR)
# User home root dir should be accessible
root_dir = os.path.expanduser("~")
assert check_access(root_dir)
```
#### File: b_integration/cli/test_cli_copernicus_download.py
```python
import argparse
from pathlib import Path
from cmatools.definitions import SRC_DIR
DEBUG = True
"""bool: Debugging module-level constant (Default: True)."""
package = "cmatools"
tool = "cli_copernicus_download"
# Define cli tool filepath
CLI = Path(SRC_DIR, package, tool)
"""str: Filepath to command line tool module."""
# TODO - fix these via mock
def test_cli_copernicus_download():
"""Test for cli_coperncius_download() function."""
parsed_args = argparse.Namespace(portal="COP", dataset="E-OBS", dryrun="True")
print(parsed_args)
# mock out the source data settings
# output = cli_analysis(parsed_args)
# # Default analysis is product: 1 * 2 = 2
# assert output == 2
#
# parsed_args = argparse.Namespace(
# x=1, y=2, sum=True, max=None, combined=False, verbose=False
# )
# output = cli_analysis(parsed_args)
# # Sum analysis: 1 + 2 = 3
# assert output == 3
```
#### File: tests/c_end_to_end/test_end_to_end.py
```python
def test_example_end_to_end():
"""Tests the end-to-end system."""
assert 1 == 1
# TODO - add more tests
```
#### File: d_user_interface/install/test_install.py
```python
import subprocess # nosec # bandit ignore
from importlib import metadata
import pkg_resources
from cmatools.definitions import PACKAGE, ROOT_DIR
def test_package_installed():
"""Test current package installation location."""
version = pkg_resources.get_distribution(PACKAGE).version
print("Version: ", version)
subprocess.run(["pip", "show", PACKAGE], check=True) # nosec
print("Source code root: ", ROOT_DIR)
def test_installed_version():
"""Test for current package version."""
version = pkg_resources.get_distribution(PACKAGE).version
assert version == "0.0.1"
# TODO - replace this test with a call to compare source
# version from files vs current installed version
version_alternative = metadata.version(PACKAGE)
assert version == version_alternative
``` |
{
"source": "jonathan-winn-geo/new-repo-example",
"score": 4
} |
#### File: src/cmatools/cli_hello_world.py
```python
import argparse
import pkg_resources
import sys
DEBUG = True
# Take the version number from the package version in setup
pkg_version = pkg_resources.get_distribution("cmatools").version
def cli_parser() -> argparse.ArgumentParser:
"""Factory function to create parser object for arguments from the command line
Returns
-------
parser : argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(
# Adds cli app title, if ommitted the filename is used (e.g. cli-simple.py)
prog="CLI-HELLO",
description="A simple hello-world command line app",
epilog=" --- ",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Returns a parser object, with the metadata and arguments set
return parser
def cli_arguments(parser) -> argparse.ArgumentParser:
"""Adds cli tool arguments to an existing parser
Parameters
----------
parser : argparse.ArgumentParser
An argparse parser
Returns
-------
parser : argparse.ArgumentParser
Parser object, with specified arguments
"""
# Arguments in argparse can be optional, positional or required, set to required to force user input
# Add named arguments (required for the tool to run)
# Set the argument type (e.g. int) and limit choices from a list
parser.add_argument("user", type=str, help="Enter username")
# Returns a parser object, with the arguments set
return parser
def build_parser() -> argparse.ArgumentParser:
"""Function to build a parser with program details and arguments set
Returns
-------
parser : argparse.ArgumentParser
"""
# Instantiate cli parser object
parser = cli_parser()
# Add the arguments
cli_arguments(parser)
# Returns a parser object, with the arguments set
return parser
def cli_parse_args(argv=None) -> argparse.Namespace:
"""Function to parse the passed arguments into an argparse namespace object
Parameters
----------
argv : str
The arguments from the command line
Returns
-------
parser.parse_args(argv) : argparse.Namespace
The namespace object holding the command line arguments
"""
# Instantiate cli parser object
# Add the arguments
parser = build_parser()
# Parse the arguments
# ArgumentParser.parse_args processes whatever list of strings you pass it.
# When you pass None, it uses sys.argv[1:] instead, this allows testing
return parser.parse_args(argv)
def cli_hello(parsed_args) -> str:
"""Function to print text to the terminal
Parameters
----------
parsed_args : argparse.Namespace
The command line arguments parsed into a namespace object
Returns
-------
analysis_product : int
The result of the multiplication of `parsed_args.x` and `parsed_args.y`
"""
terminal_output = f"Hello: {parsed_args.user}"
if DEBUG:
print(parsed_args)
# Simple print to show the cli tool is working
print("The cli hello-world tool has run")
print(f"The user value is: {parsed_args.user}")
print(f"The output is: {terminal_output}")
return terminal_output
def cli_entry_point(argv=None) -> None:
"""Function to wrap passing the parsed command line arguments to
the analysis function
Parameters
----------
argv : str
The command line arguments
Returns
-------
None
"""
# Read arguments from the command line
# Parsed arguments are present as object attributes
# Pass the parsed arguments to the cli analysis function
cli_hello(cli_parse_args(argv))
# The entrypoint returns none
if __name__ == "__main__":
# Runs entry point function when called as main
cli_entry_point()
if DEBUG:
print("------")
print(f"Number of arguments: {len(sys.argv)} arguments.")
print(f"Argument List: {str(sys.argv)}")
print("------")
```
#### File: tests/integration/test_combined.py
```python
from cmatools.combine.combine import combined
def test_combined():
"""Test of combined function"""
assert combined() == "this hello cma"
``` |
{
"source": "JonathanWMorris/automations_bot",
"score": 3
} |
#### File: JonathanWMorris/automations_bot/main.py
```python
import discord
import commands
import messages
import constants
import time
intents = discord.Intents.default()
intents.members = True
client = discord.Client(intents=intents)
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('-!hello'):
await message.channel.send(messages.welcome_message)
elif message.content.startswith('-!cat'):
image_url = commands.get_cat_image_url()
await message.channel.send(image_url)
elif message.content.startswith("-!private"):
await message.delete(delay=2)
elif message.content.startswith("-!fact"):
fact = commands.get_fact()
await message.channel.send(fact)
elif message.content.startswith("-!animals"):
command = commands.get_animal_command()
await message.channel.send(command)
elif message.content.startswith("-!help"):
await message.channel.send(messages.help_message)
elif message.content.startswith("-!verifyAll"):
names = message.channel.members
list_of_names = []
for name in names:
list_of_names.append(name.display_name)
responses = commands.get_batch_verification(list_of_names)
for response in responses:
await message.channel.send(response)
elif message.content.startswith("-!verify"):
name = message.content.replace("-!verify ", "")
response = commands.get_verification(name)
await message.channel.send(response)
elif message.content.startswith("-!names"):
names = message.channel.members
list_of_names = []
for name in names:
list_of_names.append(str(name.display_name))
await message.channel.send(list_of_names)
elif message.content.startswith("-!yoda"):
sentence = message.content.replace("-!yoda ", "")
yoda_sentence = commands.get_yoda_speak(sentence)
await message.channel.send(yoda_sentence)
time.sleep(1)
await message.delete()
# elif message.content.startswith("-!mute"):
# person = message.content.replace("-!mute ", "")
# commands.muted_people.append(person)
# await message.delete()
# await message.channel.send(f"{person} has been muted successfully")
#
# elif message.content.startswith("-!unmute"):
# person = message.content.replace("-!unmute ", "")
# commands.muted_people.remove(person)
# await message.delete()
# await message.channel.send(f"{person} has been un-muted successfully")
elif message.content.startswith("-!joke"):
joke = commands.get_joke()
await message.channel.send(joke.setup)
time.sleep(2)
await message.channel.send(joke.punchline)
elif message.content.startswith("-!nasa"):
search = message.content.replace("-!nasa ", "")
if search == "-!nasa":
await message.channel.send("SMH, You need to specify what images you want. 🤦 -!nasa {search}")
return
image_url = commands.get_nasa_image_url(search)
await message.channel.send(image_url)
elif message.content.startswith("-!"):
await message.channel.send(messages.response_text)
# All these are checking for NSFW content
if message.content.__contains__(".gif") \
or message.content.__contains__(".jpg") or message.content.__contains__(".png"):
is_NSFW = commands.check_nsfw_image(message.content, message.channel.name)
if is_NSFW:
await message.channel.send(messages.nsfw_content_message(message.author.display_name))
await message.delete()
if message.content.__contains__(".mov") \
or message.content.__contains__(".mp4") or message.content.__contains__(".avi"):
is_NSFW = commands.check_nsfw_video(message.content, message.channel.name)
if is_NSFW:
await message.channel.send(messages.nsfw_content_message(message.author.display_name))
await message.delete()
if message.attachments:
for attachment in message.attachments:
if attachment.url.__contains__(".jpg") \
or attachment.url.__contains__(".png") or attachment.url.__contains__(".gif"):
is_NSFW = commands.check_nsfw_image(attachment.url, message.channel.name)
if is_NSFW:
await message.channel.send(messages.nsfw_content_message(message.author.display_name))
await message.delete()
if attachment.url.__contains__(".mov") \
or attachment.url.__contains__(".mp4") or attachment.url.__contains__(".avi"):
is_NSFW = commands.check_nsfw_video(attachment.url, message.channel.name)
if is_NSFW:
await message.channel.send(messages.nsfw_content_message(message.author.display_name))
await message.delete()
if message.content.__contains__("827972357368053830"):
await message.channel.send(messages.mention_message)
if message.embeds:
for embed in message.embeds:
if embed.image:
is_NSFW = commands.check_nsfw_image(embed.image.url, message.channel.name)
if is_NSFW:
await message.channel.send(messages.nsfw_content_message(message.author.display_name))
await message.delete()
if embed.video:
is_NSFW = commands.check_nsfw_video(embed.video.url, message.channel.name)
if is_NSFW:
await message.channel.send(messages.nsfw_content_message(message.author.display_name))
await message.delete()
if message.author.display_name in commands.muted_people:
await message.delete()
client.run(constants.discord_token)
``` |
{
"source": "jonathanwsmith/hassio-broadlink-manager",
"score": 2
} |
#### File: hassio-broadlink-manager/broadlinkmanager/broadlinkmanager.py
```python
from flask import Flask, request, make_response, render_template, url_for, g
from flask import send_from_directory, jsonify
from flask_restful import Resource, Api
from json import dumps
import os
from os import environ
import json, argparse, datetime, subprocess, time
import broadlink
from broadlink.exceptions import ReadError, StorageError
from subprocess import call
#endregion
#region Parsing Default arguments for descovery
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument("--timeout", type=int, default=5,
help="timeout to wait for receiving discovery responses")
parser.add_argument("--ip", default=None,
help="ip address to use in the discovery")
parser.add_argument("--dst-ip", default="255.255.255.255",
help="destination ip address to use in the discovery")
args = parser.parse_args()
#endregion
#region Declaring Flask app
app = Flask(__name__)
api = Api(app)
#endregion
#region Global Properties
_continu_to_sweep=False
_rf_sweep_message=''
_rf_sweep_status=False
TICK = 32.84
IR_TOKEN = <PASSWORD>
TIMEOUT = 30
#endregion
#region Broadlink Helper Methods
def getDeviceName(deviceType):
name = {
0x2711: "SP2",
0x2719: "Honeywell SP2",
0x7919: "Honeywell SP2",
0x271a: "Honeywell SP2",
0x791a: "Honeywell SP2",
0x2720: "SPMini",
0x753e: "SP3",
0x7D00: "OEM branded SP3",
0x947a: "SP3S",
0x9479: "SP3S",
0x2728: "SPMini2",
0x2733: "OEM branded SPMini",
0x273e: "OEM branded SPMini",
0x7530: "OEM branded SPMini2",
0x7546: "OEM branded SPMini2",
0x7918: "OEM branded SPMini2",
0x7D0D: "TMall OEM SPMini3",
0x2736: "SPMiniPlus",
0x2712: "RM2",
0x2737: "RM Mini",
0x273d: "RM Pro Phicomm",
0x2783: "RM2 Home Plus",
0x277c: "RM2 Home Plus GDT",
0x272a: "RM2 Pro Plus",
0x2787: "RM2 Pro Plus2",
0x279d: "RM2 Pro Plus3",
0x27a9: "RM2 Pro Plus_300",
0x278b: "RM2 Pro Plus BL",
0x2797: "RM2 Pro Plus HYC",
0x27a1: "RM2 Pro Plus R1",
0x27a6: "RM2 Pro PP",
0x278f: "RM Mini Shate",
0x27c2: "RM Mini 3",
0x2714: "A1",
0x4EB5: "MP1",
0x4EF7: "Honyar oem mp1",
0x4EAD: "Hysen controller",
0x2722: "S1 (SmartOne Alarm Kit)",
0x4E4D: "Dooya DT360E (DOOYA_CURTAIN_V2)",
0x51da: "RM4 Mini",
0x5f36: "RM Mini 3",
0x6026: "RM4 Pro",
0x6070: "RM4c Mini",
0x61a2: "RM4 Pro",
0x610e: "RM4 Mini",
0x610f: "RM4c",
0x62bc: "RM4 Mini",
0x62be: "RM4c Mini",
0x51E3: "BG Electrical Smart Power Socket",
0x60c8: "RGB Smart Bulb",
}
return name.get(deviceType, "Not Supported")
def auto_int(x):
return int(x, 0)
def to_microseconds(bytes):
result = []
# print bytes[0] # 0x26 = 38for IR
index = 4
while index < len(bytes):
chunk = bytes[index]
index += 1
if chunk == 0:
chunk = bytes[index]
chunk = 256 * chunk + bytes[index + 1]
index += 2
result.append(int(round(chunk * TICK)))
if chunk == 0x0d05:
break
return result
def durations_to_broadlink(durations):
result = bytearray()
result.append(IR_TOKEN)
result.append(0)
result.append(len(durations) % 256)
result.append(len(durations) / 256)
for dur in durations:
num = int(round(dur / TICK))
if num > 255:
result.append(0)
result.append(num / 256)
result.append(num % 256)
return result
def format_durations(data):
result = ''
for i in range(0, len(data)):
if len(result) > 0:
result += ' '
result += ('+' if i % 2 == 0 else '-') + str(data[i])
return result
def parse_durations(str):
result = []
for s in str.split():
result.append(abs(int(s)))
return result
#endregion
#region UI Rendering Methods
#Homepage (Devices)
@app.route('/')
def devices():
return render_template('index.html')
@app.route('/generator')
def generator():
return render_template('generator.html')
@app.route('/livolo')
def livolo():
return render_template('livolo.html')
@app.route('/energenie')
def energenie():
return render_template('energenie.html')
@app.route('/repeats')
def repeats():
return render_template('repeats.html')
@app.route('/convert')
def convert():
return render_template('convert.html')
@app.route('/about')
def about():
return render_template('about.html')
#endregion UI Rendering Methods
#region API Methods
# Learn IR
@app.route('/ir/learn')
def learnir():
dtype = int(request.args.get('type'), 0)
host = request.args.get('host')
mac = bytearray.fromhex(request.args.get('mac'))
dev = broadlink.gendevice(dtype, (host, 80), mac)
dev.auth()
dev.enter_learning()
start = time.time()
while time.time() - start < TIMEOUT:
time.sleep(1)
try:
data = dev.check_data()
except (ReadError, StorageError):
continue
else:
break
else:
return jsonify('{"data":"","success":0,"message":"No Data Recived"}')
learned = ''.join(format(x, '02x') for x in bytearray(data))
return jsonify('{"data":"' + learned + '","success":1,"message":"IR Data Recived"}')
# Send IR/RF
@app.route('/command/send')
def command():
dtype = int(request.args.get('type'), 0)
host = request.args.get('host')
mac = bytearray.fromhex(request.args.get('mac'))
dev = broadlink.gendevice(dtype, (host, 80), mac)
command = request.args.get('command')
dev.auth()
try:
dev.send_data(bytearray.fromhex(''.join(command)))
return jsonify('{"data":"","success":1,"message":"Command sent successfully"}')
except:
return jsonify('{"data":"","success":0,"message":"Error occurred while Sending command!"}')
#Learn RF
@app.route('/rf/learn')
def sweep():
global _continu_to_sweep
global _rf_sweep_message
global _rf_sweep_status
_continu_to_sweep=False
_rf_sweep_message=''
_rf_sweep_status=False
dtype = int(request.args.get('type'), 0)
host = request.args.get('host')
mac = bytearray.fromhex(request.args.get('mac'))
dev = broadlink.gendevice(dtype, (host, 80), mac)
dev.auth()
dev.sweep_frequency()
_rf_sweep_message = "Learning RF Frequency, press and hold the button to learn..."
start = time.time()
while time.time() - start < TIMEOUT:
time.sleep(1)
if dev.check_frequency():
break
else:
_rf_sweep_message = "RF Frequency not found!"
dev.cancel_sweep_frequency()
return jsonify('{"data":"RF Frequency not found!","success":0}')
_rf_sweep_message = "Found RF Frequency - 1 of 2!"
time.sleep(1)
_rf_sweep_message = "You can now let go of the button"
_rf_sweep_status=True
while _continu_to_sweep == False:
_rf_sweep_message = "Click The Continue button"
_rf_sweep_message = "To complete learning, single press the button you want to learn"
_rf_sweep_status=False
dev.find_rf_packet()
start = time.time()
while time.time() - start < TIMEOUT:
time.sleep(1)
try:
data = dev.check_data()
except (ReadError, StorageError):
continue
else:
break
else:
_rf_sweep_message = "No Data Found"
return jsonify('{"data":"No Data Found"}')
_rf_sweep_message = "Found RF Frequency - 2 of 2!"
learned = ''.join(format(x, '02x') for x in bytearray(data))
return jsonify('{"data":"' + data + '"}')
#Get RF Learning state
@app.route('/rf/status')
def rfstatus():
global _continu_to_sweep
global _rf_sweep_message
global _rf_sweep_status
return jsonify('{"_continu_to_sweep":"' + str(_continu_to_sweep) + '","_rf_sweep_message":"' + _rf_sweep_message + '","_rf_sweep_status":"' + str(_rf_sweep_status) + '" }')
@app.route('/rf/continue')
def rfcontinue():
global _continu_to_sweep
global _rf_sweep_status
_rf_sweep_status = True
_continu_to_sweep = True
return jsonify('{"_continu_to_sweep":"' + str(_continu_to_sweep) + '","_rf_sweep_message":"' + _rf_sweep_message + '","_rf_sweep_status":"' + str(_rf_sweep_status) + '" }')
# Discover Devices
@app.route('/discover')
def discover():
_devices = '['
devices = broadlink.discover(
timeout=5, local_ip_address=None, discover_ip_address="255.255.255.255")
for device in devices:
if device.auth():
_devices = _devices + '{"name":"' + \
getDeviceName(device.devtype) + '",'
_devices = _devices + '"type":"' + \
format(hex(device.devtype)) + '",'
_devices = _devices + '"ip":"' + device.host[0] + '",'
_devices = _devices + '"mac":"' + \
''.join(format(x, '02x') for x in device.mac[::-1]) + '"},'
_devices = _devices[:-1] + ']'
return jsonify(_devices)
#endregion API Methods
#region Serving Static Files
#Serve Javascript
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('dist/js', path)
#Serve CSS
@app.route('/css/<path:path>')
def send_css(path):
return send_from_directory('dist/css', path)
#Serve Images
@app.route('/img/<path:path>')
def send_img(path):
return send_from_directory('dist/img', path)
#Serve Fonts
@app.route('/webfonts/<path:path>')
def send_webfonts(path):
return send_from_directory('dist/webfonts', path)
#endregion
#Start Application
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=7020)
``` |
{
"source": "jonathanxqs/lintcode",
"score": 4
} |
#### File: jonathanxqs/lintcode/158.py
```python
class Solution:
"""
@param s: The first string
@param b: The second string
@return true or false
"""
def anagram(self, s, t):
def hashLize(s):
dicts1= dict()
for j in s:
if j in dicts1.keys():
dicts1[j]+=1
else:
dicts1[j]=0
return dicts1
# write your code here
s1=hashLize(s)
t1=hashLize(t)
if s1 == t1:
return True
return False
```
#### File: jonathanxqs/lintcode/163.py
```python
class Solution:
# @paramn n: An integer
# @return: An integer
def numTrees(self, n):
# write your code here
f=[0 for i in range(n+5)]
f[0]=1
f[1]=1
for i in range(2,n+1):
for j in range(0,i):
f[i]+=f[j]*f[i-1-j]
return f[n]
```
#### File: jonathanxqs/lintcode/99.py
```python
class Solution:
"""
@param head: The first node of the linked list.
@return: nothing
"""
def reorderList(self, head):
# write your code here
if None == head or None == head.next:
return head
pfast = head
pslow = head
while pfast.next and pfast.next.next:
pfast = pfast.next.next
pslow = pslow.next
pfast = pslow.next
pslow.next = None
pnext = pfast.next
pfast.next = None
while pnext:
q = pnext.next
pnext.next = pfast
pfast = pnext
pnext = q
tail = head
while pfast:
pnext = pfast.next
pfast.next = tail.next
tail.next = pfast
tail = tail.next.next
pfast = pnext
return head
``` |
{
"source": "JonathanYang0127/editsql",
"score": 2
} |
#### File: JonathanYang0127/editsql/evaluate_model.py
```python
import argparse
import numpy as np
from functools import partial
import os
from model_util import evaluate_utterance_sample
from model.torch_utils import per_token_accuracy
import json
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', type=str)
args = parser.parse_args()
SQL_KEYWORDS = [
'select',
'group',
'where',
'order',
'by',
'join',
'on',
'as',
'desc',
'asc'
]
###########################################################
# Metrics #
###########################################################
def accumulate(metric_fn, data):
accuracies = []
for d in data:
value = metric_fn(d)
if value is not None:
print(value)
accuracies.append(value)
return accuracies
def compute_token_accuracy_question(data):
gold_seq = data['gold_query']
pred_seq = data['flat_prediction']
return per_token_accuracy(gold_seq, pred_seq)
def compute_string_accuracy_question(data):
gold_seq = data['gold_query']
pred_seq = data['flat_prediction']
if len(gold_seq) != len(pred_seq):
print(gold_seq, pred_seq, '\n')
return 0.0
for i, gold_token in enumerate(gold_seq):
if gold_token != pred_seq[i]:
print(gold_seq, pred_seq, '\n')
return 0.0
return 1.0
def compute_table_accuracy_question(data):
gold_seq = data['gold_query']
pred_seq = data['flat_prediction']
table_keyword = 'from'
if 'from' not in pred_seq:
table_keyword = 'select'
gold_table_idx, pred_table_idx = gold_seq.index(table_keyword), pred_seq.index(table_keyword)
gold_end_idx, pred_end_idx = len(gold_seq), len(pred_seq)
for j in range(gold_table_idx + 1, len(gold_seq)):
if gold_seq[j] in SQL_KEYWORDS and not (gold_seq[j] in ['join', 'as']):
gold_end_idx = j
break
for j in range(pred_table_idx + 1, len(pred_seq)):
if pred_seq[j] in SQL_KEYWORDS and not (pred_seq[j] in ['join', 'as']):
pred_end_idx = j
break
gold_subseq = gold_seq[gold_table_idx + 1: gold_end_idx]
pred_subseq = pred_seq[pred_table_idx + 1: pred_end_idx]
gold_tables, pred_tables = set(), set()
for element in gold_subseq:
if table_keyword == 'from' and not (element in [',', 'as', 'join']):
if not (len(element) == 2 and element[0] == 't' and element[1].isdigit()):
gold_tables.add(element)
if table_keyword == 'select' and '.' in element:
gold_tables.add(element)
for element in pred_subseq:
if table_keyword == 'from' and not (element in [',', 'as', 'join']):
if not (len(element) == 2 and element[0] == 't' and element[1].isdigit()):
pred_tables.add(element)
if table_keyword == 'select' and '.' in element:
pred_tables.add(element)
print(gold_tables, pred_tables)
if gold_tables == pred_tables:
return 1.0
return 0.0
def compute_interaction_match(data):
question_accuracies = accumulate(compute_string_accuracy_question, data)
last_interaction_start = -1
for i, d in enumerate(data):
if question_accuracies[i] is None:
continue
if d['index_in_interaction'] > 1:
question_accuracies[last_interaction_start] *= question_accuracies[i]
question_accuracies[i] = 0
else:
last_interaction_start = i
return question_accuracies
def compute_index_question_accuracy(question_accuracy_fn, index, data):
question_accuracies = accumulate(question_accuracy_fn, data)
index_question_accuracies = []
for i, d in enumerate(data):
if question_accuracies[i] is None:
continue
if d['index_in_interaction'] == index:
index_question_accuracies.append(question_accuracies[i])
return index_question_accuracies
def compute_last_question_accuracy(question_accuracy_fn, data):
question_accuracies = accumulate(question_accuracy_fn, data)
last_question_accuracies = []
for i, d in enumerate(data):
if d['index_in_interaction'] == 1 and i != 0:
last_question_accuracies.append(question_accuracies[i - 1])
last_question_accuracies.append(question_accuracies[-1])
return last_question_accuracies
METRIC_DICT = {
'token_accuracy': partial(accumulate, compute_token_accuracy_question),
'string_accuracy': partial(accumulate, compute_string_accuracy_question),
'interaction_accuracy': compute_interaction_match,
'table_match_accuracy': partial(accumulate, compute_table_accuracy_question),
'first_question_token_accuracy': partial(compute_index_question_accuracy, compute_token_accuracy_question, 1),
'first_question_string_accuracy': partial(compute_index_question_accuracy, compute_string_accuracy_question, 1),
'second_question_token_accuracy': partial(compute_index_question_accuracy, compute_token_accuracy_question, 2),
'second_question_string_accuracy': partial(compute_index_question_accuracy, compute_string_accuracy_question, 2),
'last_question_token_accuracy': partial(compute_last_question_accuracy, compute_token_accuracy_question),
'last_question_string_accuracy': partial(compute_last_question_accuracy, compute_string_accuracy_question)
}
##########################################################
# Evaluation #
##########################################################
def get_latest_model(log_dir):
latest_model, latest_version = None, -1
for root, dirs, files in os.walk(log_dir):
for f in files:
if 'save' in f:
version = int(f[5:])
if version > latest_version:
latest_model, latest_version = os.path.join(root, f), version
return latest_model
def get_predictions_file(log_dir):
return os.path.join(log_dir, 'valid_use_predicted_queries_predictions.json')
def evaluate(pred_file, metrics):
metric_values = {}
data = []
with open(pred_file, 'r') as f:
for line in f:
data.append(json.loads(line))
for metric_str in metrics:
value = METRIC_DICT[metric_str](data)
metric_values[metric_str] = value
return metric_values
pred_file = get_predictions_file(args.log_dir)
metric_values = evaluate(pred_file, ['table_match_accuracy'])#METRIC_DICT.keys())
for key, value in metric_values.items():
print(key, np.mean(value))
``` |
{
"source": "JonathanYang0127/google-research",
"score": 2
} |
#### File: google-research/dql_grasping/run_env_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl.testing import parameterized
from dql_grasping import grasping_env
from dql_grasping import policies
from dql_grasping import run_env
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
class RunEnvTest(parameterized.TestCase, test.TestCase):
@parameterized.named_parameters(
('collect_1', 'collect', True),
('eval_1', 'eval', True),
('emptyRoot', 'collect', False))
def testPolicyRun(self, tag, use_root_dir):
env = grasping_env.KukaGraspingProceduralEnv(
downsample_width=48, downsample_height=48,
continuous=True, remove_height_hack=True, render_mode='GUI')
policy = policies.RandomGraspingPolicyD4()
root_dir = os.path.join(FLAGS.test_tmpdir, tag) if use_root_dir else None
run_env.run_env(env,
policy=policy,
explore_schedule=None,
episode_to_transitions_fn=None,
replay_writer=None,
root_dir=root_dir,
tag=tag,
task=0,
global_step=0,
num_episodes=10)
if __name__ == '__main__':
test.main()
```
#### File: google-research/rllim/data_loading.py
```python
import numpy as np
def synthetic_data_loading(data_name='Syn1', data_no=1000, seed=0):
"""Generates synthetic datasets.
Args:
data_name: Syn1, Syn2, Syn3
data_no: number of training and testing sets
seed: random seed
Returns:
x_train: training features
y_train: training labels
x_test: testing features
y_test: testing labels
c_test: ground truth weights
test_idx: order of testing set index based on the distance from the boundary
"""
# X generation (X ~ N(0,I))
np.random.seed(seed)
data_x = np.random.normal(0, 1, [2 * data_no, 11])
# Y and ground truth local dynamics (C) initialization
data_y = np.zeros([2 * data_no,])
data_c = np.zeros([2 * data_no, 11])
# Boundary definition
if data_name == 'Syn1':
idx0 = np.where(data_x[:, 9] < 0)[0]
idx1 = np.where(data_x[:, 9] >= 0)[0]
elif data_name == 'Syn2':
idx0 = np.where(data_x[:, 9] + np.exp(data_x[:, 10]) < 1)[0]
idx1 = np.where(data_x[:, 9] + np.exp(data_x[:, 10]) >= 1)[0]
elif data_name == 'Syn3':
idx0 = np.where(data_x[:, 9] + np.power(data_x[:, 10], 3) < 0)[0]
idx1 = np.where(data_x[:, 9] + np.power(data_x[:, 10], 3) >= 0)[0]
# Y generation
data_y[idx0] = data_x[idx0, 0] + 2 * data_x[idx0, 1]
data_y[idx1] = 0.5 * data_x[idx1, 2] + 1 * data_x[idx1, 3] + \
1 * data_x[idx1, 4] + 0.5 * data_x[idx1, 5]
# Ground truth local dynamics (C) generation
data_c[idx0, 0] = 1.0
data_c[idx0, 1] = 2.0
data_c[idx1, 2] = 0.5
data_c[idx1, 3] = 1.0
data_c[idx1, 4] = 1.0
data_c[idx1, 5] = 0.5
# Splits train/test sets
x_train = data_x[:data_no, :]
x_test = data_x[data_no:, :]
y_train = data_y[:data_no]
y_test = data_y[data_no:]
c_test = data_c[data_no:, :]
# Order of testing set index based on the distance from the boundary
if data_name == 'Syn1':
test_idx = np.argsort(np.abs(x_test[:, 9]))
elif data_name == 'Syn2':
test_idx = np.argsort(np.abs(x_test[:, 9] + np.exp(x_test[:, 10]) - 1))
elif data_name == 'Syn3':
test_idx = np.argsort(np.abs(x_test[:, 9] + np.power(x_test[:, 10], 3)))
# Returns datasets
return x_train, y_train, x_test, y_test, c_test, test_idx
``` |
{
"source": "jonathanyeh0723/meme-generator",
"score": 3
} |
#### File: src/AIEngine/AIEngine.py
```python
import boto3
import io
from PIL import Image, ImageDraw, ExifTags, ImageColor
class AIEngine:
"""Define AI Engine class."""
@classmethod
def detect_labels(cls, photo, bucket, target, print_labels=False, show_boxes=False) -> bool:
"""
To utilize AWS Rekognition to detect labels in the photo.
Arguments:
photo {str} -- required, photo name stored in AWS s3.
bucket {str} -- required, your unique cloud storage resource.
target {str} -- required, interested label to detect.
print_labels {bool} -- optional, to print labels detected.
show_boxes {bool} -- optional, show bounding box to label detected.
Returns:
res {bool} -- specified label detected or not.
"""
client = boto3.client('rekognition')
response = client.detect_labels(Image={'S3Object':
{'Bucket': bucket, 'Name': photo}},
MaxLabels=10)
s3_connection = boto3.resource('s3')
s3_object = s3_connection.Object(bucket, photo)
s3_response = s3_object.get()
stream = io.BytesIO(s3_response['Body'].read())
image = Image.open(stream)
imgWidth, imgHeight = image.size
draw = ImageDraw.Draw(image)
if print_labels is True:
print('Detected labels for ' + photo)
print()
for label in response['Labels']:
print("Label: " + label['Name'])
print("Confidence: " + str(label['Confidence']))
print("Instances:")
for instance in label['Instances']:
print("Bounding box")
print(" Top: " + str(instance['BoundingBox']['Top']))
print(" Left: " + str(instance['BoundingBox']['Left']))
print(" Width: " + str(instance['BoundingBox']['Width']))
print(" Height: " + str(instance['BoundingBox']['Height']))
print("Confidence: " + str(instance['Confidence']))
print()
print("Parents:")
for parent in label['Parents']:
print(" " + parent['Name'])
print("----------")
print()
if show_boxes is True:
for label in response['Labels']:
if label['Name'] == 'Cat' or label['Name'] == 'Dog':
for instance in label['Instances']:
box = instance['BoundingBox']
left = imgWidth * box['Left']
top = imgHeight * box['Top']
width = imgWidth * box['Width']
height = imgHeight * box['Height']
points = (
(left, top),
(left + width, top),
(left + width, top + height),
(left, top + height),
(left, top)
)
draw.line(points, fill='#00d400', width=2)
image.show()
image.close()
labels = []
for item in response['Labels']:
labels.append((item['Name'], item['Confidence']))
res = False
for l in labels:
if l[0] in target and l[1] > 0.85:
res = l[0] in target
return res
``` |
{
"source": "jonathanyeh0723/openvino-people-counter-app",
"score": 3
} |
#### File: jonathanyeh0723/openvino-people-counter-app/main.py
```python
import argparse
import cv2
import os
import sys
import time
import logging as log
from inference import Network
def get_args():
"""Get command line arguments for execution."""
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", required=True, type=str,
help="The location of model .xml file")
parser.add_argument("-i", "--input", required=True, type=str,
help="The path to input stream")
parser.add_argument("-d", "--device", default="CPU", type=str,
help="To specify target device for inference")
parser.add_argument("-pt", "--prob_threshold", default=0.6, type=float,
help="Probability threshold for detections filtering")
parser.add_argument("-c", "--color", default="GREEN", type=str,
help="The color to draw the bounding box")
args = parser.parse_args()
return args
def preprocessing(frame, b, c, h, w):
"""Preprocess image for inference."""
# Resize to model's input w x h
p_frame = cv2.resize(frame, (w, h))
# Transpose the layout from hwc to chw
p_frame = p_frame.transpose((2, 0, 1))
# Reshape to model's shape b x c x h x w
p_frame = p_frame.reshape((b, c, h, w))
return p_frame
def infer_on_target(args):
"""Perform inference."""
logger = log.getLogger()
# Set default input mode
single_image_mode = False
# Instantiate the current request
cur_request_id = 0
# Initialize the plugin
plugin = Network()
# Load the model and get the input data
plugin.load_model(args.model, args.device, cur_request_id)
b, c, h, w = plugin.get_input_shape()
begin_time = time.time()
if args.input == 'CAM':
input_stream = 0
elif args.input.endswith('.jpg') or args.input.endswith('.bmp'):
single_image_mode = True
input_stream = args.input
else:
input_stream = args.input
assert os.path.isfile(args.input), "Specified input file doesn't exist"
# Create VideoCapture object, and have it opened
cap = cv2.VideoCapture(input_stream)
if not cap.isOpened():
log.error("ERROR! Unable to open the video source")
# Grab the dimension of input width and height
global initial_w, initial_h
initial_w = cap.get(3)
initial_h = cap.get(4)
# Set initial parameters for counting
last_count = 0
total_count = 0
frame_count = 0
detected_frame = 0
not_counted = 0
# Iterate the frames until the video ends
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
key_pressed = cv2.waitKey(60)
frame_count += 1
if not single_image_mode:
total_info = "Total people counted: {}".format(total_count - 1)
cv2.putText(frame, total_info, (525, 15), cv2.FONT_HERSHEY_COMPLEX,
0.5, (200, 10, 10), 1)
# Preprocessing input data
p_frame = preprocessing(frame, b, c, h, w)
# Perform async inference
plugin.exec_net(cur_request_id, p_frame)
inf_start = time.time()
# Get the inference result
if plugin.wait(cur_request_id) == 0:
result = plugin.extract_output(cur_request_id)
det_time = time.time() - inf_start
# Draw the bounding boxes
frame, current_count, flag = draw_boxes(frame, result,
args, not_counted)
counted_info = "Person detected: "
cv2.putText(frame, counted_info, (525, 35),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
if flag is True:
detected_frame += 1
not_counted = 0
cv2.putText(frame, "{}".format(flag), (680, 35),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)
if flag is False:
detected_frame = 0
not_counted += 1
cv2.putText(frame, "{}".format(flag), (680, 35),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
inf_info = "Inference time: {:.3f}ms".format(det_time * 1000)
cv2.putText(frame, inf_info, (15, 15), cv2.FONT_HERSHEY_COMPLEX,
0.5, (200, 10, 10), 1)
frame_info = "Total frames counted: {}".format(frame_count)
cv2.putText(frame, frame_info, (15, 35), cv2.FONT_HERSHEY_COMPLEX,
0.5, (200, 10, 10), 1)
check_info = "Number of frames when person is detected: {}".format(detected_frame)
cv2.putText(frame, check_info, (15, 75), cv2.FONT_HERSHEY_COMPLEX,
0.5, (200, 10, 10), 1)
lost_info = "Lost frame from last counting: {}".format(not_counted)
cv2.putText(frame, lost_info, (15, 95), cv2.FONT_HERSHEY_COMPLEX,
0.5, (200, 10, 10), 1)
if current_count > last_count:
start = time.time()
total_count = total_count + current_count - last_count
if current_count < last_count:
duration = int(time.time() - start)
last_count = current_count
if key_pressed == 27:
break
cv2.imshow('demo', frame)
# If single image mode, write out the inference result
if single_image_mode:
cv2.imwrite('./output_image.jpeg', frame)
cv2.waitKey(0)
# Release all. Destroy all OpenCV windows.
cap.release()
cv2.destroyAllWindows()
total_time = time.time() - begin_time
logger.error("[ INFO ] Conducted program successfully.")
logger.error("[ INFO ] Total frames counted: {}".format(frame_count))
logger.error("[ INFO ] Total execution time: {:.3f} seconds".format(total_time))
def draw_boxes(frame, result, args, not_counted):
"""Draw the bounding boxes if person is detected."""
current_count = 0
detect_flag = False
color_dict = {"BLUE": (255, 0, 0), "GREEN": (0, 255, 0),
"RED": (0, 0, 255)}
if args.color:
out_color = color_dict.get(args.color)
else:
out_color = color_dict["GREEN"]
for obj in result[0][0]:
conf = obj[2]
if conf >= args.prob_threshold:
xmin = int(obj[3] * initial_w)
ymin = int(obj[4] * initial_h)
xmax = int(obj[5] * initial_w)
ymax = int(obj[6] * initial_h)
current_count += 1
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), out_color, 3)
detect_flag = True
if detect_flag is False and not_counted < 15:
current_count = 1
return frame, current_count, detect_flag
def main():
"""Put all together. To execute the app."""
args = get_args()
infer_on_target(args)
if __name__ == "__main__":
main()
``` |
{
"source": "jonathanyeh0723/python-tricks",
"score": 4
} |
#### File: jonathanyeh0723/python-tricks/bank_account.py
```python
class Bank:
def __init__(self, account: str, balance: int):
self.account = account
self.balance = balance
def __repr__(self):
return f'Account: {self.account}; Balance: {self.balance}'
def deposit(self, num):
self.balance = self.balance + num
print("Deposit successfully!")
def withdraw(self, num):
if self.balance >= num:
self.balance = self.balance - num
print("Withdraw successfully!")
else:
print("Not having enough money!")
if __name__ == "__main__":
jonathan = Bank('jonathan', 100)
print(jonathan)
jonathan.deposit(100)
print(jonathan)
jonathan.withdraw(50)
print(jonathan)
jonathan.withdraw(200)
print(jonathan)
``` |
{
"source": "jonathanyepez/Attrition-IBM",
"score": 3
} |
#### File: jonathanyepez/Attrition-IBM/datasetEDA.py
```python
import pandas as pd
# import numpy as np
# import matplotlib
import matplotlib.pyplot as plt
# import seaborn as sns #already imported in 'processing'
import missingno # not 1005 sure if we will use this one
import processingAttributes as processing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# Preliminary Data Processing
df = pd.read_csv('EmployeeAttrition.csv') # read the file from the folder
# print(df.info()) #general information about the dataframe
# print(df.head()) #first 3 entries in the dataframe
# we have only one value in the column EmployeeCount. We can delete it
df.drop('EmployeeCount', inplace=True, axis=1)
df.drop('Over18', inplace=True, axis=1) # all employees assumed to be over18
df.drop('EmployeeNumber', inplace=True, axis=1) # get rid of the employee ID
df.drop('StandardHours', inplace=True, axis=1) # column has only one value: 80
standardHrs = 80
# Specify our categorical variables as 'category'
df['Attrition'] = df['Attrition'].astype('category')
df['BusinessTravel'] = df['BusinessTravel'].astype('category')
df['Department'] = df['Department'].astype('category')
df['Gender'] = df['Gender'].astype('category')
df['OverTime'] = df['OverTime'].astype('category')
df['EducationField'] = df['EducationField'].astype('category')
df['JobRole'] = df['JobRole'].astype('category')
df['MaritalStatus'] = df['MaritalStatus'].astype('category')
# copy of categorical features
df_categs = df.select_dtypes(include=['category']).copy()
# use label encoding to change data from categorical to int8 in the dataframe
# Encode categorical vars so that we can use feature selection algorithms
categorical_features = ['BusinessTravel', 'Department', 'Gender', 'OverTime',
'EducationField', 'JobRole', 'MaritalStatus']
for f in categorical_features:
colname = f+'_cat'
df[colname] = df[f].cat.codes
df.drop(f, axis=1, inplace=True)
df['Attrition'] = df['Attrition'].cat.codes # change yes/no to 1/0
del f, colname, categorical_features
tempVar = df['Attrition']
df.drop('Attrition', axis=1, inplace=True)
df.insert(0, 'Attrition', tempVar) # move target to the first column in the df
del tempVar # delete the temporary variable
# Checking for null values, empty cells
if df.isnull().any(axis=None):
print("\nPreview of data with null values:\nxxxxxxxxxxxxx")
print(df[df.isnull().any(axis=1)].head(3))
missingno.matrix(df)
plt.show()
# Checking if there are duplicated entries
if len(df[df.duplicated()]) > 0:
print("No. of duplicated entries: ", len(df[df.duplicated()]))
# print(df[df.duplicated(keep=False)].sort_values(by=list(df.columns)).head())
else:
print("No duplicated entries found")
# Define our variable of interest
TargetVar = 'Attrition' # the name of the column that we will be focusing on
# Running the code to get plots and preliminar information
processing.categorical_eda(df_categs, TargetVar)
# processing.numerical_eda(df)
"""
# Selecting most relevant features from dataframe to perform further analysis
try:
print("Select KBest with Mutual Info Classifier:")
processing.bestFeature_MutualInfoClassif(df, TargetVar)
print("\nSelect features based on Tree Classifier:")
processing.bestFeature_ExtraTreesClassif(df, TargetVar)
print("\nSelect features based on KBest and Chi2:")
processing.bestFeature_KBest_chi2(df, TargetVar)
except Exception as e:
print(e)
"""
# Preparing data for training and testing
X = df.iloc[:, 1:]
y = df[TargetVar] # target column -> i.e. Attrition
# feature selection
# Model Training------------------------------
# fit the model -> Logistic Regression
def trainModel(X, y, test_size=0.33):
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
model = LogisticRegression(solver='lbfgs', max_iter=5000)
model.fit(X_train, y_train)
# evaluate the model
yhat = model.predict(X_test)
# evaluate predictions
accuracy = accuracy_score(y_test, yhat)
print('Accuracy: %.2f' % (accuracy*100))
# fit the model -> all features
print("----------\nUsing all features: ")
trainModel(X, y)
# fit the model -> chi2 features
print("----------\nUsing chi2 for selection: ")
X_trans_chi2 = processing.bestFeature_KBest_chi2(df, TargetVar)
trainModel(X_trans_chi2, y)
# fit the model -> mutual information features
print("----------\nUsing mutual information selection: ")
X_trans_mutual = processing.bestFeature_MutualInfoClassif(df, TargetVar)
trainModel(X_trans_mutual, y)
``` |
{
"source": "jonathanyxchen/quoFEM-UQPy",
"score": 3
} |
#### File: examples/exampleCalibration/postprocess.py
```python
from __future__ import division, print_function
import sys
if sys.version.startswith('2'):
range=xrange
string_types = basestring
else:
string_types = str
import numpy as np
def process_results(response):
target_data = (np.loadtxt("target.txt")).T
current_data = (np.loadtxt("node.txt")).T
diff = (current_data - target_data)**2.
__, val1, val2 = [np.sqrt(np.sum(v)) for v in diff]
print(val1, val2)
with open('results.out', 'wb') as f:
f.write("{:.6f} {:.6f}".format(val1, val2).encode('utf8'))
``` |
{
"source": "JonathanZailer/gnxi",
"score": 2
} |
#### File: gnxi/gnmi_cli_py/py_gnmicli.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import re
import ssl
import six
try:
import gnmi_pb2 # pylint: disable=g-import-not-at-top
except ImportError:
print('ERROR: Ensure you have grpcio-tools installed; eg\n'
'sudo apt-get install -y pip\n'
'sudo pip install --no-binary=protobuf -I grpcio-tools==1.15.0')
import gnmi_pb2_grpc # pylint: disable=g-import-not-at-top
__version__ = '0.2'
_RE_PATH_COMPONENT = re.compile(r'''
^
(?P<pname>[^[]+) # gNMI path name
(\[(?P<key>\w+) # gNMI path key
=
(?P<value>.*) # gNMI path value
\])?$
''', re.VERBOSE)
class Error(Exception):
"""Module-level Exception class."""
class XpathError(Error):
"""Error parsing xpath provided."""
def _create_parser():
"""Create parser for arguments passed into the program from the CLI.
Returns:
Argparse object.
"""
parser = argparse.ArgumentParser(description='gNMI CLI utility.')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, epilog='\nExample'
' GetRequest without user/password and over-riding Target certificate CN:'
'\npython py_gnmicli.py -t 127.0.0.1 -p 8080 -x \'/access-points/'
'access-point[hostname=test-ap]/\' -rcert ~/certs/target-cert.crt -o '
'openconfig.example.com')
parser.add_argument('-t', '--target', type=str, help='The gNMI Target',
required=True)
parser.add_argument('-p', '--port', type=str, help='The port the gNMI Target '
'is listening on', required=True)
parser.add_argument('-u', '--username', type=str, help='Username to use'
'when establishing a gNMI Channel to the Target',
required=False)
parser.add_argument('-w', '--password', type=str, help='Password to use'
'when establishing a gNMI Channel to the Target',
required=False)
parser.add_argument('-m', '--mode', choices=['get', 'set', 'subscribe'],
help='Mode of operation when interacting with network'
' element. Default=get', default='get')
parser.add_argument('-pkey', '--private_key', type=str, help='Fully'
'quallified path to Private key to use when establishing'
'a gNMI Channel to the Target', required=False)
parser.add_argument('-rcert', '--root_cert', type=str, help='Fully quallified'
'Path to Root CA to use when building the gNMI Channel',
required=False)
parser.add_argument('-cchain', '--cert_chain', type=str, help='Fully'
'quallified path to Certificate chain to use when'
'establishing a gNMI Channel to the Target', default=None,
required=False)
parser.add_argument('-x', '--xpath', type=str, help='The gNMI path utilized'
'in the GetRequest or Subscirbe', required=True)
parser.add_argument('-o', '--host_override', type=str, help='Use this as '
'Targets hostname/peername when checking it\'s'
'certificate CN. You can check the cert with:\nopenssl'
'x509 -in certificate.crt -text -noout', required=False)
parser.add_argument('-v', '--verbose', type=str, help='Print verbose messages'
'to the terminal', required=False)
parser.add_argument('-d', '--debug', help='Enable gRPC debugging',
required=False, action='store_true')
return parser
def _path_names(xpath):
"""Parses the xpath names.
This takes an input string and converts it to a list of gNMI Path names. Those
are later turned into a gNMI Path Class object for use in the Get/SetRequests.
Args:
xpath: (str) xpath formatted path.
Returns:
list of gNMI path names.
"""
if not xpath or xpath == '/': # A blank xpath was provided at CLI.
return []
return xpath.strip().strip('/').split('/') # Remove leading and trailing '/'.
def _parse_path(p_names):
"""Parses a list of path names for path keys.
Args:
p_names: (list) of path elements, which may include keys.
Returns:
a gnmi_pb2.Path object representing gNMI path elements.
Raises:
XpathError: Unabled to parse the xpath provided.
"""
gnmi_elems = []
for word in p_names:
word_search = _RE_PATH_COMPONENT.search(word)
if not word_search: # Invalid path specified.
raise XpathError('xpath component parse error: %s' % word)
if word_search.group('key') is not None: # A path key was provided.
gnmi_elems.append(gnmi_pb2.PathElem(name=word_search.group(
'pname'), key={word_search.group('key'): word_search.group('value')}))
else:
gnmi_elems.append(gnmi_pb2.PathElem(name=word, key={}))
return gnmi_pb2.Path(elem=gnmi_elems)
def _create_stub(creds, target, port, host_override):
"""Creates a gNMI GetRequest.
Args:
creds: (object) of gNMI Credentials class used to build the secure channel.
target: (str) gNMI Target.
port: (str) gNMI Target IP port.
host_override: (str) Hostname being overridden for Cert check.
Returns:
a gnmi_pb2_grpc object representing a gNMI Stub.
"""
if host_override:
channel = gnmi_pb2_grpc.grpc.secure_channel(target + ':' + port, creds, ((
'grpc.ssl_target_name_override', host_override,),))
else:
channel = gnmi_pb2_grpc.grpc.secure_channel(target + ':' + port, creds)
return gnmi_pb2_grpc.gNMIStub(channel)
def _get(stub, paths, username, password):
"""Create a gNMI GetRequest.
Args:
stub: (class) gNMI Stub used to build the secure channel.
paths: gNMI Path
username: (str) Username used when building the channel.
password: (str) Password used when building the channel.
Returns:
a gnmi_pb2.GetResponse object representing a gNMI GetResponse.
"""
if username: # User/pass supplied for Authentication.
return stub.Get(
gnmi_pb2.GetRequest(path=[paths], encoding='JSON_IETF'),
metadata=[('username', username), ('password', password)])
return stub.Get(gnmi_pb2.GetRequest(path=[paths], encoding='JSON_IETF'))
def _build_creds(target, port, root_cert, cert_chain, private_key):
"""Define credentials used in gNMI Requests.
Args:
target: (str) gNMI Target.
port: (str) gNMI Target IP port.
root_cert: (str) Root certificate file to use in the gRPC secure channel.
cert_chain: (str) Certificate chain file to use in the gRPC secure channel.
private_key: (str) Private key file to use in the gRPC secure channel.
Returns:
a gRPC.ssl_channel_credentials object.
"""
if not root_cert:
logging.warning('No certificate supplied, obtaining from Target')
root_cert = ssl.get_server_certificate((target, port)).encode('utf-8')
return gnmi_pb2_grpc.grpc.ssl_channel_credentials(
root_certificates=root_cert, private_key=None, certificate_chain=None)
elif not cert_chain:
logging.info('Only user/pass in use for Authentication')
return gnmi_pb2_grpc.grpc.ssl_channel_credentials(
root_certificates=six.moves.builtins.open(root_cert, 'rb').read(),
private_key=None, certificate_chain=None)
return gnmi_pb2_grpc.grpc.ssl_channel_credentials(
root_certificates=six.moves.builtins.open(root_cert, 'rb').read(),
private_key=six.moves.builtins.open(private_key, 'rb').read(),
certificate_chain=six.moves.builtins.open(cert_chain, 'rb').read())
def main():
argparser = _create_parser()
args = vars(argparser.parse_args())
mode = args['mode']
target = args['target']
port = args['port']
root_cert = args['root_cert']
cert_chain = args['cert_chain']
private_key = args['private_key']
xpath = args['xpath']
host_override = args['host_override']
user = args['username']
password = args['password']
if mode in ('set', 'subscribe'):
print('Mode %s not available in this version' % mode)
return
if args['debug']:
os.environ['GRPC_TRACE'] = 'all'
os.environ['GRPC_VERBOSITY'] = 'DEBUG'
paths = _parse_path(_path_names(xpath))
creds = _build_creds(target, port, root_cert, cert_chain, private_key)
stub = _create_stub(creds, target, port, host_override)
if mode == 'get':
print('Performing GetRequest, encoding=JSON_IETF', ' to ', target,
' with the following gNMI Path\n', '-'*25, '\n', paths)
response = _get(stub, paths, user, password)
print('The GetResponse is below\n' + '-'*25 + '\n', response)
if __name__ == '__main__':
main()
``` |
{
"source": "jonathanzhang99/loa",
"score": 2
} |
#### File: examples/lyft_level5/ma_baseline.py
```python
import os
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json as json
from collections import defaultdict
from pyquaternion import Quaternion
from lyft_dataset_sdk.lyftdataset import LyftDataset
from loa.datum.datum_3d import make_datum_from_gt, make_datum_from_pred
from loa.datum.track import Track, TrackDatum
from prior_lyft import serialize_track, get_idx_to_rot2
from constants import LYFT_DATA_DIR, PRIOR_DIR, LOA_DATA_DIR
def process_scene(level5data, preds, scene_record, scene_idx, rot2=None, score_cutoff=0.2, seed=1):
assert seed >= 1
pred_cls_names = ['car', 'pedestrian', 'motorcycle', 'bicycle', 'other_vehicle', 'bus', 'truck']
print('Processing scene', scene_record['token'])
last_token = scene_record['last_sample_token']
sample_token = scene_record['first_sample_token']
sample_record = level5data.get('sample', sample_token)
next_token = sample_record['next']
all_tracks = []
while next_token != '':
timestamp = sample_record['timestamp']
sd_record = level5data.get('sample_data', sample_record['data']['LIDAR_TOP'])
cs_record = level5data.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
pose_record = level5data.get('ego_pose', sd_record['ego_pose_token'])
gt_data = []
for ann_token in sample_record['anns']:
ann_record = level5data.get('sample_annotation', ann_token)
ann_record['name'] = ann_record['category_name']
gt_datum = make_datum_from_gt(ann_record, pose_record, cs_record, ts=timestamp, identifier=ann_token)
gt_data.append(gt_datum)
pred_data = []
dets = preds[sample_token]
box3d = dets['box3d_lidar'].detach().cpu().numpy()
box3d[:, -1] = -box3d[:, -1] - np.pi / 2
scores = dets['scores'].detach().cpu().numpy()
labels = dets['label_preds'].detach().cpu().numpy()
pred_idens = dets['idens']
pred_trans = - np.array(cs_record['translation'])
pred_rot = Quaternion(cs_record['rotation']).inverse
for pred_idx in range(box3d.shape[0]):
pred_datum = make_datum_from_pred(
sample_token,
box3d[pred_idx],
scores[pred_idx],
pred_cls_names[labels[pred_idx]],
pred_trans,
pred_rot,
rot2=rot2,
ts=timestamp,
identifier=pred_idens[pred_idx]
)
pred_data.append(pred_datum)
for pred_datum in pred_data:
if pred_datum.score < score_cutoff:
continue
has_overlap = False
for gt_datum in gt_data:
if pred_datum.intersects(gt_datum):
has_overlap = True
break
if has_overlap:
continue
add_datum = TrackDatum({'pred': pred_datum}, [])
new_track = Track()
new_track.add_datum(timestamp, add_datum)
all_tracks.append(new_track)
sample_token = next_token
sample_record = level5data.get('sample', sample_token)
next_token = sample_record['next']
rand = np.random.RandomState(seed=seed)
rand.shuffle(all_tracks)
scored = []
for track_idx, track in enumerate(all_tracks):
cls = track.data[0].observations['pred'].cls
score = track.data[0].observations['pred'].score
elem = (track_idx, cls, score)
scored.append(elem)
df_scored = pd.DataFrame(
scored,
columns=['track_idx', 'cls', 'score']
)
df_scored = df_scored.sort_values(by=['score'], ascending=False).reset_index(drop=True)
track_dir = f'{LOA_DATA_DIR}/ma-conf/tracks/{scene_idx}'
os.makedirs(track_dir, exist_ok=True)
df_scored.to_csv(f'{track_dir}/tracks.csv')
print(df_scored)
for idx, row in df_scored.iterrows():
track_idx = int(row['track_idx'])
track = all_tracks[track_idx]
serialize_track(track, f'{track_dir}/{idx}-{track_idx}.json')
def main():
level5data = LyftDataset(
data_path=f'{LYFT_DATA_DIR}',
json_path=f'{LYFT_DATA_DIR}/data',
verbose=True
)
with open('./preds_id.p', 'rb') as f:
preds = pickle.load(f)
idx_to_rot2 = get_idx_to_rot2()
for scene_idx in range(150):
try:
process_scene(
level5data,
preds,
level5data.scene[scene_idx],
scene_idx,
rot2=idx_to_rot2[scene_idx],
score_cutoff=0.2
)
except:
pass
if __name__ == '__main__':
main()
``` |
{
"source": "jonathanzong/dmca",
"score": 2
} |
#### File: alembic/versions/4302608638bc_add_tweet_body_to_tweet_attempt.py
```python
revision = '4302608638bc'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('twitter_user_recruitment_tweet_attempt', sa.Column('tweet_body', sa.String(length=280), nullable=True))
# ### end Alembic commands ###
def downgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('twitter_user_recruitment_tweet_attempt', 'tweet_body')
# ### end Alembic commands ###
def upgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('twitter_user_recruitment_tweet_attempt', sa.Column('tweet_body', sa.String(length=280), nullable=True))
# ### end Alembic commands ###
def downgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('twitter_user_recruitment_tweet_attempt', 'tweet_body')
# ### end Alembic commands ###
def upgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('twitter_user_recruitment_tweet_attempt', sa.Column('tweet_body', sa.String(length=280), nullable=True))
# ### end Alembic commands ###
def downgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('twitter_user_recruitment_tweet_attempt', 'tweet_body')
# ### end Alembic commands ###
```
#### File: alembic/versions/47616107e240_add_paypal_send_id_to_twitter_user_.py
```python
revision = '47616107e240'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('twitter_user_metadata', sa.Column('paypal_sender_batch_id', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('twitter_user_metadata', 'paypal_sender_batch_id')
# ### end Alembic commands ###
def upgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('twitter_user_metadata', sa.Column('paypal_sender_batch_id', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('twitter_user_metadata', 'paypal_sender_batch_id')
# ### end Alembic commands ###
def upgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('twitter_user_metadata', sa.Column('paypal_sender_batch_id', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('twitter_user_metadata', 'paypal_sender_batch_id')
# ### end Alembic commands ###
```
#### File: alembic/versions/796efd4b6ec8_add_experiment_actions.py
```python
revision = '796efd4b6ec8'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('experiment_actions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_id', sa.String(length=64), nullable=True),
sa.Column('action_type', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('twitter_user_id', sa.String(length=64), nullable=True),
sa.Column('action_data', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('experiment_actions')
# ### end Alembic commands ###
def upgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('experiment_actions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_id', sa.String(length=64), nullable=True),
sa.Column('action_type', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('twitter_user_id', sa.String(length=64), nullable=True),
sa.Column('action_data', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('experiment_actions')
# ### end Alembic commands ###
def upgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('experiment_actions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_id', sa.String(length=64), nullable=True),
sa.Column('action_type', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('twitter_user_id', sa.String(length=64), nullable=True),
sa.Column('action_data', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('experiment_actions')
# ### end Alembic commands ###
```
#### File: alembic/versions/8c09a746d436_update_experiment_columns.py
```python
revision = '8c09a746d436'
down_revision = 'b6bb41e569e4'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('experiments', sa.Column('controller', sa.String(length=64), nullable=True))
op.add_column('experiments', sa.Column('settings_json', sa.LargeBinary(), nullable=True))
op.drop_column('experiments', 'account_found')
# ### end Alembic commands ###
def downgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('experiments', sa.Column('account_found', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))
op.drop_column('experiments', 'settings_json')
op.drop_column('experiments', 'controller')
# ### end Alembic commands ###
def upgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('experiments', sa.Column('controller', sa.String(length=64), nullable=True))
op.add_column('experiments', sa.Column('settings_json', sa.LargeBinary(), nullable=True))
op.drop_column('experiments', 'account_found')
# ### end Alembic commands ###
def downgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('experiments', sa.Column('account_found', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))
op.drop_column('experiments', 'settings_json')
op.drop_column('experiments', 'controller')
# ### end Alembic commands ###
def upgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('experiments', sa.Column('controller', sa.String(length=64), nullable=True))
op.add_column('experiments', sa.Column('settings_json', sa.LargeBinary(), nullable=True))
op.drop_column('experiments', 'account_found')
# ### end Alembic commands ###
def downgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('experiments', sa.Column('account_found', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))
op.drop_column('experiments', 'settings_json')
op.drop_column('experiments', 'controller')
# ### end Alembic commands ###
```
#### File: alembic/versions/db36c0e57af1_june_2020_refactor.py
```python
revision = 'db36c0e57af1'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('participant_eligibility',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_name', sa.String(length=64), nullable=True),
sa.Column('participant_user_id', sa.String(length=64), nullable=True),
sa.Column('study_data_json', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('experiment_name', 'participant_user_id', name='_experiment_participant_uc')
)
op.create_table('participant_record',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_name', sa.String(length=64), nullable=True),
sa.Column('participant_user_id', sa.String(length=64), nullable=True),
sa.Column('user_json', sa.LargeBinary(), nullable=True),
sa.Column('initial_login_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('experiment_name', 'participant_user_id', name='_experiment_participant_uc')
)
op.create_table('participant_survey_results',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_name', sa.String(length=64), nullable=True),
sa.Column('participant_user_id', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('survey_data', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('experiment_name', 'participant_user_id', name='_experiment_participant_uc')
)
op.drop_table('twitter_user_eligibility')
op.drop_table('twitter_user_metadata')
op.drop_index('ix_twitter_users_screen_name', table_name='twitter_users')
op.drop_table('twitter_users')
op.drop_table('twitter_user_survey_results')
op.add_column('experiment_actions', sa.Column('experiment_name', sa.String(length=64), nullable=True))
op.add_column('experiment_actions', sa.Column('participant_user_id', sa.String(length=64), nullable=True))
op.drop_column('experiment_actions', 'experiment_id')
op.drop_column('experiment_actions', 'twitter_user_id')
op.add_column('experiments', sa.Column('experiment_name', sa.String(length=64), nullable=True))
op.add_column('experiments', sa.Column('study_template', sa.String(length=64), nullable=True))
op.add_column('experiments', sa.Column('url_id', sa.String(length=64), nullable=False))
op.drop_index('ix_experiments_name', table_name='experiments')
op.create_unique_constraint(None, 'experiments', ['study_template'])
op.create_unique_constraint(None, 'experiments', ['experiment_name'])
op.drop_column('experiments', 'name')
op.drop_column('experiments', 'id')
op.drop_column('experiments', 'settings_json')
op.drop_column('experiments', 'controller')
op.add_column('twitter_user_recruitment_tweet_attempt', sa.Column('participant_user_id', sa.String(length=64), nullable=False))
op.drop_column('twitter_user_recruitment_tweet_attempt', 'twitter_user_id')
# ### end Alembic commands ###
def downgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('twitter_user_recruitment_tweet_attempt', sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=False))
op.drop_column('twitter_user_recruitment_tweet_attempt', 'participant_user_id')
op.add_column('experiments', sa.Column('controller', mysql.VARCHAR(length=64), nullable=True))
op.add_column('experiments', sa.Column('settings_json', sa.BLOB(), nullable=True))
op.add_column('experiments', sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False))
op.add_column('experiments', sa.Column('name', mysql.VARCHAR(length=64), nullable=True))
op.drop_constraint(None, 'experiments', type_='unique')
op.drop_constraint(None, 'experiments', type_='unique')
op.create_index('ix_experiments_name', 'experiments', ['name'], unique=False)
op.drop_column('experiments', 'url_id')
op.drop_column('experiments', 'study_template')
op.drop_column('experiments', 'experiment_name')
op.add_column('experiment_actions', sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=True))
op.add_column('experiment_actions', sa.Column('experiment_id', mysql.VARCHAR(length=64), nullable=True))
op.drop_column('experiment_actions', 'participant_user_id')
op.drop_column('experiment_actions', 'experiment_name')
op.create_table('twitter_user_survey_results',
sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('survey_data', sa.BLOB(), nullable=True),
sa.PrimaryKeyConstraint('twitter_user_id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_table('twitter_users',
sa.Column('id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('screen_name', mysql.VARCHAR(length=256), nullable=True),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('lang', mysql.VARCHAR(length=32), nullable=True),
sa.Column('user_state', mysql.INTEGER(), autoincrement=False, nullable=True),
sa.Column('record_created_at', mysql.DATETIME(), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_index('ix_twitter_users_screen_name', 'twitter_users', ['screen_name'], unique=False)
op.create_table('twitter_user_metadata',
sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('received_lumen_notice_at', mysql.DATETIME(), nullable=True),
sa.Column('lumen_notice_id', mysql.VARCHAR(length=64), nullable=True),
sa.Column('user_json', sa.BLOB(), nullable=True),
sa.Column('assignment_json', sa.BLOB(), nullable=True),
sa.Column('experiment_id', mysql.VARCHAR(length=64), nullable=True),
sa.Column('initial_login_at', mysql.DATETIME(), nullable=True),
sa.Column('completed_study_at', mysql.DATETIME(), nullable=True),
sa.Column('tweet_removed', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
sa.Column('paypal_sender_batch_id', mysql.VARCHAR(length=64), nullable=True),
sa.CheckConstraint('(`tweet_removed` in (0,1))', name='twitter_user_metadata_chk_2'),
sa.PrimaryKeyConstraint('twitter_user_id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_table('twitter_user_eligibility',
sa.Column('id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('extra_data', sa.BLOB(), nullable=True),
sa.Column('study_data_json', sa.BLOB(), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.drop_table('participant_survey_results')
op.drop_table('participant_record')
op.drop_table('participant_eligibility')
# ### end Alembic commands ###
def upgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('participant_eligibility',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_name', sa.String(length=64), nullable=True),
sa.Column('participant_user_id', sa.String(length=64), nullable=True),
sa.Column('study_data_json', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('experiment_name', 'participant_user_id', name='_experiment_participant_uc')
)
op.create_table('participant_record',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_name', sa.String(length=64), nullable=True),
sa.Column('participant_user_id', sa.String(length=64), nullable=True),
sa.Column('user_json', sa.LargeBinary(), nullable=True),
sa.Column('initial_login_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('experiment_name', 'participant_user_id', name='_experiment_participant_uc')
)
op.create_table('participant_survey_results',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_name', sa.String(length=64), nullable=True),
sa.Column('participant_user_id', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('survey_data', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('experiment_name', 'participant_user_id', name='_experiment_participant_uc')
)
op.drop_table('twitter_user_eligibility')
op.drop_table('twitter_user_metadata')
op.drop_index('ix_twitter_users_screen_name', table_name='twitter_users')
op.drop_table('twitter_users')
op.drop_table('twitter_user_survey_results')
op.add_column('experiment_actions', sa.Column('experiment_name', sa.String(length=64), nullable=True))
op.add_column('experiment_actions', sa.Column('participant_user_id', sa.String(length=64), nullable=True))
op.drop_column('experiment_actions', 'experiment_id')
op.drop_column('experiment_actions', 'twitter_user_id')
op.add_column('experiments', sa.Column('experiment_name', sa.String(length=64), nullable=True))
op.add_column('experiments', sa.Column('study_template', sa.String(length=64), nullable=True))
op.add_column('experiments', sa.Column('url_id', sa.String(length=64), nullable=False))
op.drop_index('ix_experiments_name', table_name='experiments')
op.create_unique_constraint(None, 'experiments', ['study_template'])
op.create_unique_constraint(None, 'experiments', ['experiment_name'])
op.drop_column('experiments', 'name')
op.drop_column('experiments', 'id')
op.drop_column('experiments', 'settings_json')
op.drop_column('experiments', 'controller')
op.add_column('twitter_user_recruitment_tweet_attempt', sa.Column('participant_user_id', sa.String(length=64), nullable=False))
op.drop_column('twitter_user_recruitment_tweet_attempt', 'twitter_user_id')
# ### end Alembic commands ###
def downgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('twitter_user_recruitment_tweet_attempt', sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=False))
op.drop_column('twitter_user_recruitment_tweet_attempt', 'participant_user_id')
op.add_column('experiments', sa.Column('controller', mysql.VARCHAR(length=64), nullable=True))
op.add_column('experiments', sa.Column('settings_json', sa.BLOB(), nullable=True))
op.add_column('experiments', sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False))
op.add_column('experiments', sa.Column('name', mysql.VARCHAR(length=64), nullable=True))
op.drop_constraint(None, 'experiments', type_='unique')
op.drop_constraint(None, 'experiments', type_='unique')
op.create_index('ix_experiments_name', 'experiments', ['name'], unique=False)
op.drop_column('experiments', 'url_id')
op.drop_column('experiments', 'study_template')
op.drop_column('experiments', 'experiment_name')
op.add_column('experiment_actions', sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=True))
op.add_column('experiment_actions', sa.Column('experiment_id', mysql.VARCHAR(length=64), nullable=True))
op.drop_column('experiment_actions', 'participant_user_id')
op.drop_column('experiment_actions', 'experiment_name')
op.create_table('twitter_user_survey_results',
sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('survey_data', sa.BLOB(), nullable=True),
sa.PrimaryKeyConstraint('twitter_user_id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_table('twitter_users',
sa.Column('id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('screen_name', mysql.VARCHAR(length=256), nullable=True),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('lang', mysql.VARCHAR(length=32), nullable=True),
sa.Column('user_state', mysql.INTEGER(), autoincrement=False, nullable=True),
sa.Column('record_created_at', mysql.DATETIME(), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_index('ix_twitter_users_screen_name', 'twitter_users', ['screen_name'], unique=False)
op.create_table('twitter_user_metadata',
sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('received_lumen_notice_at', mysql.DATETIME(), nullable=True),
sa.Column('lumen_notice_id', mysql.VARCHAR(length=64), nullable=True),
sa.Column('user_json', sa.BLOB(), nullable=True),
sa.Column('assignment_json', sa.BLOB(), nullable=True),
sa.Column('experiment_id', mysql.VARCHAR(length=64), nullable=True),
sa.Column('initial_login_at', mysql.DATETIME(), nullable=True),
sa.Column('completed_study_at', mysql.DATETIME(), nullable=True),
sa.Column('tweet_removed', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
sa.Column('paypal_sender_batch_id', mysql.VARCHAR(length=64), nullable=True),
sa.CheckConstraint('(`tweet_removed` in (0,1))', name='twitter_user_metadata_chk_2'),
sa.PrimaryKeyConstraint('twitter_user_id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_table('twitter_user_eligibility',
sa.Column('id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('extra_data', sa.BLOB(), nullable=True),
sa.Column('study_data_json', sa.BLOB(), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.drop_table('participant_survey_results')
op.drop_table('participant_record')
op.drop_table('participant_eligibility')
# ### end Alembic commands ###
def upgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('participant_eligibility',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_name', sa.String(length=64), nullable=True),
sa.Column('participant_user_id', sa.String(length=64), nullable=True),
sa.Column('study_data_json', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('experiment_name', 'participant_user_id', name='_experiment_participant_uc')
)
op.create_table('participant_record',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_name', sa.String(length=64), nullable=True),
sa.Column('participant_user_id', sa.String(length=64), nullable=True),
sa.Column('user_json', sa.LargeBinary(), nullable=True),
sa.Column('initial_login_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('experiment_name', 'participant_user_id', name='_experiment_participant_uc')
)
op.create_table('participant_survey_results',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('experiment_name', sa.String(length=64), nullable=True),
sa.Column('participant_user_id', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('survey_data', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('experiment_name', 'participant_user_id', name='_experiment_participant_uc')
)
op.drop_table('twitter_user_eligibility')
op.drop_table('twitter_user_metadata')
op.drop_index('ix_twitter_users_screen_name', table_name='twitter_users')
op.drop_table('twitter_users')
op.drop_table('twitter_user_survey_results')
op.add_column('experiment_actions', sa.Column('experiment_name', sa.String(length=64), nullable=True))
op.add_column('experiment_actions', sa.Column('participant_user_id', sa.String(length=64), nullable=True))
op.drop_column('experiment_actions', 'experiment_id')
op.drop_column('experiment_actions', 'twitter_user_id')
op.add_column('experiments', sa.Column('experiment_name', sa.String(length=64), nullable=True))
op.add_column('experiments', sa.Column('study_template', sa.String(length=64), nullable=True))
op.add_column('experiments', sa.Column('url_id', sa.String(length=64), nullable=False))
op.drop_index('ix_experiments_name', table_name='experiments')
op.create_unique_constraint(None, 'experiments', ['study_template'])
op.create_unique_constraint(None, 'experiments', ['experiment_name'])
op.drop_column('experiments', 'name')
op.drop_column('experiments', 'id')
op.drop_column('experiments', 'settings_json')
op.drop_column('experiments', 'controller')
op.add_column('twitter_user_recruitment_tweet_attempt', sa.Column('participant_user_id', sa.String(length=64), nullable=False))
op.drop_column('twitter_user_recruitment_tweet_attempt', 'twitter_user_id')
# ### end Alembic commands ###
def downgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('twitter_user_recruitment_tweet_attempt', sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=False))
op.drop_column('twitter_user_recruitment_tweet_attempt', 'participant_user_id')
op.add_column('experiments', sa.Column('controller', mysql.VARCHAR(length=64), nullable=True))
op.add_column('experiments', sa.Column('settings_json', sa.BLOB(), nullable=True))
op.add_column('experiments', sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False))
op.add_column('experiments', sa.Column('name', mysql.VARCHAR(length=64), nullable=True))
op.drop_constraint(None, 'experiments', type_='unique')
op.drop_constraint(None, 'experiments', type_='unique')
op.create_index('ix_experiments_name', 'experiments', ['name'], unique=False)
op.drop_column('experiments', 'url_id')
op.drop_column('experiments', 'study_template')
op.drop_column('experiments', 'experiment_name')
op.add_column('experiment_actions', sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=True))
op.add_column('experiment_actions', sa.Column('experiment_id', mysql.VARCHAR(length=64), nullable=True))
op.drop_column('experiment_actions', 'participant_user_id')
op.drop_column('experiment_actions', 'experiment_name')
op.create_table('twitter_user_survey_results',
sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('survey_data', sa.BLOB(), nullable=True),
sa.PrimaryKeyConstraint('twitter_user_id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_table('twitter_users',
sa.Column('id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('screen_name', mysql.VARCHAR(length=256), nullable=True),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('lang', mysql.VARCHAR(length=32), nullable=True),
sa.Column('user_state', mysql.INTEGER(), autoincrement=False, nullable=True),
sa.Column('record_created_at', mysql.DATETIME(), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_index('ix_twitter_users_screen_name', 'twitter_users', ['screen_name'], unique=False)
op.create_table('twitter_user_metadata',
sa.Column('twitter_user_id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('received_lumen_notice_at', mysql.DATETIME(), nullable=True),
sa.Column('lumen_notice_id', mysql.VARCHAR(length=64), nullable=True),
sa.Column('user_json', sa.BLOB(), nullable=True),
sa.Column('assignment_json', sa.BLOB(), nullable=True),
sa.Column('experiment_id', mysql.VARCHAR(length=64), nullable=True),
sa.Column('initial_login_at', mysql.DATETIME(), nullable=True),
sa.Column('completed_study_at', mysql.DATETIME(), nullable=True),
sa.Column('tweet_removed', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
sa.Column('paypal_sender_batch_id', mysql.VARCHAR(length=64), nullable=True),
sa.CheckConstraint('(`tweet_removed` in (0,1))', name='twitter_user_metadata_chk_2'),
sa.PrimaryKeyConstraint('twitter_user_id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_table('twitter_user_eligibility',
sa.Column('id', mysql.VARCHAR(length=64), nullable=False),
sa.Column('extra_data', sa.BLOB(), nullable=True),
sa.Column('study_data_json', sa.BLOB(), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_collate='utf8mb4_0900_ai_ci',
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.drop_table('participant_survey_results')
op.drop_table('participant_record')
op.drop_table('participant_eligibility')
# ### end Alembic commands ###
``` |
{
"source": "JonathanZwiebel/connect-four",
"score": 4
} |
#### File: JonathanZwiebel/connect-four/minimax_agent.py
```python
import random
def minimax_agent_first(game, state):
return minimax_agent(game, state, 1, 2)
def minimax_agent_second(game, state):
return minimax_agent(game, state, 2, 2)
def minimax_agent(game, state, agent_index, depth):
actions = game.actions(state)
assert len(actions) > 0
scores = [minimax_value(game, game.successor(state, action), agent_index, 3 - agent_index, depth) for action in actions]
print(scores)
best_score = max(scores)
print("Best score: " + str(best_score))
best_indices = [index for index in range(len(scores)) if scores[index] == best_score]
chosen_index = random.choice(best_indices)
return actions[chosen_index]
def minimax_value(game, state, max_index, agent_index, depth):
if game.is_end(state)[0]:
winner = game.is_end(state)[1]
if winner == 0:
return 0
elif winner == max_index:
return float('inf')
else:
return -float('inf')
if depth == 0:
return 0 # No evaluation function
actions = game.actions(state)
if state[1] == max_index:
values = [minimax_value(game, game.successor(state, action), max_index, 3 - agent_index, depth) for action in actions]
return max(values)
else:
values = [minimax_value(game, game.successor(state, action), max_index, 3 - agent_index, depth - 1) for action in actions]
return min(values)
```
#### File: JonathanZwiebel/connect-four/random_agent.py
```python
import random
def random_agent(game, state):
actions = game.actions(state)
action = random.choice(actions)
print("Choosing action " + str(action))
return action
``` |
{
"source": "jonathas/face-analysis-serverless-alura",
"score": 2
} |
#### File: jonathas/face-analysis-serverless-alura/faceanalysis.py
```python
import boto3
import json
client = boto3.client('rekognition')
s3 = boto3.resource('s3')
bucketName = 'jon-images-test-ir'
def detect_faces():
detected_faces = client.index_faces(
CollectionId='faces',
DetectionAttributes=['DEFAULT'],
ExternalImageId='TEMPORARY',
Image={
'S3Object': {
'Bucket': bucketName,
'Name': '_analysis.png',
},
},
)
return detected_faces
def create_list_detected_face_id(detected_faces):
detected_face_id = []
for images in range(len(detected_faces['FaceRecords'])):
detected_face_id.append(detected_faces['FaceRecords'][images]['Face']['FaceId'])
return detected_face_id
def compare_images(detected_face_ids):
images_result = []
for ids in detected_face_ids:
images_result.append(
client.search_faces(
CollectionId='faces',
FaceId=ids,
FaceMatchThreshold=80,
MaxFaces=10,
)
)
return images_result
def format_output(images_result):
json_data = []
for face_matches in images_result:
if(len(face_matches.get('FaceMatches'))) >= 1:
profile = dict(name=face_matches['FaceMatches'][0]['Face']['ExternalImageId'],
faceMatch=round(face_matches['FaceMatches'][0]['Similarity'], 2))
json_data.append(profile)
return json_data
def publish_output(json_data):
s3_obj = s3.Object('jon-site-test-ir', 'dados.json')
s3_obj.put(Body=json.dumps(json_data))
def delete_image_collection(detected_face_ids):
client.delete_faces(
CollectionId='faces',
FaceIds=detected_face_ids,
)
def main(event, context):
detected = detect_faces()
face_id_list = create_list_detected_face_id(detected)
result = compare_images(face_id_list)
output = format_output(result)
publish_output(output)
delete_image_collection(face_id_list)
print(json.dumps(output, indent=4))
``` |
{
"source": "jonathasrc/curso-flask-1",
"score": 2
} |
#### File: delivery/ext/admin.py
```python
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from delivery.ext.db import db
from delivery.ext.db.models import Category
admin = Admin()
def init_app(app):
admin.name = "CodeFoods"
admin.template_mode = "bootstrap2"
admin.init_app(app)
# TODO: Proteger com senha
# TODO: traduzir para PTBR
admin.add_view(ModelView(Category, db.session))
``` |
{
"source": "JonathaTrue/Python_par",
"score": 3
} |
#### File: JonathaTrue/Python_par/funcionario.py
```python
class Funcionario:
def __init__(self, nome, salario, imposto):
self.nome = nome
self.salario = salario
self.imposto = imposto
def getNome(self):
return self.Nome
def setNome(self, nome):
self.nome - nome
def getSalario(self):
return self.salario
def setSalario(self, salario):
self.salario - salario
def setImposto(self, imposto):
self.imposto - imposto
def SalarioLiquido(self, salario, imposto):
self.salario = salario
self.imposto = imposto
return self.salario - self.imposto
def AumentoSalario(self, salario, liq , comis):
valor = liq + (salario * comis / 100)
return valor
``` |
{
"source": "Jonatha-Varjao/fast-api-nosql-template",
"score": 2
} |
#### File: app/middleware/db.py
```python
from app.db.mongodb import db
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
class DBConnection(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
request.state.db = db.client
response = await call_next(request)
request.state.db.close()
return response
def get_db(request: Request):
return request.state.db
``` |
{
"source": "Jonatha-Varjao/fast-api-sql-template",
"score": 2
} |
#### File: api_v1/endpoints/login.py
```python
import json
from datetime import timedelta
from fastapi import APIRouter, Body, Depends, HTTPException
from sqlalchemy.orm import Session
from starlette.responses import Response
from app.api.utils.db import get_db
from app.api.utils.security import get_current_user
from app.core import config
from app.core.jwt import create_access_token
from app.core.return_messages import codes, ptBr
from app.core.security import get_password_hash
from app.crud.user import (
authenticate,
get_by_email,
is_active,
is_admin_env
)
from app.models.msg import Msg
from app.models.token import LoginOAuth
from app.models.user import User
from app.utils import (
generate_password_reset_token,
send_reset_password_email,
verify_password_reset_token,
)
router = APIRouter()
@router.post("/login/access-token")
def login_access_token(
*,
db: Session = Depends(get_db),
data: LoginOAuth
):
"""
OAuth2 compatible token login, get an access token for future requests
"""
user = authenticate(
db, email_or_username=data.username, password=<PASSWORD>
)
if not user:
return Response(json.dumps({
"messageCode": codes['validation'],
"message": ptBr['eIncorrectDataLogin']
}),
status_code=422)
if not is_active(user):
return Response(json.dumps({
"messageCode": codes['db'],
"message": ptBr['eUserNotActive']
}),
status_code=401)
user_response = {
"id":str(user.id),
"username":user.username,
"full_name":user.full_name,
"email":user.email,
"is_superuser":user.is_superuser
}
access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)
return {
"access_token": create_access_token(
data={
"user_data": user_response,
},
expires_delta=access_token_expires
),
"token_type": "bearer",
}
@router.post("/login/test-token", response_model=User)
def test_token(current_user: User = Depends(get_current_user)):
"""
Test access token
"""
return current_user
@router.post("/password-recovery/{email}", response_model=Msg)
def recover_password(email: str, db: Session = Depends(get_db)):
"""
Password Recovery
"""
# user = get_by_email(db, email=email)
# if not user:
# return Response(json.dumps({
# "messageCode": codes['validation'],
# "title": "Dados Incorretos",
# "message": ptBr['eDontExistsThisUser']
# }),
# status_code=422)
# password_reset_token = generate_password_reset_token(email=email)
# send_reset_password_email(
# email_to=user.email, email=email, token=password_reset_token
# )
return Response(json.dumps({
"messageCode": codes['success'],
"title": "Sucesso na Operação.",
"message": ptBr['passwordRecoveryEmail']
}),
status_code=200)
@router.post("/reset-password/", response_model=Msg)
def reset_password(token: str = Body(...), new_password: str = Body(...), db: Session = Depends(get_db)):
"""
Reset password
"""
# email = verify_password_reset_token(token)
# if not email:
# return Response(json.dumps({
# "messageCode": codes['validation'],
# "title": "Erro no Token",
# "message": ptBr['eInvalidToken']
# }),
# status_code=400)
# user = get_by_email(db, email=email)
# if not user:
# return Response(json.dumps({
# "messageCode": codes['db'],
# "title": "Erro no banco de dados.",
# "message": ptBr['eUserNotFound']
# }),
# status_code=400)
# elif not is_active(user):
# return Response(json.dumps({
# "messageCode": codes['db'],
# "title": "Erro no Banco de Dados",
# "message": ptBr['eUserNotActive']
# }),
# status_code=404)
# hashed_password = get_password_hash(new_password)
# user.password = <PASSWORD>
# db.add(user)
# db.commit()
return Response(json.dumps({
"messageCode": codes['success'],
"title": "Sucesso na Operação.",
"message": ptBr['passwordRecoverySuccess']
}),
status_code=200)
```
#### File: app/middlewares/db_exceptions.py
```python
import json
from googletrans import Translator
from sqlalchemy.exc import SQLAlchemyError
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import Response
from app.core.return_messages import codes
translator = Translator()
class DBException(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
try:
response = await call_next(request)
request.state.db.close()
return response
except SQLAlchemyError as error:
print(error)
#error_json = str(error.__dict__['orig']).split('Key ')[-1]
#error_json_pt = translator.translate(error_json, dest='pt')
return Response(
json.dumps(
{"messageCode": codes['db'], "title": "Erro no Banco de Dados", "error": "error_json_pt.text"}),
status_code=422)
``` |
{
"source": "jonathf/npoly",
"score": 4
} |
#### File: numpoly/array_function/all.py
```python
from __future__ import annotations
from typing import Any, Optional, Sequence, Union
import numpy
import numpy.typing
import numpoly
from ..baseclass import PolyLike
from ..dispatch import implements
@implements(numpy.all)
def all(
a: PolyLike,
axis: Union[None, int, Sequence[int]] = None,
out: Optional[numpy.ndarray] = None,
keepdims: bool = False,
**kwargs: Any,
) -> Optional[numpy.ndarray]:
"""
Test whether all array elements along a given axis evaluate to True.
Args:
a:
Input array or object that can be converted to an array.
axis:
Axis or axes along which a logical AND reduction is performed. The
default (`axis` = `None`) is to perform a logical AND over all the
dimensions of the input array. `axis` may be negative, in which
case it counts from the last to the first axis. If this is a tuple
of ints, a reduction is performed on multiple axes, instead of
a single axis or all the axes as before.
out:
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if ``dtype(out)`` is float, the result will consist of 0.0's
and 1.0's).
keepdims:
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Returns:
A new boolean or array is returned unless `out` is specified, in which
case a reference to `out` is returned.
Examples:
>>> q0 = numpoly.variable()
>>> numpoly.all(q0)
True
>>> numpoly.all(0*q0)
False
>>> numpoly.all([1, q0, 0])
False
>>> numpoly.all([[True*q0, False], [True, True]], axis=0)
array([ True, False])
"""
a = numpoly.aspolynomial(a)
coefficients = numpy.any(numpy.asarray(a.coefficients), axis=0)
index = numpy.asarray(coefficients, dtype=bool)
return numpy.all(index, axis=axis, out=out, keepdims=keepdims)
```
#### File: numpoly/array_function/apply_along_axis.py
```python
from __future__ import annotations
from functools import wraps
from typing import Any, Callable, List
import numpy
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.apply_along_axis)
def apply_along_axis(
func1d: Callable[[PolyLike], PolyLike],
axis: int,
arr: PolyLike,
*args: Any,
**kwargs: Any,
) -> ndpoly:
"""
Apply a function to 1-D slices along the given axis.
Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a` is
a 1-D slice of `arr` along `axis`.
This is equivalent to (but faster than) the following use of `ndindex` and
`s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
for kk in ndindex(Nk):
f = func1d(arr[ii+s_[:,]+kk])
Nj = f.shape
for jj in ndindex(Nj):
out[ii+jj+kk] = f[jj]
Equivalently, eliminating the inner loop, this can be expressed as::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
for kk in ndindex(Nk):
out[ii+s_[...,]+kk] = func1d(arr[ii+s_[:,]+kk])
Args:
func1d:
This function should accept 1-D arrays. It is applied to 1-D slices
of `arr` along the specified axis.
axis:
Axis along which `arr` is sliced.
arr:
Input array.
args:
Additional arguments to `func1d`.
kwargs:
Additional named arguments to `func1d`.
Returns:
The output array. The shape of `out` is identical to the shape of
`arr`, except along the `axis` dimension. This axis is removed, and
replaced with new dimensions equal to the shape of the return value of
`func1d`. So if `func1d` returns a scalar `out` will have one fewer
dimensions than `arr`.
Examples:
>>> q0, q1 = numpoly.variable(2)
>>> b = numpoly.polynomial([[1, 2, 3*q0],
... [3, 6*q1, 6],
... [2, 7, 9]])
>>> numpoly.apply_along_axis(numpoly.mean, 0, b)
polynomial([2.0, 2.0*q1+3.0, q0+5.0])
>>> numpoly.apply_along_axis(numpoly.mean, 1, b)
polynomial([q0+1.0, 2.0*q1+3.0, 6.0])
"""
collection: List[ndpoly] = list()
@wraps(func1d)
def wrapper_func(array):
"""Wrap func1d function."""
# Align indeterminants in case slicing changed them
array = numpoly.polynomial(
array, names=arr.indeterminants, allocation=arr.allocation)
array, _ = numpoly.align.align_indeterminants(
array, arr.indeterminants)
# Evaluate function
out = func1d(array, *args, **kwargs)
# Restore indeterminants in case func1d changed them.
out, _ = numpoly.align.align_indeterminants(out, arr.indeterminants)
# Return dummy index integer value that will be replaced with
# polynomials afterwards.
ret_val = len(collection)*numpy.ones(out.shape, dtype=int)
collection.append(out)
return ret_val
# Initiate wrapper
arr = numpoly.aspolynomial(arr)
out = numpy.apply_along_axis(wrapper_func, axis=axis, arr=arr.values)
# align exponents
polynomials = numpoly.align.align_exponents(*collection)
dtype = numpoly.result_type(*polynomials)
# Store results into new array
ret_val = numpoly.ndpoly(
exponents=polynomials[0].exponents,
shape=out.shape,
names=polynomials[0].indeterminants,
dtype=dtype,
).values
for idx, polynomial in enumerate(polynomials):
ret_val[out == idx] = polynomial.values
return numpoly.polynomial(
ret_val,
dtype=dtype,
names=polynomials[0].indeterminants,
allocation=polynomials[0].allocation,
)
```
#### File: numpoly/array_function/array_split.py
```python
from __future__ import annotations
from typing import List
import numpy
import numpy.typing
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.array_split)
def array_split(
ary: PolyLike,
indices_or_sections: numpy.typing.ArrayLike,
axis: int = 0,
) -> List[ndpoly]:
"""
Split an array into multiple sub-arrays.
Please refer to the ``split`` documentation. The only difference between
these functions is that ``array_split`` allows `indices_or_sections` to be
an integer that does *not* equally divide the axis. For an array of length
l that should be split into n sections, it returns l % n sub-arrays of size
l//n + 1 and the rest of size l//n.
See Also:
split : Split an array into multiple sub-arrays of equal size.
Examples:
>>> poly = numpoly.monomial(8).reshape(2, 4)
>>> poly
polynomial([[1, q0, q0**2, q0**3],
[q0**4, q0**5, q0**6, q0**7]])
>>> parts = numpoly.array_split(poly, 3, axis=1)
>>> part1, part2, part3 = parts
>>> part1
polynomial([[1, q0],
[q0**4, q0**5]])
>>> part2
polynomial([[q0**2],
[q0**6]])
>>> part3
polynomial([[q0**3],
[q0**7]])
"""
ary = numpoly.aspolynomial(ary)
results = numpy.array_split(
ary.values, indices_or_sections=indices_or_sections, axis=axis)
return [numpoly.aspolynomial(result, names=ary.indeterminants)
for result in results]
```
#### File: numpoly/array_function/array_str.py
```python
from __future__ import annotations
from typing import Optional
import numpy
import numpoly
from ..baseclass import PolyLike
from ..dispatch import implements
from .array_repr import to_string
@implements(numpy.array_str)
def array_str(
a: PolyLike,
max_line_width: Optional[int] = None,
precision: Optional[float] = None,
suppress_small: Optional[bool] = None,
) -> str:
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function is
similar to `array_repr`, the difference being that `array_repr` also
returns information on the kind of array and its data type.
Args:
a:
Input array.
max_line_width:
Inserts newlines if text is longer than `max_line_width`. Defaults
to ``numpy.get_printoptions()['linewidth']``.
precision:
Floating point precision. Defaults to
``numpy.get_printoptions()['precision']``.
suppress_small:
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero. Defaults to ``numpy.get_printoptions()['suppress']``.
Returns:
The string representation of an array.
Examples:
>>> q0 = numpoly.variable()
>>> numpoly.array_str(numpoly.polynomial([1, q0]))
'[1 q0]'
>>> numpoly.array_str(numpoly.polynomial([]))
'[]'
>>> numpoly.array_str(
... numpoly.polynomial([1e-6, 4e-7*q0, 2*q0, 3]),
... precision=4,
... suppress_small=True,
... )
'[0.0 0.0 2.0*q0 3.0]'
"""
a = numpoly.aspolynomial(a)
a = to_string(a, precision=precision, suppress_small=suppress_small)
return numpy.array2string(
numpy.array(a),
max_line_width=max_line_width,
separator=" ",
formatter={"all": str},
prefix="",
suffix="",
)
```
#### File: numpoly/array_function/atleast_3d.py
```python
from __future__ import annotations
from typing import List, Union
import numpy
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.atleast_3d)
def atleast_3d(*arys: PolyLike) -> Union[ndpoly, List[ndpoly]]:
"""
View inputs as arrays with at least three dimensions.
Args:
arys:
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have three or more dimensions are
preserved.
Returns:
An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
avoided where possible, and views with three or more dimensions are
returned. For example, a 1-D array of shape ``(N,)`` becomes a view of
shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a view
of shape ``(M, N, 1)``.
Examples:
>>> numpoly.atleast_3d(numpoly.variable())
polynomial([[[q0]]])
>>> a, b = numpoly.atleast_3d(1, [2, 3])
>>> a
polynomial([[[1]]])
>>> b
polynomial([[[2],
[3]]])
"""
if len(arys) == 1:
poly = numpoly.aspolynomial(arys[0])
array = numpy.atleast_3d(poly.values)
return numpoly.aspolynomial(array, names=poly.indeterminants)
return [atleast_3d(ary) for ary in arys]
```
#### File: numpoly/array_function/concatenate.py
```python
from __future__ import annotations
from typing import Optional, Sequence
import numpy
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.concatenate)
def concatenate(
arrays: Sequence[PolyLike],
axis: int = 0,
out: Optional[ndpoly] = None,
) -> ndpoly:
"""
Join a sequence of arrays along an existing axis.
Args:
arrays:
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis:
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out:
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if
no out argument were specified.
Returns:
The concatenated array.
Examples:
>>> const = numpy.array([[1, 2], [3, 4]])
>>> poly = numpoly.variable(2).reshape(1, 2)
>>> numpoly.concatenate((const, poly), axis=0)
polynomial([[1, 2],
[3, 4],
[q0, q1]])
>>> numpoly.concatenate((const, poly.T), axis=1)
polynomial([[1, 2, q0],
[3, 4, q1]])
>>> numpoly.concatenate((const, poly), axis=None)
polynomial([1, 2, 3, 4, q0, q1])
"""
arrays = numpoly.align_exponents(*arrays)
if out is None:
coefficients = [numpy.concatenate(
[array.values[key] for array in arrays], axis=axis)
for key in arrays[0].keys]
out = numpoly.polynomial_from_attributes(
exponents=arrays[0].exponents,
coefficients=coefficients,
names=arrays[0].names,
dtype=coefficients[0].dtype,
)
else:
for key in out.keys:
if key in arrays[0].keys:
numpy.concatenate([array.values[key] for array in arrays],
out=out.values[key], axis=axis)
return out
```
#### File: numpoly/array_function/expand_dims.py
```python
from __future__ import annotations
import numpy
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.expand_dims)
def expand_dims(a: PolyLike, axis: int) -> ndpoly:
"""
Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded
array shape.
Args:
a:
Input array.
axis:
Position in the expanded axes where the new axis is placed.
Returns:
View of `a` with the number of dimensions increased by one.
Examples:
>>> poly = numpoly.variable(2)
>>> numpoly.expand_dims(poly, axis=0)
polynomial([[q0, q1]])
>>> numpoly.expand_dims(poly, axis=1)
polynomial([[q0],
[q1]])
"""
a = numpoly.aspolynomial(a)
out = numpy.expand_dims(a.values, axis=axis)
return numpoly.polynomial(out, names=a.indeterminants)
```
#### File: numpoly/array_function/hsplit.py
```python
from __future__ import annotations
from typing import Sequence
import numpy
import numpy.typing
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.hsplit)
def hsplit(
ary: PolyLike,
indices_or_sections: numpy.typing.ArrayLike,
) -> Sequence[ndpoly]:
"""
Split an array into multiple sub-arrays horizontally (column-wise).
Please refer to the `split` documentation. `hsplit` is equivalent to
`split` with ``axis=1``, the array is always split along the second axis
regardless of the array dimension.
See Also:
split : Split an array into multiple sub-arrays of equal size.
Examples:
>>> poly = numpoly.monomial(8).reshape(2, 4)
>>> poly
polynomial([[1, q0, q0**2, q0**3],
[q0**4, q0**5, q0**6, q0**7]])
>>> part1, part2 = numpoly.hsplit(poly, 2)
>>> part1
polynomial([[1, q0],
[q0**4, q0**5]])
>>> part2
polynomial([[q0**2, q0**3],
[q0**6, q0**7]])
>>> part1, part2, part3 = numpoly.hsplit(poly, [1, 2])
>>> part1
polynomial([[1],
[q0**4]])
>>> part3
polynomial([[q0**2, q0**3],
[q0**6, q0**7]])
"""
ary = numpoly.aspolynomial(ary)
results = numpy.hsplit(ary.values, indices_or_sections=indices_or_sections)
return [numpoly.polynomial(
result, names=ary.indeterminants, allocation=ary.allocation)
for result in results]
```
#### File: numpoly/array_function/isfinite.py
```python
from __future__ import annotations
from typing import Any, Optional
import numpy
import numpy.typing
from ..baseclass import PolyLike
from ..dispatch import implements, simple_dispatch
@implements(numpy.isfinite)
def isfinite(
x: PolyLike,
out: Optional[numpy.ndarray] = None,
where: numpy.typing.ArrayLike = True,
**kwargs: Any,
) -> numpy.ndarray:
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Args:
x:
Input values.
out:
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or
`None`, a freshly-allocated array is returned. A tuple (possible
only as a keyword argument) must have length equal to the number of
outputs.
where:
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the
default ``out=None``, locations within it where the condition is
False will remain uninitialized.
kwargs:
Keyword args passed to numpy.ufunc.
Returns:
True where ``x`` is not positive infinity, negative infinity, or NaN;
false otherwise. This is a scalar if `x` is a scalar.
Notes:
Not a Number, positive infinity and negative infinity are considered to
be non-finite.
Examples:
>>> numpoly.isfinite(1)
True
>>> numpoly.isfinite(0)
True
>>> numpoly.isfinite(numpy.nan*numpoly.variable())
False
>>> numpoly.isfinite(numpy.inf)
False
>>> numpoly.isfinite(numpy.NINF)
False
>>> numpoly.isfinite([numpy.log(-1.), 1., numpy.log(0)])
array([False, True, False])
"""
out_ = simple_dispatch(
numpy_func=numpy.isfinite,
inputs=(x,),
where=where,
**kwargs
)
if out is None:
out_ = numpy.all(numpy.asarray(out_.coefficients), axis=0)
else:
out_ = numpy.all(numpy.asarray(out_.coefficients), out=out[0], axis=0)
return out_
```
#### File: numpoly/array_function/repeat.py
```python
from __future__ import annotations
import numpy
import numpy.typing
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.repeat)
def repeat(
a: PolyLike,
repeats: numpy.typing.ArrayLike,
axis: int = 0,
) -> ndpoly:
"""
Repeat elements of an array.
Args:
a:
Input array.
repeats:
The number of repetitions for each element. `repeats` is
broadcasted to fit the shape of the given axis.
axis:
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns:
Output array which has the same shape as `a`, except along the
given axis.
Examples:
>>> q0 = numpoly.variable()
>>> numpoly.repeat(q0, 4)
polynomial([q0, q0, q0, q0])
>>> poly = numpoly.polynomial([[1, q0-1], [q0**2, q0]])
>>> numpoly.repeat(poly, 2)
polynomial([[1, q0-1],
[1, q0-1],
[q0**2, q0],
[q0**2, q0]])
>>> numpoly.repeat(poly, 3, axis=1)
polynomial([[1, 1, 1, q0-1, q0-1, q0-1],
[q0**2, q0**2, q0**2, q0, q0, q0]])
>>> numpoly.repeat(poly, [1, 2], axis=0)
polynomial([[1, q0-1],
[q0**2, q0],
[q0**2, q0]])
"""
a = numpoly.aspolynomial(a)
repeats = numpy.asarray(repeats)
result = numpy.repeat(a.values, repeats=repeats, axis=axis)
return numpoly.aspolynomial(result, names=a.indeterminants)
```
#### File: numpoly/array_function/where.py
```python
from __future__ import annotations
import numpy
import numpy.typing
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.where)
def where(condition: numpy.typing.ArrayLike, *args: PolyLike) -> ndpoly:
"""
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
documentation covers only the case where all three arguments a re
provided.
Args:
condition:
Where True, yield `x`, otherwise yield `y`.
x:
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns:
An array with elements from `x` where `condition` is True,
and elements from `y` elsewhere.
Examples:
>>> poly = numpoly.variable()*numpy.arange(4)
>>> poly
polynomial([0, q0, 2*q0, 3*q0])
>>> numpoly.where([1, 0, 1, 0], 7, 2*poly)
polynomial([7, 2*q0, 7, 6*q0])
>>> numpoly.where(poly, 2*poly, 4)
polynomial([4, 2*q0, 4*q0, 6*q0])
>>> numpoly.where(poly)
(array([1, 2, 3]),)
"""
if isinstance(condition, numpoly.ndpoly):
condition = numpy.any(numpy.asarray(
condition.coefficients), 0).astype(bool)
if not args:
return numpy.where(condition)
poly1, poly2 = numpoly.align_polynomials(*args)
coefficients = [numpy.where(condition, x1, x2)
for x1, x2 in zip(poly1.coefficients, poly2.coefficients)]
dtype = numpy.result_type(poly1.dtype, poly2.dtype)
return numpoly.polynomial_from_attributes(
exponents=poly1.exponents,
coefficients=coefficients,
names=poly1.names,
dtype=dtype,
)
```
#### File: numpoly/construct/from_attributes.py
```python
from __future__ import annotations
from typing import Optional, Sequence, Tuple, Union
import numpy.typing
import numpoly
from . import clean
from ..baseclass import ndpoly
def polynomial_from_attributes(
exponents: numpy.typing.ArrayLike,
coefficients: Sequence[numpy.typing.ArrayLike],
names: Union[None, str, Tuple[str, ...], ndpoly] = None,
dtype: Optional[numpy.typing.DTypeLike] = None,
allocation: Optional[int] = None,
retain_coefficients: Optional[bool] = None,
retain_names: Optional[bool] = None,
) -> ndpoly:
"""
Construct polynomial from polynomial attributes.
Args:
exponents:
The exponents in an integer array with shape ``(N, D)``, where
``N`` is the number of terms in the polynomial sum and ``D`` is
the number of dimensions.
coefficients:
The polynomial coefficients. Must correspond to `exponents` by
having the same length ``N``.
names:
The indeterminant names, either as string names or as
simple polynomials. Must correspond to the exponents by having
length ``D``.
dtype:
The data type of the polynomial. If omitted, extract from
`coefficients`.
allocation:
The maximum number of polynomial exponents. If omitted, use
length of exponents for allocation.
retain_coefficients:
Do not remove redundant coefficients. If omitted use global
defaults.
retain_names:
Do not remove redundant names. If omitted use global defaults.
Returns:
Polynomial array with attributes determined by the input.
Examples:
>>> numpoly.ndpoly.from_attributes(
... exponents=[(0,), (1,)],
... coefficients=[[1, 0], [0, 1]],
... names="q4",
... )
polynomial([1, q4])
>>> numpoly.ndpoly.from_attributes(
... exponents=[(0, 0, 0), (1, 1, 2)],
... coefficients=[4, -1],
... names=("q2", "q4", "q10"),
... )
polynomial(-q2*q4*q10**2+4)
>>> numpoly.ndpoly.from_attributes(
... exponents=[(0,)],
... coefficients=[0],
... )
polynomial(0)
"""
exponents, coefficients, names = clean.postprocess_attributes(
exponents=exponents,
coefficients=coefficients,
names=names,
retain_coefficients=retain_coefficients,
retain_names=retain_names,
)
if coefficients:
dtype = coefficients[0].dtype if dtype is None else dtype
shape = coefficients[0].shape
else:
dtype = dtype if dtype else int
shape = ()
poly = numpoly.ndpoly(
exponents=exponents,
shape=shape,
names=names,
dtype=dtype,
allocation=allocation,
)
for key, values in zip(poly.keys, coefficients):
poly.values[key] = values
return poly
```
#### File: numpoly/construct/from_roots.py
```python
from typing import Optional, Sequence
import numpy
import numpy.typing
import numpoly
from ..baseclass import ndpoly
def polynomial_from_roots(
seq_of_zeros: Sequence[int],
dtype: Optional[numpy.typing.DTypeLike] = None,
) -> ndpoly:
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient is one
for the given sequence of zeros (multiple roots must be included in the
sequence as many times as their multiplicity; see Examples). A square
matrix (or array, which will be treated as a matrix) can also be given, in
which case the coefficients of the characteristic polynomial of the matrix
are returned.
Args:
seq_of_zeros:
A sequence of polynomial roots, or a square array or matrix object.
Either shape (N,) or (N, N).
dtype:
Any object that can be interpreted as a numpy data type.
Returns:
1-D polynomial which have `seq_of_zeros` as roots.
Leading coefficient is always 1.
Raises:
ValueError:
If input is the wrong shape (the input must be a 1-D or square
2-D array).
Examples:
>>> numpoly.polynomial_from_roots((0, 0, 0))
polynomial(q0**3)
>>> numpoly.polynomial_from_roots((-0.5, 0, 0.5))
polynomial(q0**3-0.25*q0)
"""
exponent = numpy.arange(len(seq_of_zeros), -1, -1, dtype=int)
basis = numpoly.variable(dtype=dtype)**exponent
return numpoly.sum(numpy.poly(seq_of_zeros)*basis)
```
#### File: numpoly/construct/polynomial.py
```python
from __future__ import annotations
from typing import Optional, Tuple, Union
import numpy
import numpy.typing
import numpoly
from .compose import compose_polynomial_array
from ..baseclass import ndpoly, PolyLike
def polynomial(
poly_like: PolyLike = 0,
names: Union[None, str, Tuple[str, ...], ndpoly] = None,
dtype: Optional[numpy.typing.DTypeLike] = None,
allocation: Optional[int] = None,
) -> ndpoly:
"""
Attempt to cast an object into a polynomial array.
Supports various casting options:
================== =======================================================
``dict`` Keys are tuples that represent polynomial exponents,
and values are numpy arrays that represents polynomial
coefficients.
``numpoly.ndpoly`` Copy of the polynomial.
``numpy.ndarray`` Constant term polynomial.
``sympy.Poly`` Convert polynomial from ``sympy`` to ``numpoly``,
if possible.
``Iterable`` Multivariate array construction.
structured array Assumes that the input are raw polynomial core and can
be used to construct a polynomial without changing the
data. Used for developer convenience.
================== =======================================================
Args:
poly_like:
Input to be converted to a `numpoly.ndpoly` polynomial type.
names:
Name of the indeterminant variables. If possible to infer from
``poly_like``, this argument will be ignored.
dtype:
Data type used for the polynomial coefficients.
allocation:
The maximum number of polynomial exponents. If omitted, use
length of exponents for allocation.
Returns:
Polynomial based on input ``poly_like``.
Examples:
>>> numpoly.polynomial({(1,): 1})
polynomial(q0)
>>> q0, q1 = numpoly.variable(2)
>>> q0**2+q0*q1+2
polynomial(q0*q1+q0**2+2)
>>> -3*q0+q0**2+q1
polynomial(q0**2+q1-3*q0)
>>> numpoly.polynomial([q0*q1, q0, q1])
polynomial([q0*q1, q0, q1])
>>> numpoly.polynomial([1, 2, 3])
polynomial([1, 2, 3])
>>> import sympy
>>> q0_, q1_ = sympy.symbols("q0, q1")
>>> numpoly.polynomial(3*q0_*q1_-4+q0_**5)
polynomial(q0**5+3*q0*q1-4)
"""
if isinstance(poly_like, dict):
poly = numpoly.ndpoly(exponents=[(0,)], shape=())
exponents, coefficients = zip(*list(poly_like.items()))
poly = numpoly.ndpoly.from_attributes(
exponents=exponents,
coefficients=coefficients,
names=names,
dtype=dtype,
allocation=allocation,
)
elif isinstance(poly_like, numpoly.ndpoly):
if names is None:
names = poly_like.names
poly = numpoly.ndpoly.from_attributes(
exponents=poly_like.exponents,
coefficients=poly_like.coefficients,
names=names,
dtype=dtype,
allocation=allocation,
)
# assume polynomial converted to structured array
elif isinstance(poly_like, numpy.ndarray) and poly_like.dtype.names:
keys = numpy.asarray(poly_like.dtype.names, dtype="U")
keys = numpy.array([key for key in keys if not key.isdigit()])
keys = numpy.array(
keys, dtype=f"U{numpy.max(numpy.char.str_len(keys))}")
exponents = keys.view(numpy.uint32)-numpoly.ndpoly.KEY_OFFSET
exponents = exponents.reshape(len(keys), -1)
coefficients = [poly_like[key] for key in poly_like.dtype.names]
poly = numpoly.ndpoly.from_attributes(
exponents=exponents,
coefficients=coefficients,
names=names,
allocation=allocation,
)
elif isinstance(poly_like, (int, float, numpy.ndarray, numpy.generic)):
poly = numpoly.ndpoly.from_attributes(
exponents=[(0,)],
coefficients=[numpy.asarray(poly_like)],
names=names,
dtype=dtype,
allocation=allocation,
)
# handler for sympy objects
elif hasattr(poly_like, "as_poly"):
poly_like = poly_like.as_poly() # type: ignore
exponents = poly_like.monoms() # type: ignore
coefficients = [
int(coeff) if coeff.is_integer else float(coeff) # type: ignore
for coeff in poly_like.coeffs() # type: ignore
]
names = [str(elem) for elem in poly_like.gens] # type: ignore
poly = numpoly.ndpoly.from_attributes(
exponents=exponents,
coefficients=coefficients,
names=names,
allocation=allocation,
)
else:
poly = compose_polynomial_array(
arrays=poly_like, # type: ignore
dtype=dtype,
allocation=allocation,
)
return poly
```
#### File: poly_function/divide/divide.py
```python
from __future__ import annotations
from typing import Any, Optional
import numpy
import numpy.typing
from ...baseclass import PolyLike, ndpoly
from ...dispatch import implements_function
from .divmod import poly_divmod
@implements_function(numpy.true_divide)
def poly_divide(
x1: PolyLike,
x2: PolyLike,
out: Optional[ndpoly] = None,
where: numpy.typing.ArrayLike = True,
**kwargs: Any,
) -> ndpoly:
"""
Return a polynomial division of the inputs, element-wise.
Note that if divisor is a polynomial, then the division could have a
remainder, as polynomial division is not exactly the same as numerical
division.
Args:
x1:
Dividend array.
x2:
Divisor array. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which becomes the shape of the
output).
out:
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or
`None`, a freshly-allocated array is returned. A tuple (possible
only as a keyword argument) must have length equal to the number of
outputs.
where:
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value. Note
that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
kwargs:
Keyword args passed to numpy.ufunc.
Returns:
This is a scalar if both `x1` and `x2` are scalars.
Examples:
>>> q0 = numpoly.variable()
>>> poly = numpoly.polynomial([14, q0**2-3])
>>> numpoly.poly_divide(poly, 4)
polynomial([3.5, 0.25*q0**2-0.75])
>>> numpoly.poly_divide(poly, q0)
polynomial([0.0, q0])
"""
dividend, _ = poly_divmod(x1, x2, out=out, where=where, **kwargs)
return dividend
```
#### File: numpoly/poly_function/sortable_proxy.py
```python
from __future__ import annotations
import numpy
import numpoly
from ..baseclass import PolyLike
def sortable_proxy(
poly: PolyLike,
graded: bool = False,
reverse: bool = False,
) -> numpy.ndarray:
"""
Create a numerical proxy for a polynomial to allow compare.
As polynomials are not inherently sortable, values are sorted using the
highest `lexicographical` ordering. Between the values that have the same
highest ordering, the elements are sorted using the coefficients. This also
ensures that the method behaves as expected with ``numpy.ndarray``.
Args:
poly:
Polynomial to convert into something sortable.
graded:
Graded sorting, meaning the indices are always sorted by the index
sum. E.g. ``q0**2*q1**2*q2**2`` has an exponent sum of 6, and will
therefore be consider larger than both ``q0**3*q1*q2``,
``q0*q1**3*q2`` and ``q0*q1*z**3``.
reverse:
Reverses lexicographical sorting meaning that ``q0*q1**3`` is
considered bigger than ``q0**3*q1``, instead of the opposite.
Returns:
Integer array where ``a > b`` is retained for the giving rule of
``ordering``.
Examples:
>>> q0, q1 = numpoly.variable(2)
>>> poly = numpoly.polynomial(
... [q0**2, 2*q0, 3*q1, 4*q0, 5])
>>> numpoly.sortable_proxy(poly)
array([3, 1, 4, 2, 0])
>>> numpoly.sortable_proxy(poly, reverse=True)
array([4, 2, 1, 3, 0])
>>> numpoly.sortable_proxy([8, 4, 10, -100])
array([2, 1, 3, 0])
>>> numpoly.sortable_proxy([[8, 4], [10, -100]])
array([[2, 1],
[3, 0]])
"""
poly = numpoly.aspolynomial(poly)
coefficients = poly.coefficients
proxy = numpy.tile(-1, poly.shape)
largest = numpoly.lead_exponent(poly, graded=graded, reverse=reverse)
for idx in numpoly.glexsort(
poly.exponents.T, graded=graded, reverse=reverse):
indices = numpy.all(largest == poly.exponents[idx], axis=-1)
values = numpy.argsort(coefficients[idx][indices])
proxy[indices] = numpy.argsort(values)+numpy.max(proxy)+1
proxy = numpy.argsort(numpy.argsort(proxy.ravel())).reshape(proxy.shape)
return proxy
```
#### File: numpoly/poly_function/tonumpy.py
```python
from __future__ import annotations
import numpy
import numpoly
from ..baseclass import PolyLike
def tonumpy(poly: PolyLike) -> numpy.ndarray:
"""
Cast polynomial to numpy.ndarray, if possible.
Args:
poly:
polynomial to cast.
Returns:
Numpy array.
Raises:
numpoly.baseclass.FeatureNotSupported:
Only constant polynomials can be cast to numpy.ndarray.
Examples:
>>> numpoly.tonumpy(numpoly.polynomial([1, 2]))
array([1, 2])
"""
poly = numpoly.aspolynomial(poly)
if not poly.isconstant():
raise numpoly.FeatureNotSupported(
"only constant polynomials can be converted to array.")
idx = numpy.argwhere(numpy.all(poly.exponents == 0, -1)).item()
if poly.size:
return numpy.array(poly.coefficients[idx])
return numpy.array([])
```
#### File: numpoly/utils/glexsort.py
```python
from __future__ import annotations
import numpy
import numpy.typing
def glexsort(
keys: numpy.typing.ArrayLike,
graded: bool = False,
reverse: bool = False,
) -> numpy.ndarray:
"""
Sort keys using graded lexicographical ordering.
Same as ``numpy.lexsort``, but also support graded and reverse
lexicographical ordering.
Args:
keys:
Values to sort.
graded:
Graded sorting, meaning the indices are always sorted by the index
sum. E.g. ``(2, 2, 2)`` has a sum of 6, and will therefore be
consider larger than both ``(3, 1, 1)`` and ``(1, 1, 3)``.
reverse:
Reverse lexicographical sorting meaning that ``(1, 3)`` is
considered smaller than ``(3, 1)``, instead of the opposite.
Returns:
Array of indices that sort the keys along the specified axis.
Examples:
>>> indices = numpy.array([[0, 0, 0, 1, 2, 1],
... [1, 2, 0, 0, 0, 1]])
>>> indices[:, numpy.lexsort(indices)]
array([[0, 1, 2, 0, 1, 0],
[0, 0, 0, 1, 1, 2]])
>>> indices[:, numpoly.glexsort(indices)]
array([[0, 1, 2, 0, 1, 0],
[0, 0, 0, 1, 1, 2]])
>>> indices[:, numpoly.glexsort(indices, reverse=True)]
array([[0, 0, 0, 1, 1, 2],
[0, 1, 2, 0, 1, 0]])
>>> indices[:, numpoly.glexsort(indices, graded=True)]
array([[0, 1, 0, 2, 1, 0],
[0, 0, 1, 0, 1, 2]])
>>> indices[:, numpoly.glexsort(indices, graded=True, reverse=True)]
array([[0, 0, 1, 0, 1, 2],
[0, 1, 0, 2, 1, 0]])
>>> indices = numpy.array([4, 5, 6, 3, 2, 1])
>>> indices[numpoly.glexsort(indices)]
array([1, 2, 3, 4, 5, 6])
"""
keys_ = numpy.atleast_2d(keys)
if reverse:
keys_ = keys_[::-1]
indices = numpy.array(numpy.lexsort(keys_))
if graded:
indices = indices[numpy.argsort(
numpy.sum(keys_[:, indices], axis=0))].T
return indices
```
#### File: npoly/test/test_array_function.py
```python
from __future__ import division
from pytest import raises
import numpy
from numpoly import polynomial
import numpoly
X, Y = numpoly.variable(2)
def assert_equal(results, reference, c_contiguous=None,
f_contiguous=None, type_=None):
"""
Assert that a return value for a function is the same as reference.
Checks types, values, datatype, shape, C- and F-contiguous-ness.
Args:
results (numpy.ndarray, numpoly.ndpoly):
The results to check are correct.
reference (numpy.ndarray, numpoly.ndpoly):
The reference the results is checked against. Input will be cast to
the same type as `results`.
c_contiguous (Optional[bool]):
Check if `results` has correct `C_CONTIGUOUS` flag. Checked against
`reference` if not provided.
f_contiguous (Optional[bool]):
Check if `results` has correct `F_CONTIGUOUS` flag. Checked against
`reference` if not provided.
type_ (Optional[type]):
Check if `results` is correct type using `isinstance`. If not
provided, results are checked to be legal numpy or numpoly type.
"""
if type_ is None:
assert isinstance(
results, (bool, numpy.bool_, numpy.number, numpy.ndarray)), (
f"unrecognized results type: {results}")
else:
assert isinstance(results, type_), (
f"invalid results type: {results} != {type_}")
if isinstance(results, numpoly.ndpoly):
reference = numpoly.aspolynomial(reference)
else:
results = numpy.asarray(results)
reference = numpy.asarray(reference)
assert results.shape == reference.shape, (
f"shape mismatch: {results} != {reference}")
assert results.dtype == reference.dtype, (
f"dtype mismatch: {results} != {reference}")
if not isinstance(results, numpoly.ndpoly):
assert numpy.allclose(results, reference), (
f"value mismatch: {results} != {reference}")
elif results.shape:
assert numpy.all(results == reference), (
f"value mismatch: {results} != {reference}")
else:
assert results == reference, (
f"value mismatch: {results} != {reference}")
if c_contiguous is None:
c_contiguous = reference.flags["C_CONTIGUOUS"]
assert results.flags["C_CONTIGUOUS"] == c_contiguous, (
f"c_contiguous mismatch: {results} != {reference}")
if f_contiguous is None:
f_contiguous = reference.flags["F_CONTIGUOUS"]
assert results.flags["F_CONTIGUOUS"] == f_contiguous, (
f"f_contiguous mismatch: {results} != {reference}")
def test_absolute(interface):
"""Tests for numpoly.absolute."""
assert_equal(X, abs(-X))
assert_equal(abs(X), abs(-X))
assert_equal(interface.abs(polynomial([X-Y, Y-4])), [X+Y, Y+4])
def test_add(interface):
"""Tests for numpoly.add."""
assert_equal(interface.add(X, 3), 3+X)
assert_equal(interface.add(polynomial([1, X, Y]), 4), [5, 4+X, 4+Y])
assert_equal(interface.add(polynomial([0, X]), polynomial([Y, 0])), [Y, X])
assert_equal(interface.add(polynomial(
[[1, X], [Y, X*Y]]), [2, X]), [[3, 2*X], [2+Y, X+X*Y]])
def test_any(interface):
"""Tests for numpoly.any."""
poly = polynomial([[0, Y], [0, 0]])
assert_equal(interface.any(poly), True)
assert_equal(interface.any(poly, axis=0), [False, True])
assert_equal(
interface.any(poly, axis=-1, keepdims=True), [[True], [False]])
def test_all(interface):
"""Tests for numpoly.all."""
poly = polynomial([[0, Y], [X, 1]])
assert_equal(interface.all(poly), False)
assert_equal(interface.all(poly, axis=0), [False, True])
assert_equal(
interface.all(poly, axis=-1, keepdims=True), [[False], [True]])
def test_allclose(func_interface):
"""Tests for numpoly.allclose."""
poly1 = numpoly.polynomial([1e10*X, 1e-7])
poly2 = numpoly.polynomial([1.00001e10*X, 1e-8])
assert_equal(func_interface.allclose(poly1, poly2), False, type_=bool)
poly1 = numpoly.polynomial([1e10*X, 1e-8])
poly2 = numpoly.polynomial([1.00001e10*X, 1e-9])
assert_equal(func_interface.allclose(poly1, poly2), True, type_=bool)
poly2 = numpoly.polynomial([1e10*Y, 1e-8])
assert_equal(func_interface.allclose(poly1, poly2), False, type_=bool)
def test_amax(func_interface):
"""Tests for numpoly.amax."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(func_interface.amax(poly), X**4)
assert_equal(func_interface.amax(poly, axis=0), [X**4, Y**2, X**2, Y**2])
assert_equal(func_interface.amax(poly, axis=1), [X**2, X**2, Y**2, X**4])
assert_equal(func_interface.amax(poly.reshape(2, 2, 2, 2), axis=(0, 1)),
[[X**4, Y**2], [X**2, Y**2]])
def test_amin(func_interface):
"""Tests for numpoly.amin."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(func_interface.amin(poly), 1)
assert_equal(func_interface.amin(poly, axis=0), [1, 3, 2, X])
assert_equal(func_interface.amin(poly, axis=1), [1, 2, 3, Y])
assert_equal(func_interface.amin(poly.reshape(2, 2, 2, 2), axis=(0, 1)),
[[1, 3], [2, X]])
def test_argmax(func_interface):
"""Tests for numpoly.argmax."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(func_interface.argmax(poly), 13)
assert_equal(func_interface.argmax(poly, axis=0), [3, 3, 1, 0])
assert_equal(func_interface.argmax(poly, axis=1), [3, 2, 1, 1])
def test_argmin(func_interface):
"""Tests for numpoly.argmin."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(func_interface.argmin(poly), 0)
assert_equal(func_interface.argmin(poly, axis=0), [0, 0, 2, 1])
assert_equal(func_interface.argmin(poly, axis=1), [0, 3, 2, 2])
def test_apply_along_axis(func_interface):
"""Tests for numpoly.apply_along_axis."""
np_array = numpy.arange(9, dtype=int).reshape(3, 3)
assert_equal(
func_interface.apply_along_axis(numpy.sum, 0, np_array), [9, 12, 15])
assert_equal(
func_interface.apply_along_axis(numpy.sum, 1, np_array), [3, 12, 21])
poly1 = numpoly.polynomial([[X, X, X], [Y, Y, Y], [1, 2, 3]])
assert_equal(
func_interface.apply_along_axis(numpoly.sum, 0, poly1),
[X+Y+1, X+Y+2, X+Y+3])
assert_equal(
func_interface.apply_along_axis(numpoly.sum, 1, poly1), [3*X, 3*Y, 6])
def test_apply_over_axes(func_interface):
"""Tests for numpoly.apply_over_axes."""
np_array = numpy.arange(9).reshape(3, 3)
assert_equal(
func_interface.apply_over_axes(numpy.sum, np_array, 0), [[9, 12, 15]])
assert_equal(
func_interface.apply_over_axes(numpy.sum, np_array, 1),
[[3], [12], [21]])
poly1 = numpoly.polynomial([[X, X, X], [Y, Y, Y], [1, 2, 3]])
assert_equal(
func_interface.apply_over_axes(
numpoly.sum, poly1, 0), [[X+Y+1, X+Y+2, X+Y+3]])
assert_equal(
func_interface.apply_over_axes(
numpoly.sum, poly1, 1), [[3*X], [3*Y], [6]])
def test_around(interface):
"""Tests for numpoly.around."""
poly = 123.45*X+Y
assert_equal(interface.round(poly), 123.*X+Y)
assert_equal(interface.round(poly, decimals=1), 123.4*X+Y)
assert_equal(interface.round(poly, decimals=-2), 100.*X)
out = 5.*X+6.*Y
interface.round(poly, decimals=1, out=out)
assert_equal(out, 123.4*X+Y)
def test_array_repr(func_interface):
"""Tests for numpoly.array_repr."""
assert repr(polynomial([])).startswith("polynomial([], dtype=")
assert repr(4+6*X**2) == "polynomial(6*q0**2+4)"
assert func_interface.array_repr(4+6*X**2) == "polynomial(6*q0**2+4)"
assert (repr(polynomial([1., -5*X, 3-X**2])) ==
"polynomial([1.0, -5.0*q0, -q0**2+3.0])")
assert (func_interface.array_repr(polynomial([1., -5*X, 3-X**2])) ==
"polynomial([1.0, -5.0*q0, -q0**2+3.0])")
assert repr(polynomial([[[1, 2], [5, Y]]])) == """\
polynomial([[[1, 2],
[5, q1]]])"""
assert func_interface.array_repr(polynomial([[[1, 2], [5, Y]]])) == """\
polynomial([[[1, 2],
[5, q1]]])"""
def test_array_split(func_interface):
"""Tests for numpoly.array_split."""
test_split(func_interface)
poly = numpoly.polynomial([[1, X, X**2], [X+Y, Y, Y]])
part1, part2 = func_interface.array_split(poly, 2, axis=1)
assert_equal(part1, [[1, X], [X+Y, Y]])
assert_equal(part2, [[X**2], [Y]])
def test_array_str(func_interface):
"""Tests for numpoly.array_str."""
assert str(polynomial([])).startswith("[]")
assert str(4+6*X**2) == "6*q0**2+4"
assert func_interface.array_str(4+6*X**2) == "6*q0**2+4"
assert str(polynomial([1., -5*X, 3-X**2])) == "[1.0 -5.0*q0 -q0**2+3.0]"
assert func_interface.array_str(
polynomial([1., -5*X, 3-X**2])) == "[1.0 -5.0*q0 -q0**2+3.0]"
assert str(polynomial([[[1, 2], [5, Y]]])) == """\
[[[1 2]
[5 q1]]]"""
assert func_interface.array_str(polynomial([[[1, 2], [5, Y]]])) == """\
[[[1 2]
[5 q1]]]"""
def test_atleast_1d(func_interface):
"""Tests for numpoly.atleast_1d."""
polys = [X, [X], [[X]], [[[X]]]]
results = func_interface.atleast_1d(*polys)
assert isinstance(results, list)
assert_equal(results[0], [X])
assert_equal(results[1], [X])
assert_equal(results[2], [[X]])
assert_equal(results[3], [[[X]]])
def test_atleast_2d(func_interface):
"""Tests for numpoly.atleast_2d."""
polys = [X, [X], [[X]], [[[X]]]]
results = func_interface.atleast_2d(*polys)
assert isinstance(results, list)
assert_equal(results[0], [[X]])
assert_equal(results[1], [[X]])
assert_equal(results[2], [[X]])
assert_equal(results[3], [[[X]]])
def test_atleast_3d(func_interface):
"""Tests for numpoly.atleast_3d."""
polys = [X, [X], [[X]], [[[X]]]]
results = func_interface.atleast_3d(*polys)
assert isinstance(results, list)
assert_equal(results[0], [[[X]]])
assert_equal(results[1], [[[X]]])
assert_equal(results[2], [[[X]]])
assert_equal(results[3], [[[X]]])
def test_broadcast_array(func_interface):
"""Tests for numpoly.broadcast_array."""
polys = [X, [[Y, 1]], [[X], [Y]], [[X, 1], [Y, 2]]]
results = func_interface.broadcast_arrays(*polys)
assert isinstance(results, list)
assert len(results) == len(polys)
assert all(result.shape == (2, 2) for result in results)
assert_equal(results[0], [[X, X], [X, X]])
assert_equal(results[1], [[Y, 1], [Y, 1]])
assert_equal(results[2], [[X, X], [Y, Y]])
assert_equal(results[3], [[X, 1], [Y, 2]])
def test_ceil(func_interface):
"""Tests for numpoly.ceil."""
poly = polynomial([-1.7*X, X-1.5, -0.2, 3.2+1.5*X, 1.7, 2.0])
assert_equal(func_interface.ceil(poly),
[-X, -1.0+X, 0.0, 4.0+2.0*X, 2.0, 2.0])
def test_common_type(func_interface):
"""Tests for numpoly.common_type."""
assert func_interface.common_type(
numpy.array(2, dtype=numpy.float32)) == numpy.float32
assert func_interface.common_type(X) == numpy.float64
assert func_interface.common_type(
numpy.arange(3), 1j*X, 45) == numpy.complex128
def test_concatenate(func_interface):
"""Tests for numpoly.concatenate."""
poly1 = polynomial([[0, Y], [X, 1]])
assert_equal(func_interface.concatenate([poly1, poly1]),
[[0, Y], [X, 1], [0, Y], [X, 1]])
assert_equal(func_interface.concatenate([poly1, [[X*Y, 1]]], 0),
[[0, Y], [X, 1], [X*Y, 1]])
assert_equal(func_interface.concatenate([poly1, [[X*Y], [1]]], 1),
[[0, Y, X*Y], [X, 1, 1]])
assert_equal(func_interface.concatenate([poly1, poly1], 1),
[[0, Y, 0, Y], [X, 1, X, 1]])
def test_copyto(func_interface):
"""Tests for numpoly.copyto."""
poly = numpoly.polynomial([1, X, Y])
poly_ref = numpoly.polynomial([1, X, Y])
with raises(ValueError):
func_interface.copyto(poly.values, poly_ref, casting="safe")
with raises(ValueError):
numpoly.copyto(poly.values, [1, 2, 3], casting="safe")
with raises(ValueError):
numpoly.copyto(X, Y, casting="unsafe")
func_interface.copyto(poly, X)
assert_equal(poly, [X, X, X])
func_interface.copyto(poly.values, poly_ref, casting="unsafe")
assert_equal(poly, poly_ref)
func_interface.copyto(poly, 4)
assert_equal(poly, [4, 4, 4])
func_interface.copyto(poly.values, poly_ref.values, casting="unsafe")
assert_equal(poly, poly_ref)
poly = numpoly.polynomial([1, 2, 3])
func_interface.copyto(poly, [3, 2, 1], casting="unsafe")
assert_equal(poly, [3, 2, 1])
func_interface.copyto(
poly.values, numpoly.polynomial([1, 2, 3]), casting="unsafe")
assert_equal(poly, [1, 2, 3])
out = numpy.zeros(3, dtype=float)
numpoly.copyto(out, poly, casting="unsafe")
assert_equal(out, [1., 2., 3.])
def test_count_nonzero(func_interface):
"""Tests for numpoly.count_nonzero."""
poly1 = polynomial([[0, Y], [X, 1]])
poly2 = polynomial([[0, Y, X, 0, 0], [3, 0, 0, 2, 19]])
assert_equal(func_interface.count_nonzero(poly1), 3, type_=int)
assert_equal(func_interface.count_nonzero(poly1, axis=0), [1, 2])
assert_equal(func_interface.count_nonzero(poly2, axis=0), [1, 1, 1, 1, 1])
assert_equal(func_interface.count_nonzero(X), 1, type_=int)
def test_cumsum(interface):
"""Tests for numpoly.cumsum."""
poly1 = polynomial([[0, Y], [X, 1]])
assert_equal(interface.cumsum(poly1), [0, Y, X+Y, 1+X+Y])
assert_equal(interface.cumsum(poly1, axis=0), [[0, Y], [X, Y+1]])
assert_equal(interface.cumsum(poly1, axis=1), [[0, Y], [X, X+1]])
def test_det():
"""Test for numpoly.det."""
array = [[1, 2], [3, 4]]
poly = polynomial([[1, Y], [X, 1]])
assert_equal(numpoly.det([array, poly]), [-2, 1-X*Y])
assert_equal(numpy.linalg.det(poly), 1-X*Y)
assert_equal(
numpoly.det([[1, X, Y], [Y, 1, X], [X, Y, 1]]), X**3+Y**3-3*X*Y+1)
def test_diag(func_interface):
"""Tests for numpoly.diag."""
poly = polynomial([[1, 2, X], [4, Y, 6], [7, 8, X+Y]])
assert_equal(func_interface.diag(poly), [1, Y, X+Y])
assert_equal(func_interface.diag(poly, k=1), [2, 6])
assert_equal(func_interface.diag(poly, k=-1), [4, 8])
poly = polynomial([X, Y])
assert_equal(func_interface.diag(poly), [[X, 0], [0, Y]])
assert_equal(func_interface.diag(poly, k=1),
[[0, X, 0], [0, 0, Y], [0, 0, 0]])
assert_equal(func_interface.diag(poly, k=-1),
[[0, 0, 0], [X, 0, 0], [0, Y, 0]])
def test_diagonal(interface):
"""Tests for numpoly.diagonal."""
# TODO: return view instead of copy
poly = polynomial([[1, 2, X], [4, Y, 6], [7, 8, X+Y]])
assert_equal(interface.diagonal(poly), [1, Y, X+Y])
assert_equal(interface.diagonal(poly, offset=1), [2, 6])
assert_equal(interface.diagonal(poly, offset=-1), [4, 8])
poly = numpoly.monomial(27).reshape(3, 3, 3)
assert_equal(interface.diagonal(poly, axis1=0, axis2=1),
[[1, X**12, X**24],
[X, X**13, X**25],
[X**2, X**14, X**26]])
assert_equal(interface.diagonal(poly, axis1=0, axis2=2),
[[1, X**10, X**20],
[X**3, X**13, X**23],
[X**6, X**16, X**26]])
assert_equal(interface.diagonal(poly, axis1=1, axis2=2),
[[1, X**4, X**8],
[X**9, X**13, X**17],
[X**18, X**22, X**26]])
def test_diff(func_interface):
"""Tests for numpoly.diff."""
poly = polynomial([[1, 2, X], [4, Y, 6], [7, 8, X+Y]])
assert_equal(func_interface.diff(poly),
[[1, X-2], [Y-4, 6-Y], [1, X+Y-8]])
assert_equal(func_interface.diff(poly, n=2),
[[X-3], [10-2*Y], [X+Y-9]])
assert_equal(func_interface.diff(poly, axis=0),
[[3, Y-2, 6-X], [3, 8-Y, X+Y-6]])
assert_equal(func_interface.diff(poly, append=X),
[[1, X-2, 0], [Y-4, 6-Y, X-6], [1, X+Y-8, -Y]])
assert_equal(func_interface.diff(poly, prepend=Y),
[[1-Y, 1, X-2], [4-Y, Y-4, 6-Y], [7-Y, 1, X+Y-8]])
assert_equal(func_interface.diff(poly, append=X, prepend=Y),
[[1-Y, 1, X-2, 0], [4-Y, Y-4, 6-Y, X-6], [7-Y, 1, X+Y-8, -Y]])
def test_divmod(func_interface):
"""Tests for numpoly.divmod."""
array = numpy.array([7, 11])
quotient, remainder = func_interface.divmod(array, 5)
assert_equal(quotient, [1, 2])
assert_equal(remainder, [2, 1])
with raises(numpoly.FeatureNotSupported):
func_interface.divmod(array, X)
with raises(numpoly.FeatureNotSupported):
func_interface.divmod(X, X)
def test_dsplit(func_interface):
"""Tests for numpoly.dsplit."""
poly = numpoly.polynomial([[[1, X], [X+Y, Y]]])
part1, part2 = func_interface.dsplit(poly, 2)
assert_equal(part1, [[[1], [X+Y]]])
assert_equal(part2, [[[X], [Y]]])
def test_dstack(func_interface):
"""Tests for numpoly.dstack."""
poly1 = numpoly.polynomial([1, X, 2])
poly2 = numpoly.polynomial([Y, 3, 4])
assert_equal(func_interface.dstack([poly1, poly2]),
[[[1, Y], [X, 3], [2, 4]]])
def test_ediff1d(func_interface):
"""Tests for numpoly.ediff1d."""
poly1 = numpoly.polynomial([1, X, 2])
assert_equal(func_interface.ediff1d(poly1), [X-1, 2-X])
poly2 = numpoly.polynomial([Y, 3, 4])
assert_equal(func_interface.ediff1d(poly2), [3-Y, 1])
def test_expand_dims(func_interface):
"""Tests for numpoly.expand_dims."""
poly1 = numpoly.polynomial([[1, X], [Y, 2], [3, 4]])
assert func_interface.expand_dims(poly1, axis=0).shape == (1, 3, 2)
assert func_interface.expand_dims(poly1, axis=1).shape == (3, 1, 2)
assert func_interface.expand_dims(poly1, axis=2).shape == (3, 2, 1)
array = numpy.arange(12).reshape(2, 3, 2)
assert func_interface.expand_dims(array, axis=1).shape == (2, 1, 3, 2)
def test_equal(interface):
"""Tests for numpoly.equal."""
poly = polynomial([[0, 2+Y], [X, 2]])
assert_equal(interface.equal(X, X), True)
assert_equal(interface.equal(X, [X]), [True])
assert_equal([X] == X, [True])
assert_equal(interface.equal(X, Y), False)
assert_equal(([X, 2+Y] == poly), [[False, True], [True, False]])
assert_equal(interface.equal(numpoly.polynomial([X, 2+Y]), poly),
[[False, True], [True, False]])
assert_equal((X == poly), [[False, False], [True, False]])
assert_equal(interface.equal(X, poly), [[False, False], [True, False]])
assert_equal(poly == poly.T, [[True, False], [False, True]])
assert_equal(interface.equal(poly, poly.T), [[True, False], [False, True]])
def test_floor(func_interface):
"""Tests for numpoly.floor."""
poly = polynomial([-1.7*X, X-1.5, -0.2, 3.2+1.5*X, 1.7, 2.0])
assert_equal(func_interface.floor(poly),
[-2.0*X, -2.0+X, -1.0, 3.0+X, 1.0, 2.0])
def test_floor_divide(interface):
"""Tests for numpoly.floor_divide."""
poly = polynomial([[0., 2.*Y], [X, 2.]])
assert_equal(interface.floor_divide(poly, 2), [[0., Y], [0., 1]])
assert_equal(interface.floor_divide(poly, [1., 2.]), [[0., Y], [X, 1]])
assert_equal(interface.floor_divide(
poly, [[1., 2.], [2., 1.]]), [[0., Y], [0., 2.]])
with raises(numpoly.FeatureNotSupported):
interface.floor_divide(poly, X)
with raises(numpoly.FeatureNotSupported):
poly.__floordiv__(poly)
with raises(numpoly.FeatureNotSupported):
poly.__rfloordiv__(poly)
out = numpoly.ndpoly(
exponents=poly.exponents,
shape=(2, 2),
names=("q0", "q1"),
dtype=float,
)
numpoly.floor_divide(poly, 2, out=out)
assert_equal(out, [[0., Y], [0., 1.]])
def test_full(func_interface):
"""Tests for numpoly.full."""
assert_equal(numpoly.full((3,), X), [X, X, X])
assert_equal(numpoly.aspolynomial(
func_interface.full((3,), 1.*X)), [1.*X, X, X])
assert_equal(numpoly.full((3,), Y, dtype=float), [1.*Y, Y, Y])
if func_interface is numpy: # fails in numpy, but only with func dispatch.
with raises(ValueError, ):
assert_equal(numpy.full((3,), Y, dtype=float), [1.*Y, Y, Y])
raise ValueError
def test_full_like(func_interface):
"""Tests for numpoly.full_like."""
poly = numpoly.polynomial([1, X, 2])
assert_equal(func_interface.full_like(poly, X), [X, X, X])
assert_equal(numpoly.full_like([1, X, 2], X), [X, X, X])
poly = numpoly.polynomial([1., X, 2])
assert_equal(func_interface.full_like(poly, Y), [1.*Y, Y, Y])
def test_greater(interface):
"""Tests for numpoly.greater."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(interface.greater(poly, X),
[[False, False, False, True],
[True, True, True, False],
[False, True, False, False],
[True, True, True, True]])
assert_equal(interface.greater(poly, Y),
[[False, False, False, True],
[False, False, True, False],
[False, True, False, False],
[True, True, False, False]])
def test_greater_equal(interface):
"""Tests for numpoly.greater_equal."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(interface.greater_equal(poly, X),
[[False, True, False, True],
[True, True, True, False],
[False, True, False, True],
[True, True, True, True]])
assert_equal(interface.greater_equal(poly, Y),
[[False, False, False, True],
[True, False, True, False],
[False, True, False, False],
[True, True, True, False]])
def test_hsplit(func_interface):
"""Tests for numpoly.hsplit."""
poly = numpoly.polynomial([[1, X, X**2], [X+Y, Y, Y]])
part1, part2, part3 = func_interface.hsplit(poly, 3)
assert_equal(part1, [[1], [X+Y]])
assert_equal(part2, [[X], [Y]])
assert_equal(part3, [[X**2], [Y]])
def test_hstack(func_interface):
"""Tests for numpoly.hstack."""
poly1 = numpoly.polynomial([1, X, 2])
poly2 = numpoly.polynomial([Y, 3, 4])
assert_equal(func_interface.hstack([poly1, poly2]),
[1, X, 2, Y, 3, 4])
def test_less(interface):
"""Tests for numpoly.less."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(interface.less(poly, X),
[[True, False, True, False],
[False, False, False, True],
[True, False, True, False],
[False, False, False, False]])
assert_equal(interface.less(poly, Y),
[[True, True, True, False],
[False, True, False, True],
[True, False, True, True],
[False, False, False, True]])
def test_less_equal(interface):
"""Tests for numpoly.less_equal."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(interface.less_equal(poly, X),
[[True, True, True, False],
[False, False, False, True],
[True, False, True, True],
[False, False, False, False]])
assert_equal(interface.less_equal(poly, Y),
[[True, True, True, False],
[True, True, False, True],
[True, False, True, True],
[False, False, True, True]])
def test_inner(func_interface):
"""Tests for numpoly.inner."""
poly1, poly2 = polynomial([[0, Y], [X+1, 1]])
assert_equal(func_interface.inner(poly1, poly2), Y)
def test_isclose(func_interface):
"""Tests for numpoly.isclose."""
poly1 = numpoly.polynomial([1e10*X, 1e-7])
poly2 = numpoly.polynomial([1.00001e10*X, 1e-8])
assert_equal(func_interface.isclose(poly1, poly2), [True, False])
poly1 = numpoly.polynomial([1e10*X, 1e-8])
poly2 = numpoly.polynomial([1.00001e10*X, 1e-9])
assert_equal(func_interface.isclose(poly1, poly2), [True, True])
poly2 = numpoly.polynomial([1e10*Y, 1e-8])
assert_equal(func_interface.isclose(poly1, poly2), [False, True])
def test_isfinite(func_interface):
"""Tests for numpoly.isfinite."""
assert_equal(func_interface.isfinite(X), True)
assert_equal(func_interface.isfinite(numpy.nan*X), False)
poly = numpoly.polynomial([numpy.log(-1.), X, numpy.log(0)])
assert_equal(func_interface.isfinite(poly), [False, True, False])
out = numpy.ones(3, dtype=bool)
print(out)
func_interface.isfinite(poly, out=(out,))
print(out)
assert_equal(out, [False, True, False])
def test_logical_and(func_interface):
"""Tests for numpoly.logical_and."""
poly1 = numpoly.polynomial([0, X])
poly2 = numpoly.polynomial([1, X])
poly3 = numpoly.polynomial([0, Y])
assert_equal(func_interface.logical_and(1, poly1), [False, True])
assert_equal(1 and poly1, poly1)
assert_equal(func_interface.logical_and(1, poly2), [True, True])
assert_equal(1 and poly2, poly2)
assert_equal(func_interface.logical_and(poly2, poly3), [False, True])
def test_logical_or(func_interface):
"""Tests for numpoly.logical_or."""
poly1 = numpoly.polynomial([0, X])
poly2 = numpoly.polynomial([1, X])
poly3 = numpoly.polynomial([0, Y])
assert_equal(func_interface.logical_or(1, poly1), [True, True])
assert_equal(1 or poly1, 1, type_=int)
assert_equal(func_interface.logical_or(0, poly1), [False, True])
assert_equal(0 or poly1, poly1)
assert_equal(func_interface.logical_or(poly2, poly3), [True, True])
def test_matmul(func_interface):
"""Tests for numpoly.matmul."""
poly1 = numpoly.polynomial([[0, X], [1, Y]])
poly2 = numpoly.polynomial([X, 2])
assert_equal(func_interface.matmul(
poly1, poly2), [[X**2, 2*X], [X+X*Y, 2+2*Y]])
assert_equal(func_interface.matmul(
numpy.ones((2, 5, 6, 4)), numpy.ones((2, 5, 4, 3))),
4*numpy.ones((2, 5, 6, 3)))
with raises(ValueError):
func_interface.matmul(poly1, 4)
with raises(ValueError):
func_interface.matmul(3, poly2)
def test_mean(interface):
"""Tests for numpoly.mean."""
poly = numpoly.polynomial([[1, 2*X], [3*Y+X, 4]])
assert_equal(interface.mean(poly), 1.25+0.75*Y+0.75*X)
assert_equal(interface.mean(poly, axis=0), [0.5+1.5*Y+0.5*X, 2.0+X])
assert_equal(interface.mean(poly, axis=1), [0.5+X, 2.0+1.5*Y+0.5*X])
def test_max(interface):
"""Tests for numpoly.max."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(interface.max(poly), X**4)
assert_equal(interface.max(poly, axis=0), [X**4, Y**2, X**2, Y**2])
assert_equal(interface.max(poly, axis=1), [X**2, X**2, Y**2, X**4])
assert_equal(interface.max(poly.reshape(2, 2, 2, 2), axis=(0, 1)),
[[X**4, Y**2], [X**2, Y**2]])
def test_maximum(func_interface):
"""Tests for numpoly.maximum."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(func_interface.maximum(poly, X),
[[X, X, X, X**2], [Y, Y-1, Y**2, X],
[X, X**2, X, X], [Y**2, X**4, Y, Y-1]])
assert_equal(func_interface.maximum(poly, Y),
[[Y, Y, Y, X**2], [Y, Y, Y**2, Y],
[Y, X**2, Y, Y], [Y**2, X**4, Y, Y]])
def test_min(interface):
"""Tests for numpoly.min."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(interface.min(poly), 1)
assert_equal(interface.min(poly, axis=0), [1, 3, 2, X])
assert_equal(interface.min(poly, axis=1), [1, 2, 3, Y])
assert_equal(interface.min(poly.reshape(2, 2, 2, 2), axis=(0, 1)),
[[1, 3], [2, X]])
def test_minimum(func_interface):
"""Tests for numpoly.minimum."""
poly = numpoly.polynomial([[1, X, X-1, X**2],
[Y, Y-1, Y**2, 2],
[X-1, X**2, 3, X],
[Y**2, X**4, Y, Y-1]])
assert_equal(func_interface.minimum(poly, X),
[[1, X, X-1, X], [X, X, X, 2],
[X-1, X, 3, X], [X, X, X, X]])
assert_equal(func_interface.minimum(poly, Y),
[[1, X, X-1, Y], [Y, Y-1, Y, 2],
[X-1, Y, 3, X], [Y, Y, Y, Y-1]])
def test_moveaxis():
"""Tests for numpoly.moveaxis."""
# np.moveaxis dispatching doesn't seem to work
x = numpy.arange(6).reshape(1, 2, 3)
assert_equal(numpoly.moveaxis(x, 0, -1),
[[[0], [1], [2]], [[3], [4], [5]]])
assert_equal(numpoly.moveaxis(x, [0, 2], [2, 0]),
[[[0], [3]], [[1], [4]], [[2], [5]]])
def test_multiply(func_interface):
"""Tests for numpoly.multiply."""
poly = polynomial([[0, 2+Y], [X, 2]])
assert_equal(2*poly, [[0, 4+2*Y], [2*X, 4]])
assert_equal(func_interface.multiply(2, poly), [[0, 4+2*Y], [2*X, 4]])
assert_equal([X, 1]*poly, [[0, 2+Y], [X*X, 2]])
assert_equal(func_interface.multiply([X, 1], poly), [[0, 2+Y], [X*X, 2]])
assert_equal([[X, 1], [Y, 0]]*poly, [[0, 2+Y], [X*Y, 0]])
assert_equal(
func_interface.multiply([[X, 1], [Y, 0]], poly), [[0, 2+Y], [X*Y, 0]])
def test_negative(func_interface):
"""Tests for numpoly.negative."""
poly = polynomial([[X, -Y], [-4, Y]])
assert_equal(-(X-Y-1), 1-X+Y)
assert_equal(func_interface.negative(X-Y-1), 1-X+Y)
assert_equal(func_interface.negative(poly), [[-X, Y], [4, -Y]])
def test_nonzero(interface):
"""Tests for numpoly.nonzero."""
poly = polynomial([[3*X, 0, 0], [0, 4*Y, 0], [5*X+Y, 6*X, 0]])
assert_equal(poly[interface.nonzero(poly)], [3*X, 4*Y, 5*X+Y, 6*X])
results = interface.nonzero(X)
assert_equal(results[0], [0])
def test_not_equal(interface):
"""Tests for numpoly.not_equal."""
poly = polynomial([[0, 2+Y], [X, 2]])
assert_equal(([X, 2+Y] != poly), [[True, False], [False, True]])
assert_equal(interface.not_equal(numpoly.polynomial([X, 2+Y]), poly),
[[True, False], [False, True]])
assert_equal((X != poly), [[True, True], [False, True]])
assert_equal(interface.not_equal(X, poly), [[True, True], [False, True]])
assert_equal(poly != poly.T, [[False, True], [True, False]])
assert_equal(
interface.not_equal(poly, poly.T), [[False, True], [True, False]])
def test_ones_like(func_interface):
"""Tests for numpoly.ones_like."""
poly = numpoly.polynomial([1, X, 2])
assert_equal(func_interface.ones_like(poly), [1, 1, 1])
assert_equal(numpoly.ones_like([1, X, 2]), [1, 1, 1])
poly = numpoly.polynomial([1., X, 2])
assert_equal(func_interface.ones_like(poly), [1., 1., 1.])
def test_outer(func_interface):
"""Tests for numpoly.outer."""
poly1, poly2 = polynomial([[0, Y], [X+1, 1]])
assert_equal(func_interface.outer(poly1, poly2), [[0, 0], [X*Y+Y, Y]])
def test_positive(func_interface):
"""Tests for numpoly.positive."""
poly = polynomial([[0, Y], [X, 1]])
assert_equal(poly, +poly)
assert_equal(poly, func_interface.positive(poly))
assert poly is not +poly
assert poly is not func_interface.positive(poly)
def test_power(func_interface):
"""Tests for numpoly.power."""
poly = polynomial([[0, Y], [X-1, 2]])
assert_equal(X**[2], [X**2])
assert_equal(func_interface.power(X, [2]), [X**2])
assert_equal(polynomial([X])**[2], [X**2])
assert_equal(func_interface.power(polynomial([X]), [2]), [X**2])
assert_equal(polynomial([X, Y])**[2], [X**2, Y**2])
assert_equal(func_interface.power(polynomial([X, Y]), [2]), [X**2, Y**2])
assert_equal(polynomial([X])**[1, 2], [X, X**2])
assert_equal(func_interface.power(polynomial([X]), [1, 2]), [X, X**2])
assert_equal((X*Y)**[0, 1, 2, 3], [1, X*Y, X**2*Y**2, X**3*Y**3])
assert_equal(func_interface.power(
X*Y, [0, 1, 2, 3]), [1, X*Y, X**2*Y**2, X**3*Y**3])
assert_equal(poly ** 2, [[0, Y**2], [X*X-2*X+1, 4]])
assert_equal(func_interface.power(poly, 2), [[0, Y**2], [X*X-2*X+1, 4]])
assert_equal(poly ** [1, 2], [[0, Y**2], [X-1, 4]])
assert_equal(func_interface.power(poly, [1, 2]), [[0, Y**2], [X-1, 4]])
assert_equal(poly ** [[1, 2], [2, 1]], [[0, Y**2], [X*X-2*X+1, 2]])
assert_equal(func_interface.power(
poly, [[1, 2], [2, 1]]), [[0, Y**2], [X*X-2*X+1, 2]])
def test_prod(interface):
"""Tests for numpoly.prod."""
poly = numpoly.polynomial([[1, X, X**2], [X+Y, Y, Y]])
assert_equal(interface.prod(poly), X**3*Y**3+X**4*Y**2)
assert_equal(interface.prod(poly, axis=0), [Y+X, X*Y, X**2*Y])
def test_remainder(func_interface):
"""Tests for numpoly.remainder."""
assert_equal(func_interface.remainder([7, 11], 5), [2, 1])
with raises(numpoly.FeatureNotSupported):
func_interface.remainder(X, X)
with raises(numpoly.FeatureNotSupported):
func_interface.remainder([1, 2], X)
def test_repeat(func_interface):
"""Tests for numpoly.repeat."""
poly = numpoly.polynomial([[1, X-1], [X**2, X]])
assert_equal(func_interface.repeat(poly, 2),
[[1, -1+X], [1, -1+X], [X**2, X], [X**2, X]])
assert_equal(func_interface.repeat(poly, 3, axis=1),
[[1, 1, 1, -1+X, -1+X, -1+X],
[X**2, X**2, X**2, X, X, X]])
assert_equal(numpoly.repeat(poly, [1, 2], axis=0),
[[1, -1+X], [X**2, X], [X**2, X]])
def test_reshape(interface):
"""Tests for numpoly.reshape."""
poly = numpoly.polynomial([[1, X, X**2], [X+Y, Y, Y]])
assert_equal(interface.reshape(poly, (3, 2)),
[[1, X], [X**2, X+Y], [Y, Y]])
assert_equal(interface.reshape(poly, 6),
[1, X, X**2, X+Y, Y, Y])
def test_result_type(func_interface):
"""Tests for numpoly.result_type."""
dtypes = ["uint8", "int16", "float32", "complex64"]
dtypes = [numpy.dtype(dtype) for dtype in dtypes]
for idx, dtype1 in enumerate(dtypes):
for dtype2 in dtypes[idx:]:
assert func_interface.result_type(3, dtype1, dtype2) == dtype2
assert func_interface.result_type(
numpoly.variable(dtype=dtype1),
numpy.arange(3, dtype=dtype2),
) == dtype2
def test_rint(func_interface):
"""Tests for numpoly.rint."""
poly = numpoly.polynomial([-1.7*X, X-1.5])
assert_equal(func_interface.rint(poly), [-2.*X, X-2.])
def test_roots(func_interface):
func_interface = numpoly
poly = [3.2, 2, 1]
assert_equal(func_interface.roots(poly),
[-0.3125+0.46351241j, -0.3125-0.46351241j])
poly = 3.2*Y**2+2*Y+1
assert_equal(func_interface.roots(poly),
[-0.3125+0.46351241j, -0.3125-0.46351241j])
with raises(ValueError):
func_interface.roots(X*Y)
with raises(ValueError):
func_interface.roots([[X, 1], [2, X]])
def test_split(func_interface):
"""Tests for numpoly.split."""
poly = numpoly.polynomial([[1, X, X**2], [X+Y, Y, Y]])
part1, part2 = func_interface.split(poly, 2, axis=0)
assert_equal(part1, [[1, X, X**2]])
assert_equal(part2, [[X+Y, Y, Y]])
part1, part2, part3 = func_interface.split(poly, 3, axis=1)
assert_equal(part1, [[1], [X+Y]])
assert_equal(part2, [[X], [Y]])
assert_equal(part3, [[X**2], [Y]])
def test_square(func_interface):
"""Tests for numpoly.square."""
assert_equal(func_interface.square(X+Y), X**2+2*X*Y+Y**2)
assert_equal((1+X)**2, 1+2*X+X**2)
def test_stack(func_interface):
"""Tests for numpoly.stack."""
poly = polynomial([1, X, Y])
assert_equal(
func_interface.stack([poly, poly], axis=0), [[1, X, Y], [1, X, Y]])
assert_equal(
func_interface.stack([poly, poly], axis=1), [[1, 1], [X, X], [Y, Y]])
def test_subtract(func_interface):
"""Tests for numpoly.subtract."""
assert_equal(-X+3, 3-X)
assert_equal(4 - polynomial([1, X, Y]), [3, 4-X, 4-Y])
assert_equal(
func_interface.subtract(4, polynomial([1, X, Y])), [3, 4-X, 4-Y])
assert_equal(polynomial([0, X]) - polynomial([Y, 0]), [-Y, X])
assert_equal(func_interface.subtract(polynomial([0, X]), [Y, 0]), [-Y, X])
assert_equal(polynomial([[1, X], [Y, X*Y]]) - [2, X],
[[-1, 0], [Y-2, X*Y-X]])
assert_equal(
func_interface.subtract(polynomial([[1, X], [Y, X*Y]]), [2, X]),
[[-1, 0], [Y-2, X*Y-X]])
def test_sum(interface):
"""Tests for numpoly.sum."""
poly = polynomial([[1, 5*X], [X+3, -Y]])
assert_equal(interface.sum(poly), -Y+X*6+4)
assert_equal(interface.sum(poly, axis=0), [X+4, -Y+X*5])
assert_equal(
interface.sum(poly, axis=-1, keepdims=True), [[X*5+1], [X-Y+3]])
def test_transpose(func_interface):
"""Tests for numpoly.transpose."""
poly = numpoly.polynomial([[1, X-1], [X**2, X]])
assert_equal(func_interface.transpose(poly),
[[1, X**2], [X-1, X]])
assert_equal(
poly.T, [[1, X**2], [X-1, X]], c_contiguous=False, f_contiguous=True)
def test_true_divide(func_interface):
"""Tests for numpoly.true_divide."""
poly = polynomial([[0, Y], [X, 1]])
assert_equal(func_interface.true_divide(
poly, 2), polynomial([[0, 0.5*Y], [0.5*X, 0.5]]))
assert_equal(func_interface.true_divide(
poly, [1, 2]), [[0, 0.5*Y], [X, 0.5]])
assert_equal(func_interface.true_divide(
poly, [[1, 2], [2, 1]]), [[0, 0.5*Y], [0.5*X, 1]])
with raises(numpoly.FeatureNotSupported):
func_interface.true_divide(poly, X)
def test_vsplit(func_interface):
"""Tests for numpoly.vsplit."""
poly = numpoly.polynomial([[1, X, X**2], [X+Y, Y, Y]])
part1, part2 = func_interface.vsplit(poly, 2)
assert_equal(part1, [[1, X, X**2]])
assert_equal(part2, [[X+Y, Y, Y]])
def test_vstack(func_interface):
"""Tests for numpoly.vstack."""
poly1 = numpoly.polynomial([1, X, 2])
poly2 = numpoly.polynomial([Y, 3, 4])
assert_equal(func_interface.vstack([poly1, poly2]), [[1, X, 2], [Y, 3, 4]])
def test_where(func_interface):
"""Tests for numpoly.where."""
poly1 = numpoly.polynomial([0, 4, 0])
poly2 = numpoly.polynomial([Y, 0, X])
assert_equal(func_interface.where([0, 1, 0], poly1, poly2), [Y, 4, X])
assert_equal(func_interface.where([1, 0, 1], poly1, poly2), [0, 0, 0])
assert_equal(func_interface.where(poly1)[0], [1])
assert_equal(func_interface.where(poly2)[0], [0, 2])
def test_zeros_like(func_interface):
"""Tests for numpoly.zeros_like."""
poly = numpoly.polynomial([1, X, 2])
assert_equal(func_interface.zeros_like(poly), [0, 0, 0])
assert_equal(numpoly.zeros_like([1, X, 2]), [0, 0, 0])
poly = numpoly.polynomial([1., X, 2])
assert_equal(func_interface.zeros_like(poly), [0., 0., 0.])
```
#### File: npoly/test/test_dispatch.py
```python
from numpoly import dispatch
def test_collection_sizes():
collection_diff = set(dispatch.FUNCTION_COLLECTION).symmetric_difference(dispatch.UFUNC_COLLECTION)
```
#### File: npoly/test/test_save_load.py
```python
from tempfile import TemporaryFile
import pickle
import pytest
import numpy
import numpoly
X, Y, Z = numpoly.variable(3)
ARRAY = numpy.array([1, 2, 3])
POLY = numpoly.polynomial([1, X, Z**2-1])
def test_save(func_interface):
outfile = TemporaryFile()
func_interface.save(outfile, X)
func_interface.save(outfile, ARRAY)
func_interface.save(outfile, POLY)
outfile.seek(0)
assert numpy.all(numpoly.load(outfile) == X)
assert numpy.all(numpoly.load(outfile) == ARRAY)
assert numpy.all(numpoly.load(outfile) == POLY)
with open("/tmp/numpoly_save.npy", "wb") as dst:
func_interface.save(dst, X)
func_interface.save(dst, ARRAY)
func_interface.save(dst, POLY)
with open("/tmp/numpoly_save.npy", "rb") as src:
assert numpoly.load(src) == X
assert numpy.all(numpoly.load(src) == ARRAY)
assert numpy.all(numpoly.load(src) == POLY)
def test_savez(func_interface):
outfile = TemporaryFile()
func_interface.savez(outfile, x=X, array=ARRAY, poly=POLY)
outfile.seek(0)
results = numpoly.load(outfile)
assert results["x"] == X
assert numpy.all(results["array"] == ARRAY)
assert numpy.all(results["poly"] == POLY)
with open("/tmp/numpoly_savez.npy", "wb") as dst:
func_interface.savez(dst, x=X, array=ARRAY, poly=POLY)
with open("/tmp/numpoly_savez.npy", "rb") as src:
results = numpoly.load(src)
assert results["x"] == X
assert numpy.all(results["array"] == ARRAY)
assert numpy.all(results["poly"] == POLY)
def test_savez_compressed(func_interface):
outfile = TemporaryFile()
func_interface.savez_compressed(outfile, x=X, array=ARRAY, poly=POLY)
outfile.seek(0)
results = numpoly.load(outfile)
assert results["x"] == X
assert numpy.all(results["array"] == ARRAY)
assert numpy.all(results["poly"] == POLY)
with open("/tmp/numpoly_savezc.npy", "wb") as dst:
func_interface.savez_compressed(dst, x=X, array=ARRAY, poly=POLY)
with open("/tmp/numpoly_savezc.npy", "rb") as src:
results = numpoly.load(src)
assert results["x"] == X
assert numpy.all(results["array"] == ARRAY)
assert numpy.all(results["poly"] == POLY)
def test_savetxt(func_interface):
outfile = TemporaryFile()
func_interface.savetxt(outfile, POLY)
outfile.seek(0)
assert numpy.all(numpoly.loadtxt(outfile) == POLY)
with open("/tmp/numpoly_save.npy", "wb") as dst:
func_interface.savetxt(dst, POLY)
with open("/tmp/numpoly_save.npy", "rb") as src:
assert numpy.all(numpoly.loadtxt(src) == POLY)
def test_pickle():
with open("/tmp/numpoly_pickle.pkl", "wb") as dst:
pickle.dump(POLY, dst)
with open("/tmp/numpoly_pickle.pkl", "rb") as src:
assert numpy.all(pickle.load(src) == POLY)
``` |
{
"source": "jonathf/pyvroom",
"score": 2
} |
#### File: src/vroom/__init__.py
```python
import sys
from typing import Optional, Sequence
from ._vroom import _main, JOB_TYPE, STEP_TYPE # type: ignore
from .amount import Amount
from .break_ import Break
from .job import Job, ShipmentStep, Shipment
from .location import Location, LocationCoordinates, LocationIndex
from .time_window import TimeWindow
from .vehicle import Vehicle
from .input.forced_service import ForcedService
from .input.input import Input
from .input.vehicle_step import (
VehicleStep,
VehicleStepStart,
VehicleStepEnd,
VehicleStepBreak,
VehicleStepSingle,
VehicleStepPickup,
VehicleStepDelivery,
VEHICLE_STEP_TYPE,
)
def main(argv: Optional[Sequence[str]] = None) -> None:
"""Run VROOM command line interface."""
_main(sys.argv if argv is None else argv)
```
#### File: src/vroom/job.py
```python
from typing import Any, Dict, List, Optional, Sequence, Set, Union
import numpy
from . import _vroom
from .amount import Amount
from .location import Location, LocationCoordinates, LocationIndex
from .time_window import TimeWindow
class JobBaseclass:
"""Baseclass for all Job classes containing common attributes."""
_id: int
_location: Location
_setup: int
_service: int
_time_windows: Sequence[TimeWindow]
_description: str
def _get_attributes(self) -> Dict[str, Any]:
"""Arguments to be used in repr view."""
return {
"id": self.id,
"location": self.location,
"setup": self.setup,
"service": self.service,
"time_windows": self.time_windows,
"description": self.description,
}
@property
def description(self) -> str:
return self._description
@property
def id(self) -> int:
return self._id
@property
def location(self) -> Location:
"""
The location where to go.
Either by index (used with duration matrix) or
by coordinate (used with map server).
"""
return Location(self._location)
@property
def service(self) -> int:
return self._service
@property
def setup(self) -> int:
return self._setup
@property
def time_windows(self) -> List[TimeWindow]:
"""Time window for when job can be delivered."""
return [TimeWindow(tw) for tw in self._time_windows]
def __repr__(self) -> str:
attributes = self._get_attributes()
args = [f"{self.id}"]
if isinstance(attributes["location"], LocationIndex):
args.append(f"{self.location.index}")
elif isinstance(attributes["location"], LocationCoordinates):
args.append(f"{self.location.coords}")
else:
args.append(f"{self.location}")
if attributes["setup"]:
args.append(f"setup={attributes['setup']}")
if attributes["service"]:
args.append(f"service={attributes['service']}")
if attributes.get("amount", False):
args.append(f"amount={numpy.asarray(attributes['amount']).tolist()}")
if attributes.get("delivery", False):
args.append(f"delivery={numpy.asarray(attributes['delivery']).tolist()}")
if attributes.get("pickup", False):
args.append(f"pickup={numpy.asarray(attributes['pickup']).tolist()}")
if attributes["time_windows"] != [TimeWindow()]:
windows = [(tw.start, tw.end) for tw in attributes["time_windows"]]
args.append(f"time_windows={windows}")
if attributes["description"]:
args.append(f"description={attributes['description']!r}")
return f"vroom.{self.__class__.__name__}({', '.join(args)})"
class Job(_vroom.Job, JobBaseclass):
"""A regular one-stop job with both a deliver and pickup that has to be performed.
Args:
id:
Job identifier number. Two jobs can not have the same
identifier.
location:
Location of the job. If interger, value interpreted as an the
column in duration matrix. If pair of numbers, value
interpreted as longitude and latitude coordinates respectively.
setup:
The cost of preparing the vehicle before actually going out for
a job.
service:
The time (in secondes) it takes to pick up/deliver shipment
when at customer.
delivery:
An interger representation of how much is being carried to
customer.
pickup:
An interger representation of how much is being carried back
from customer.
skills:
Skills required to perform job. Only vehicles which satisfies
all required skills (i.e. has at minimum all skills values
required) are allowed to perform this job.
priority:
The job priority level, where 0 is the most
important and 100 is the least important.
time_windows:
Windows for where service is allowed to begin.
Defaults to have not restraints.
description:
Optional string descriping the job.
Examples:
>>> vroom.Job(0, [4., 5.], delivery=[4], pickup=[7])
vroom.Job(0, (4.0, 5.0), delivery=[4], pickup=[7])
"""
def __init__(
self,
id: int,
location: Union[Location, int, Sequence[float]],
setup: int = 0,
service: int = 0,
delivery: Amount = Amount(),
pickup: Amount = Amount(),
skills: Optional[Set[int]] = None,
priority: int = 0,
time_windows: Sequence[TimeWindow] = (),
description: str = "",
) -> None:
if not pickup:
if not delivery:
pickup = Amount([])
delivery = Amount([])
else:
pickup = Amount([0] * len(delivery))
elif not delivery:
delivery = Amount([0] * len(pickup))
_vroom.Job.__init__(
self,
id=int(id),
location=Location(location),
setup=int(setup),
service=int(service),
delivery=Amount(delivery),
pickup=Amount(pickup),
skills=set(skills or []),
priority=int(priority),
tws=[TimeWindow(tw) for tw in time_windows] or [TimeWindow()],
description=str(description),
)
@property
def delivery(self) -> Amount:
return Amount(self._delivery)
@property
def pickup(self) -> Amount:
return Amount(self._pickup)
@property
def skills(self) -> int:
return self._skills
@property
def priority(self) -> int:
return self._priority
def _get_attributes(self) -> Dict[str, Any]:
"""Arguments to be used in repr view."""
attributes = super()._get_attributes()
if self._pickup:
attributes["pickup"] = self.pickup
if self._delivery:
attributes["delivery"] = self.delivery
if self._skills:
attributes["skills"] = self.skills
if self._priority:
attributes["priority"] = self.priority
return attributes
class ShipmentStep(JobBaseclass):
"""A delivery job that has to be performed.
Args:
id:
Job identifier number. Two jobs can not have the same
identifier.
location:
Location of the job. If interger, value interpreted as an the
column in duration matrix. If pair of numbers, value
interpreted as longitude and latitude coordinates respectively.
setup:
The cost of preparing the vehicle before actually going out for
a job.
service:
The time (in secondes) it takes to pick up/deliver shipment
when at customer.
time_windows:
Windows for where service is allowed to begin.
Defaults to have not restraints.
description:
Optional string descriping the job.
Examples:
>>> vroom.ShipmentStep(0, [4., 5.])
vroom.ShipmentStep(0, (4.0, 5.0))
"""
def __init__(
self,
id: int,
location: Union[Location, int, Sequence[float]],
setup: int = 0,
service: int = 0,
time_windows: Sequence[TimeWindow] = (),
description: str = "",
) -> None:
self._id = int(id)
self._location = Location(location)
self._setup = int(setup)
self._service = int(service)
self._time_windows = [TimeWindow(tw) for tw in time_windows] or [TimeWindow()]
self._description = str(description)
class Shipment:
"""A shipment that has to be performed.
Args:
pickup:
Description of the pickup part of the shipment.
delivery:
Description of the delivery part of the shipment.
amount:
An interger representation of how much is being carried back
from customer.
skills:
Skills required to perform job. Only vehicles which satisfies
all required skills (i.e. has at minimum all skills values
required) are allowed to perform this job.
priority:
The job priority level, where 0 is the most
important and 100 is the least important.
Examples:
>>> pickup = vroom.ShipmentStep(0, [4., 5.])
>>> delivery = vroom.ShipmentStep(1, [5., 4.])
>>> vroom.Shipment(pickup, delivery, amount=[7]) # doctest: +NORMALIZE_WHITESPACE
vroom.Shipment(vroom.ShipmentStep(0, (4.0, 5.0)),
vroom.ShipmentStep(1, (5.0, 4.0)),
amount=[7])
"""
def __init__(
self,
pickup: ShipmentStep,
delivery: ShipmentStep,
amount: Amount = Amount(),
skills: Optional[Set[int]] = None,
priority: int = 0,
) -> None:
self.pickup = pickup
self.delivery = delivery
self.amount = Amount(amount)
self.skills = skills or set()
self.priority = int(priority)
def __repr__(self) -> str:
args = [str(self.pickup), str(self.delivery)]
if self.amount:
args.append(f"amount={numpy.asarray(self.amount).tolist()}")
if self.skills:
args.append(f"skills={self.skills}")
if self.priority:
args.append(f"priority={self.priority}")
return f"vroom.{self.__class__.__name__}({', '.join(args)})"
```
#### File: pyvroom/test/test_job.py
```python
import vroom
JOB1 = vroom.Job(id=0, location=1, delivery=[4], pickup=[5])
JOB2 = vroom.Job(
id=1,
location=2,
setup=3,
service=4,
delivery=[5],
pickup=[6],
skills={7},
priority=8,
time_windows=[(9, 10)],
description="11",
)
JOB3 = vroom.Job(id=0, location=1)
PICKUP1 = vroom.ShipmentStep(id=0, location=1)
DELIVERY1 = vroom.ShipmentStep(id=0, location=1)
SHIPMENT1 = vroom.Shipment(DELIVERY1, PICKUP1)
PICKUP2 = vroom.ShipmentStep(
id=1,
location=2,
setup=3,
service=4,
time_windows=[(9, 10)],
description="12",
)
DELIVERY2 = vroom.ShipmentStep(
id=1,
location=2,
setup=3,
service=4,
time_windows=[(9, 10)],
description="11",
)
SHIPMENT2 = vroom.Shipment(
PICKUP2,
DELIVERY2,
amount=[6],
skills={7},
priority=8,
)
def test_job_repr():
assert repr(JOB1) == "vroom.Job(0, 1, delivery=[4], pickup=[5])"
assert repr(JOB2) == "vroom.Job(1, 2, setup=3, service=4, delivery=[5], pickup=[6], time_windows=[(9, 10)], description='11')"
assert repr(JOB3) == "vroom.Job(0, 1)"
assert repr(PICKUP1) == "vroom.ShipmentStep(0, 1)"
assert repr(PICKUP2) == "vroom.ShipmentStep(1, 2, setup=3, service=4, time_windows=[(9, 10)], description='12')"
assert repr(DELIVERY1) == "vroom.ShipmentStep(0, 1)"
assert repr(DELIVERY2) == "vroom.ShipmentStep(1, 2, setup=3, service=4, time_windows=[(9, 10)], description='11')"
assert repr(SHIPMENT1) == "vroom.Shipment(vroom.ShipmentStep(0, 1), vroom.ShipmentStep(0, 1))"
assert repr(SHIPMENT2) == ("vroom.Shipment("
"vroom.ShipmentStep(1, 2, setup=3, service=4, time_windows=[(9, 10)], description='12'), "
"vroom.ShipmentStep(1, 2, setup=3, service=4, time_windows=[(9, 10)], description='11'), "
"amount=[6], skills={7}, priority=8)")
def test_job_attributes():
assert JOB2.id == 1
assert JOB2.location == vroom.Location(2)
assert JOB2.setup == 3
assert JOB2.service == 4
assert not hasattr(JOB2, "amount")
assert JOB2.delivery == vroom.Amount([5])
assert JOB2.pickup == vroom.Amount([6])
assert JOB2.skills == {7}
assert JOB2.priority == 8
assert JOB2.time_windows == [vroom.TimeWindow(9, 10)]
assert JOB2.description == "11"
assert JOB3.delivery == vroom.Amount([])
assert JOB3.pickup == vroom.Amount([])
assert DELIVERY2.id == 1
assert DELIVERY2.location == vroom.Location(2)
assert DELIVERY2.setup == 3
assert DELIVERY2.service == 4
assert not hasattr(DELIVERY2, "delivery")
assert not hasattr(DELIVERY2, "pickup")
assert DELIVERY2.time_windows == [vroom.TimeWindow(9, 10)]
assert DELIVERY2.description == "11"
assert PICKUP2.id == 1
assert PICKUP2.location == vroom.Location(2)
assert PICKUP2.setup == 3
assert PICKUP2.service == 4
assert not hasattr(PICKUP2, "delivery")
assert not hasattr(PICKUP2, "pickup")
assert PICKUP2.time_windows == [vroom.TimeWindow(9, 10)]
assert PICKUP2.description == "12"
assert SHIPMENT2.amount == vroom.Amount([6])
assert SHIPMENT2.skills == {7}
assert SHIPMENT2.priority == 8
``` |
{
"source": "jonathf/wapi-python",
"score": 2
} |
#### File: wapi-python/wapi/session.py
```python
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
import requests
import json
import time
import warnings
from past.types import basestring
import configparser
from . import auth, curves, events, util
from .util import CurveException
RETRY_COUNT = 4 # Number of times to retry
RETRY_DELAY = 0.5 # Delay between retried calls, in seconds.
TIMEOUT = 300 # Default timeout for web calls, in seconds.
API_URLBASE = 'https://api.wattsight.com'
AUTH_URLBASE = 'https://auth.wattsight.com'
class ConfigException(Exception):
pass
class MetadataException(Exception):
pass
class Session(object):
""" Establish a connection to Wattsight API
Creates an object that holds the state which is needed when talking to the
Wattsight data center. To establish a session, you have to provide
suthentication information either directly by using a ```client_id` and
``client_secret`` or using a ``config_file`` .
See https://api.wattsight.com/#documentation for information how to get
your authentication data.
Parameters
----------
urlbase: url
Location of Wattsight service
config_file: path
path to the config.ini file which contains your authentication
information.
client_id: str
Your client ID
client_secret:
Your client secret.
auth_urlbase: url
Location of Wattsight authentication service
timeout: float
Timeout for REST calls, in seconds
Returns
-------
session: :class:`wapi.session.Session` object
"""
def __init__(self, urlbase=None, config_file=None, client_id=None, client_secret=None,
auth_urlbase=None, timeout=None):
self.urlbase = API_URLBASE
self.auth = None
self.timeout = TIMEOUT
self._session = requests.Session()
if config_file is not None:
self.read_config_file(config_file)
elif client_id is not None and client_secret is not None:
self.configure(client_id, client_secret, auth_urlbase)
if urlbase is not None:
self.urlbase = urlbase
if timeout is not None:
self.timeout = timeout
def read_config_file(self, config_file):
"""Set up according to configuration file with hosts and access details"""
if self.auth is not None:
raise ConfigException('Session configuration is already done')
config = configparser.RawConfigParser()
# Support being given a file-like object or a file path:
if hasattr(config_file, 'read'):
config.read_file(config_file)
else:
files_read = config.read(config_file)
if not files_read:
raise ConfigException('Configuration file with name {} '
'was not found.'.format(config_file))
urlbase = config.get('common', 'urlbase', fallback=None)
if urlbase is not None:
self.urlbase = urlbase
auth_type = config.get('common', 'auth_type')
if auth_type == 'OAuth':
client_id = config.get(auth_type, 'id')
client_secret = config.get(auth_type, 'secret')
auth_urlbase = config.get(auth_type, 'auth_urlbase', fallback=AUTH_URLBASE)
self.auth = auth.OAuth(self, client_id, client_secret, auth_urlbase)
timeout = config.get('common', 'timeout', fallback=None)
if timeout is not None:
self.timeout = float(timeout)
def configure(self, client_id, client_secret, auth_urlbase=None):
"""Programmatically set authentication parameters"""
if self.auth is not None:
raise ConfigException('Session configuration is already done')
if auth_urlbase is None:
auth_urlbase = AUTH_URLBASE
self.auth = auth.OAuth(self, client_id, client_secret, auth_urlbase)
def get_curve(self, id=None, name=None):
"""Getting a curve object
Return a curve object of the correct type. Name should be specified.
While it is possible to get a curve by id, this is not guaranteed to be
long-term stable and will be removed in future versions.
Parameters
----------
id: int
curve id (deprecated)
name: str
curve name
Returns
-------
curve object
Curve objects, can be one of:
:class:`~wapi.curves.TimeSeriesCurve`,
:class:`~wapi.curves.TaggedCurve`,
:class:`~wapi.curves.InstanceCurve`,
:class:`~wapi.curves.TaggedInstanceCurve`.
"""
if id is not None:
warnings.warn("Looking up a curve by ID will be removed in the future.", FutureWarning, stacklevel=2)
if id is None and name is None:
raise MetadataException('No curve specified')
if id is not None:
arg = util.make_arg('id', id)
else:
arg = util.make_arg('name', name)
response = self.data_request('GET', self.urlbase, '/api/curves/get?{}'.format(arg))
return self.handle_single_curve_response(response)
def search(self, query=None, id=None, name=None, commodity=None, category=None, area=None, station=None,
source=None, scenario=None, unit=None, time_zone=None, version=None, frequency=None, data_type=None,
curve_state=None, modified_since=None, only_accessible=None):
"""
Search for a curve matching various metadata.
This function searches for curves that matches the given search
parameters and returns a list of 0 or more curve objects.
A curve object can be a
:class:`~wapi.curves.TimeSeriesCurve`,
:class:`~wapi.curves.TaggedCurve`,
:class:`~wapi.curves.InstanceCurve` or a
:class:`~wapi.curves.TaggedInstanceCurve` object.
The search will return those curves matching all supplied parameters
(logical AND). For most parameters, a list of values may be supplied.
The search will match any of these values (logical OR). If a single
value contains a string with comma-separated values, these will be
treated as a list but will match with logical AND. (This only makes
sense for parameters where a curve may have multiple values:
area (border curves), category, source and scenario.)
For more details, see the REST documentation.
Parameters
----------
query: str
A query string used for a language-aware text search on both names
and descriptions of the various attributes in the curve.
id: int or lits of int
search for one or more specific id's (deprecated)
name: str or list of str
search for one or more curve names, you can use the ``*`` as
a wildcard for patter matching.
commodity: str or list of str
search for curves that match the given ``commodity`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_commodities`
category: str or list of str
search for curves that match the given ``category`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_categories`
area: str or list of str
search for curves that match the given ``area`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_areas`
station: str or list of str
search for curves that match the given ``station`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_stations`
source: str or list of str
search for curves that match the given ``source`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_sources`
scenario: str or list of str
search for curves that match the given ``scenario`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_scenarios`
unit: str or list of str
search for curves that match the given ``unit`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_units`
time_zone: str or list of str
search for curves that match the given ``time_zone`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_time_zones`
version: str or list of str
search for curves that match the given ``version`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_versions`
frequency: str or list of str
search for curves that match the given ``frequency`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_frequencies`
data_type: str or list of str
search for curves that match the given ``data_type`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_data_types`
curve_state: str or list of str
search for curves that match the given ``curve_state`` attribute.
Get valid values for this attribute with
:meth:`wapi.session.Session.get_curve_state`
modified_since: datestring, pandas.Timestamp or datetime.datetime
only return curves that where modified after given datetime.
only_accessible: bool
If True, only return curves you have (some) access to.
Returns
-------
curves: list
list of curve objects, can be one of:
:class:`~wapi.curves.TimeSeriesCurve`,
:class:`~wapi.curves.TaggedCurve`,
:class:`~wapi.curves.InstanceCurve`,
:class:`~wapi.curves.TaggedInstanceCurve`.
"""
search_terms = {
'query': query,
'id': id,
'name': name,
'commodity': commodity,
'category': category,
'area': area,
'station': station,
'source': source,
'scenario': scenario,
'unit': unit,
'time_zone': time_zone,
'version': version,
'frequency': frequency,
'data_type': data_type,
'curve_state': curve_state,
'modified_since': modified_since,
'only_accessible': only_accessible,
}
if id is not None:
warnings.warn("Searching for curves by ID will be removed in the future.", FutureWarning, stacklevel=2)
args = []
astr = ''
for key, val in search_terms.items():
if val is None:
continue
args.append(util.make_arg(key, val))
if len(args):
astr = "?{}".format("&".join(args))
# Now run the search, and try to produce a list of curves
response = self.data_request('GET', self.urlbase, '/api/curves{}'.format(astr))
return self.handle_multi_curve_response(response)
def make_curve(self, id, curve_type):
"""Return a mostly uninitialized curve object of the correct type.
This is generally a bad idea, use get_curve or search when possible."""
if curve_type in self._curve_types:
return self._curve_types[curve_type](id, None, self)
raise CurveException('Bad curve type requested')
def events(self, curve_list, start_time=None, timeout=None):
"""Get an event listener for a list of curves."""
return events.EventListener(self, curve_list, start_time=start_time, timeout=timeout)
_attributes = {'commodities', 'categories', 'areas', 'stations', 'sources', 'scenarios',
'units', 'time_zones', 'versions', 'frequencies', 'data_types',
'curve_states', 'curve_types', 'functions', 'filters'}
def get_commodities(self):
"""
Get valid values for the commodity attribute
"""
return self.get_attribute('commodities')
def get_categories(self):
"""
Get valid values for the category attribute
"""
return self.get_attribute('categories')
def get_areas(self):
"""
Get valid values for the area attribute
"""
return self.get_attribute('areas')
def get_stations(self):
"""
Get valid values for the station attribute
"""
return self.get_attribute('stations')
def get_sources(self):
"""
Get valid values for the source attribute
"""
return self.get_attribute('sources')
def get_scenarios(self):
"""
Get valid values for the scenarios attribute
"""
return self.get_attribute('scenarios')
def get_units(self):
"""
Get valid values for the unit attribute
"""
return self.get_attribute('units')
def get_time_zones(self):
"""
Get valid values for the time zone attribute
"""
return self.get_attribute('time_zones')
def get_versions(self):
"""
Get valid values for the version attribute
"""
return self.get_attribute('versions')
def get_frequencies(self):
"""
Get valid values for the frequency attribute
"""
return self.get_attribute('frequencies')
def get_data_types(self):
"""
Get valid values for the data_type attribute
"""
return self.get_attribute('data_types')
def get_curve_states(self):
"""
Get valid values for the curve_state attribute
"""
return self.get_attribute('curve_states')
def get_curve_types(self):
"""
Get valid values for the curve_type attribute
"""
return self.get_attribute('curve_types')
def get_functions(self):
"""
Get valid values for the function attribute
"""
return self.get_attribute('functions')
def get_filters(self):
"""
Get valid values for the filter attribute
"""
return self.get_attribute('filters')
def get_attribute(self, attribute):
"""Get valid values for an attribute."""
if attribute not in self._attributes:
raise MetadataException('Attribute {} is not valid'.format(attribute))
response = self.data_request('GET', self.urlbase, '/api/{}'.format(attribute))
if response.status_code == 200:
return response.json()
elif response.status_code == 204:
return None
raise MetadataException('Failed loading {}: {}'.format(attribute,
response.content.decode()))
_curve_types = {
util.TIME_SERIES: curves.TimeSeriesCurve,
util.TAGGED: curves.TaggedCurve,
util.INSTANCES: curves.InstanceCurve,
util.TAGGED_INSTANCES: curves.TaggedInstanceCurve,
}
_meta_keys = ('id', 'name', 'frequency', 'time_zone', 'curve_type')
def _build_curve(self, metadata):
for key in self._meta_keys:
if key not in metadata:
raise MetadataException('Mandatory key {} not found in metadata'.format(key))
curve_id = int(metadata['id'])
if metadata['curve_type'] in self._curve_types:
c = self._curve_types[metadata['curve_type']](curve_id, metadata, self)
return c
raise CurveException('Unknown curve type ({})'.format(metadata['curve_type']))
def data_request(self, req_type, urlbase, url, data=None, rawdata=None, authval=None,
stream=False, retries=RETRY_COUNT):
"""Run a call to the backend, dealing with authentication etc."""
headers = {}
if not urlbase:
urlbase = self.urlbase
longurl = urljoin(urlbase, url)
databytes = None
if data is not None:
headers['content-type'] = 'application/json'
if isinstance(data, basestring):
databytes = data.encode()
else:
databytes = json.dumps(data).encode()
if data is None and rawdata is not None:
databytes = rawdata
if self.auth is not None:
self.auth.validate_auth()
headers.update(self.auth.get_headers(databytes))
timeout = None
try:
res = self._session.request(method=req_type, url=longurl, data=databytes,
headers=headers, auth=authval, stream=stream, timeout=self.timeout)
except requests.exceptions.Timeout as e:
timeout = e
res = None
if (timeout is not None or (500 <= res.status_code < 600) or res.status_code == 408) and retries > 0:
if RETRY_DELAY > 0:
time.sleep(RETRY_DELAY)
return self.data_request(req_type, urlbase, url, data, rawdata, authval, stream, retries-1)
if timeout is not None:
raise timeout
return res
def handle_single_curve_response(self, response):
if not response.ok:
raise MetadataException('Failed to load curve: {}'
.format(response.content.decode()))
metadata = response.json()
return self._build_curve(metadata)
def handle_multi_curve_response(self, response):
if not response.ok:
raise MetadataException('Curve search failed: {}'
.format(response.content.decode()))
metadata_list = response.json()
result = []
for metadata in metadata_list:
result.append(self._build_curve(metadata))
return result
``` |
{
"source": "jonathlt/first-step-functions-python",
"score": 3
} |
#### File: first-step-functions-python/src/ValidateName.py
```python
valid_names = ["Danilo", "Daniel", "Dani"]
class NameError(Exception): pass
def handler(event, context):
name = event.get("name",None)
if name:
if name in valid_names:
return event
else:
raise NameError("WrongName")
else:
raise NameError("NoName")
``` |
{
"source": "jonathom/openeo-python-client",
"score": 2
} |
#### File: openeo-python-client/openeo/processes.py
```python
import builtins
from openeo.internal.processes.builder import ProcessBuilderBase, UNSET
class ProcessBuilder(ProcessBuilderBase):
def __add__(self, other) -> 'ProcessBuilder':
return self.add(other)
def __radd__(self, other) -> 'ProcessBuilder':
return add(other, self)
def __sub__(self, other) -> 'ProcessBuilder':
return self.subtract(other)
def __rsub__(self, other) -> 'ProcessBuilder':
return subtract(other, self)
def __mul__(self, other) -> 'ProcessBuilder':
return self.multiply(other)
def __rmul__(self, other) -> 'ProcessBuilder':
return multiply(other, self)
def __truediv__(self, other) -> 'ProcessBuilder':
return self.divide(other)
def __rtruediv__(self, other) -> 'ProcessBuilder':
return divide(other, self)
def __neg__(self) -> 'ProcessBuilder':
return self.multiply(-1)
def __pow__(self, other) -> 'ProcessBuilder':
return self.power(other)
def __getitem__(self, key) -> 'ProcessBuilder':
if isinstance(key, builtins.int):
return self.array_element(index=key)
else:
return self.array_element(label=key)
def __eq__(self, other) -> 'ProcessBuilder':
return eq(self, other)
def __ne__(self, other) -> 'ProcessBuilder':
return neq(self, other)
def absolute(self) -> 'ProcessBuilder':
"""
Absolute value
:param self: A number.
:return: The computed absolute value.
"""
return absolute(x=self)
def add(self, y) -> 'ProcessBuilder':
"""
Addition of two numbers
:param self: The first summand.
:param y: The second summand.
:return: The computed sum of the two numbers.
"""
return add(x=self, y=y)
def add_dimension(self, name, label, type=UNSET) -> 'ProcessBuilder':
"""
Add a new dimension
:param self: A data cube to add the dimension to.
:param name: Name for the dimension.
:param label: A dimension label.
:param type: The type of dimension, defaults to `other`.
:return: The data cube with a newly added dimension. The new dimension has exactly one dimension label.
All other dimensions remain unchanged.
"""
return add_dimension(data=self, name=name, label=label, type=type)
def aggregate_spatial(self, geometries, reducer, target_dimension=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Zonal statistics for geometries
:param self: A raster data cube. The data cube must have been reduced to only contain two spatial
dimensions and a third dimension the values are aggregated for, for example the temporal dimension to
get a time series. Otherwise, this process fails with the `TooManyDimensions` exception. The data cube
implicitly gets restricted to the bounds of the geometries as if ``filter_spatial()`` would have been
used with the same values for the corresponding parameters immediately before this process.
:param geometries: Geometries as GeoJSON on which the aggregation will be based. One value will be
computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple
values will be computed, one value per contained `Feature`. For example, a single value will be
computed for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two
polygons. - For **polygons**, the process considers all pixels for which the point at the pixel center
intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC). -
For **points**, the process considers the closest pixel center. - For **lines** (line strings), the
process considers all the pixels whose centers are closest to at least one point on the line. Thus,
pixels may be part of multiple geometries and be part of multiple aggregations. To maximize
interoperability, a nested `GeometryCollection` should be avoided. Furthermore, a `GeometryCollection`
composed of a single type of geometries should be avoided in favour of the corresponding multi-part
type (e.g. `MultiPolygon`).
:param reducer: A reducer to be applied on all values of each geometry. A reducer is a single process
such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the
category 'reducer' for such processes.
:param target_dimension: The new dimension name to be used for storing the results. Defaults to
`result`.
:param context: Additional data to be passed to the reducer.
:return: A vector data cube with the computed results and restricted to the bounds of the geometries.
The computed value is used for the dimension with the name that was specified in the parameter
`target_dimension`. The computation also stores information about the total count of pixels (valid +
invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are
added as a new dimension with a dimension name derived from `target_dimension` by adding the suffix
`_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.
"""
return aggregate_spatial(data=self, geometries=geometries, reducer=reducer, target_dimension=target_dimension, context=context)
def aggregate_spatial_binary(self, geometries, reducer, target_dimension=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Zonal statistics for geometries by binary aggregation
:param self: A raster data cube. The data cube implicitly gets restricted to the bounds of the
geometries as if ``filter_spatial()`` would have been used with the same values for the corresponding
parameters immediately before this process.
:param geometries: Geometries as GeoJSON on which the aggregation will be based.
:param reducer: A reduction operator to be applied consecutively on tuples of values. It must be both
associative and commutative as the execution may be executed in parallel and therefore the order of
execution is arbitrary. The reduction operator may be a single process such as ``multiply()`` or
consist of multiple sub-processes.
:param target_dimension: The new dimension name to be used for storing the results. Defaults to
`result`.
:param context: Additional data to be passed to the reducer.
:return: A vector data cube with the computed results and restricted to the bounds of the geometries.
The computed value is stored in dimension with the name that was specified in the parameter
`target_dimension`. The computation also stores information about the total count of pixels (valid +
invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are
stored as new dimension with a dimension name derived from `target_dimension` by adding the suffix
`_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.
"""
return aggregate_spatial_binary(data=self, geometries=geometries, reducer=reducer, target_dimension=target_dimension, context=context)
def aggregate_spatial_window(self, reducer, size, boundary=UNSET, align=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Zonal statistics for rectangular windows
:param self: A raster data cube with exactly two horizontal spatial dimensions and an arbitrary number
of additional dimensions. The process is applied to all additional dimensions individually.
:param reducer: A reducer to be applied on the list of values, which contain all pixels covered by the
window. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single
value for a list of values, see the category 'reducer' for such processes.
:param size: Window size in pixels along the horizontal spatial dimensions. The first value
corresponds to the `x` axis, the second value corresponds to the `y` axis.
:param boundary: Behavior to apply if the number of values for the axes `x` and `y` is not a multiple
of the corresponding value in the `size` parameter. Options are: - `pad` (default): pad the data cube
with the no-data value `null` to fit the required window size. - `trim`: trim the data cube to fit the
required window size. Set the parameter `align` to specifies to which corner the data is aligned to.
:param align: If the data requires padding or trimming (see parameter `boundary`), specifies to which
corner of the spatial extent the data is aligned to. For example, if the data is aligned to the upper
left, the process pads/trims at the lower-right.
:param context: Additional data to be passed to the reducer.
:return: A data cube with the newly computed values and the same dimensions. The resolution will
change depending on the chosen values for the `size` and `boundary` parameter. It usually decreases for
the dimensions which have the corresponding parameter `size` set to values greater than 1. The
dimension labels will be set to the coordinate at the center of the window. The other dimension
properties (name, type and reference system) remain unchanged.
"""
return aggregate_spatial_window(data=self, reducer=reducer, size=size, boundary=boundary, align=align, context=context)
def aggregate_temporal(self, intervals, reducer, labels=UNSET, dimension=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Temporal aggregations
:param self: A data cube.
:param intervals: Left-closed temporal intervals, which are allowed to overlap. Each temporal interval
in the array has exactly two elements: 1. The first element is the start of the temporal interval. The
specified instance in time is **included** in the interval. 2. The second element is the end of the
temporal interval. The specified instance in time is **excluded** from the interval. The specified
temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Although [RFC 3339
prohibits the hour to be '24'](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.7), **this process
allows the value '24' for the hour** of an end time in order to make it possible that left-closed time
intervals can fully cover the day.
:param reducer: A reducer to be applied for the values contained in each interval. A reducer is a
single process such as ``mean()`` or a set of processes, which computes a single value for a list of
values, see the category 'reducer' for such processes. Intervals may not contain any values, which for
most reducers leads to no-data (`null`) values by default.
:param labels: Distinct labels for the intervals, which can contain dates and/or times. Is only
required to be specified if the values for the start of the temporal intervals are not distinct and
thus the default labels would not be unique. The number of labels and the number of groups need to be
equal.
:param dimension: The name of the temporal dimension for aggregation. All data along the dimension is
passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is
expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more
dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.
:param context: Additional data to be passed to the reducer.
:return: A new data cube with the same dimensions. The dimension properties (name, type, labels,
reference system and resolution) remain unchanged, except for the resolution and dimension labels of
the given temporal dimension.
"""
return aggregate_temporal(data=self, intervals=intervals, reducer=reducer, labels=labels, dimension=dimension, context=context)
def aggregate_temporal_period(self, period, reducer, dimension=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Temporal aggregations based on calendar hierarchies
:param self: A data cube.
:param period: The time intervals to aggregate. The following pre-defined values are available: *
`hour`: Hour of the day * `day`: Day of the year * `week`: Week of the year * `dekad`: Ten day periods,
counted per year with three periods per month (day 1 - 10, 11 - 20 and 21 - end of month). The third
dekad of the month can range from 8 to 11 days. For example, the fourth dekad is Feb, 1 - Feb, 10 each
year. * `month`: Month of the year * `season`: Three month periods of the calendar seasons (December -
February, March - May, June - August, September - November). * `tropical-season`: Six month periods of
the tropical seasons (November - April, May - October). * `year`: Proleptic years * `decade`: Ten year
periods ([0-to-9 decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a
0 to the next year ending in a 9. * `decade-ad`: Ten year periods ([1-to-0
decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the anno Domini (AD)
calendar era, from a year ending in a 1 to the next year ending in a 0.
:param reducer: A reducer to be applied for the values contained in each period. A reducer is a single
process such as ``mean()`` or a set of processes, which computes a single value for a list of values,
see the category 'reducer' for such processes. Periods may not contain any values, which for most
reducers leads to no-data (`null`) values by default.
:param dimension: The name of the temporal dimension for aggregation. All data along the dimension is
passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is
expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more
dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.
:param context: Additional data to be passed to the reducer.
:return: A new data cube with the same dimensions. The dimension properties (name, type, labels,
reference system and resolution) remain unchanged, except for the resolution and dimension labels of
the given temporal dimension. The specified temporal dimension has the following dimension labels
(`YYYY` = four-digit year, `MM` = two-digit month, `DD` two-digit day of month): * `hour`: `YYYY-MM-
DD-00` - `YYYY-MM-DD-23` * `day`: `YYYY-001` - `YYYY-365` * `week`: `YYYY-01` - `YYYY-52` * `dekad`:
`YYYY-00` - `YYYY-36` * `month`: `YYYY-01` - `YYYY-12` * `season`: `YYYY-djf` (December - February),
`YYYY-mam` (March - May), `YYYY-jja` (June - August), `YYYY-son` (September - November). * `tropical-
season`: `YYYY-ndjfma` (November - April), `YYYY-mjjaso` (May - October). * `year`: `YYYY` * `decade`:
`YYY0` * `decade-ad`: `YYY1`
"""
return aggregate_temporal_period(data=self, period=period, reducer=reducer, dimension=dimension, context=context)
def all(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Are all of the values true?
:param self: A set of boolean values.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
:return: Boolean result of the logical operation.
"""
return all(data=self, ignore_nodata=ignore_nodata)
def and_(self, y) -> 'ProcessBuilder':
"""
Logical AND
:param self: A boolean value.
:param y: A boolean value.
:return: Boolean result of the logical AND.
"""
return and_(x=self, y=y)
def anomaly(self, normals, period) -> 'ProcessBuilder':
"""
Compute anomalies
:param self: A data cube with exactly one temporal dimension and the following dimension labels for the
given period (`YYYY` = four-digit year, `MM` = two-digit month, `DD` two-digit day of month): *
`hour`: `YYYY-MM-DD-00` - `YYYY-MM-DD-23` * `day`: `YYYY-001` - `YYYY-365` * `week`: `YYYY-01` -
`YYYY-52` * `dekad`: `YYYY-00` - `YYYY-36` * `month`: `YYYY-01` - `YYYY-12` * `season`: `YYYY-djf`
(December - February), `YYYY-mam` (March - May), `YYYY-jja` (June - August), `YYYY-son` (September -
November). * `tropical-season`: `YYYY-ndjfma` (November - April), `YYYY-mjjaso` (May - October). *
`year`: `YYYY` * `decade`: `YYY0` * `decade-ad`: `YYY1` * `single-period` / `climatology-period`: Any
``aggregate_temporal_period()`` can compute such a data cube.
:param normals: A data cube with normals, e.g. daily, monthly or yearly values computed from a process
such as ``climatological_normal()``. Must contain exactly one temporal dimension with the following
dimension labels for the given period: * `hour`: `00` - `23` * `day`: `001` - `365` * `week`: `01` -
`52` * `dekad`: `00` - `36` * `month`: `01` - `12` * `season`: `djf` (December - February), `mam`
(March - May), `jja` (June - August), `son` (September - November) * `tropical-season`: `ndjfma`
(November - April), `mjjaso` (May - October) * `year`: Four-digit year numbers * `decade`: Four-digit
year numbers, the last digit being a `0` * `decade-ad`: Four-digit year numbers, the last digit being a
`1` * `single-period` / `climatology-period`: A single dimension label with any name is expected.
:param period: Specifies the time intervals available in the normals data cube. The following options
are available: * `hour`: Hour of the day * `day`: Day of the year * `week`: Week of the year *
`dekad`: Ten day periods, counted per year with three periods per month (day 1 - 10, 11 - 20 and 21 -
end of month). The third dekad of the month can range from 8 to 11 days. For example, the fourth dekad
is Feb, 1 - Feb, 10 each year. * `month`: Month of the year * `season`: Three month periods of the
calendar seasons (December - February, March - May, June - August, September - November). * `tropical-
season`: Six month periods of the tropical seasons (November - April, May - October). * `year`:
Proleptic years * `decade`: Ten year periods ([0-to-9
decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a 0 to the next
year ending in a 9. * `decade-ad`: Ten year periods ([1-to-0
decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the anno Domini (AD)
calendar era, from a year ending in a 1 to the next year ending in a 0. * `single-period` /
`climatology-period`: A single period of arbitrary length
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged.
"""
return anomaly(data=self, normals=normals, period=period)
def any(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Is at least one value true?
:param self: A set of boolean values.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
:return: Boolean result of the logical operation.
"""
return any(data=self, ignore_nodata=ignore_nodata)
def apply(self, process, context=UNSET) -> 'ProcessBuilder':
"""
Apply a process to each pixel
:param self: A data cube.
:param process: A process that accepts and returns a single value and is applied on each individual
value in the data cube. The process may consist of multiple sub-processes and could, for example,
consist of processes such as ``abs()`` or ``linear_scale_range()``.
:param context: Additional data to be passed to the process.
:return: A data cube with the newly computed values and the same dimensions. The dimension properties
(name, type, labels, reference system and resolution) remain unchanged.
"""
return apply(data=self, process=process, context=context)
def apply_dimension(self, process, dimension, target_dimension=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Apply a process to pixels along a dimension
:param self: A data cube.
:param process: Process to be applied on all pixel values. The specified process needs to accept an
array and must return an array with at least one element. A process may consist of multiple sub-
processes.
:param dimension: The name of the source dimension to apply the process on. Fails with a
`DimensionNotAvailable` exception if the specified dimension does not exist.
:param target_dimension: The name of the target dimension or `null` (the default) to use the source
dimension specified in the parameter `dimension`. By specifying a target dimension, the source
dimension is removed. The target dimension with the specified name and the type `other` (see
``add_dimension()``) is created, if it doesn't exist yet.
:param context: Additional data to be passed to the process.
:return: A data cube with the newly computed values. All dimensions stay the same, except for the
dimensions specified in corresponding parameters. There are three cases how the dimensions can change:
1. The source dimension is the target dimension: - The (number of) dimensions remain unchanged as
the source dimension is the target dimension. - The source dimension properties name and type remain
unchanged. - The dimension labels, the reference system and the resolution are preserved only if the
number of pixel values in the source dimension is equal to the number of values computed by the
process. Otherwise, all other dimension properties change as defined in the list below. 2. The source
dimension is not the target dimension and the latter exists: - The number of dimensions decreases by
one as the source dimension is dropped. - The target dimension properties name and type remain
unchanged. All other dimension properties change as defined in the list below. 3. The source dimension
is not the target dimension and the latter does not exist: - The number of dimensions remain
unchanged, but the source dimension is replaced with the target dimension. - The target dimension
has the specified name and the type other. All other dimension properties are set as defined in the
list below. Unless otherwise stated above, for the given (target) dimension the following applies: -
the number of dimension labels is equal to the number of values computed by the process, - the
dimension labels are incrementing integers starting from zero, - the resolution changes, and - the
reference system is undefined.
"""
return apply_dimension(data=self, process=process, dimension=dimension, target_dimension=target_dimension, context=context)
def apply_kernel(self, kernel, factor=UNSET, border=UNSET, replace_invalid=UNSET) -> 'ProcessBuilder':
"""
Apply a spatial convolution with a kernel
:param self: A data cube.
:param kernel: Kernel as a two-dimensional array of weights. The inner level of the nested array aligns
with the `x` axis and the outer level aligns with the `y` axis. Each level of the kernel must have an
uneven number of elements, otherwise the process throws a `KernelDimensionsUneven` exception.
:param factor: A factor that is multiplied to each value after the kernel has been applied. This is
basically a shortcut for explicitly multiplying each value by a factor afterwards, which is often
required for some kernel-based algorithms such as the Gaussian blur.
:param border: Determines how the data is extended when the kernel overlaps with the borders. Defaults
to fill the border with zeroes. The following options are available: * *numeric value* - fill with a
user-defined constant number `n`: `nnnnnn|abcdefgh|nnnnnn` (default, with `n` = 0) * `replicate` -
repeat the value from the pixel at the border: `aaaaaa|abcdefgh|hhhhhh` * `reflect` - mirror/reflect
from the border: `fedcba|abcdefgh|hgfedc` * `reflect_pixel` - mirror/reflect from the center of the
pixel at the border: `gfedcb|abcdefgh|gfedcb` * `wrap` - repeat/wrap the image:
`cdefgh|abcdefgh|abcdef`
:param replace_invalid: This parameter specifies the value to replace non-numerical or infinite
numerical values with. By default, those values are replaced with zeroes.
:return: A data cube with the newly computed values and the same dimensions. The dimension properties
(name, type, labels, reference system and resolution) remain unchanged.
"""
return apply_kernel(data=self, kernel=kernel, factor=factor, border=border, replace_invalid=replace_invalid)
def apply_neighborhood(self, process, size, overlap=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Apply a process to pixels in a n-dimensional neighborhood
:param self: A data cube.
:param process: Process to be applied on all neighborhoods.
:param size: Neighborhood sizes along each dimension. This object maps dimension names to either a
physical measure (e.g. 100 m, 10 days) or pixels (e.g. 32 pixels). For dimensions not specified, the
default is to provide all values. Be aware that including all values from overly large dimensions may
not be processed at once.
:param overlap: Overlap of neighborhoods along each dimension to avoid border effects. For instance a
temporal dimension can add 1 month before and after a neighborhood. In the spatial dimensions, this is
often a number of pixels. The overlap specified is added before and after, so an overlap of 8 pixels
will add 8 pixels on both sides of the window, so 16 in total. Be aware that large overlaps increase
the need for computational resources and modifying overlapping data in subsequent operations have no
effect.
:param context: Additional data to be passed to the process.
:return: A data cube with the newly computed values and the same dimensions. The dimension properties
(name, type, labels, reference system and resolution) remain unchanged.
"""
return apply_neighborhood(data=self, process=process, size=size, overlap=overlap, context=context)
def arccos(self) -> 'ProcessBuilder':
"""
Inverse cosine
:param self: A number.
:return: The computed angle in radians.
"""
return arccos(x=self)
def arcosh(self) -> 'ProcessBuilder':
"""
Inverse hyperbolic cosine
:param self: A number.
:return: The computed angle in radians.
"""
return arcosh(x=self)
def arcsin(self) -> 'ProcessBuilder':
"""
Inverse sine
:param self: A number.
:return: The computed angle in radians.
"""
return arcsin(x=self)
def arctan(self) -> 'ProcessBuilder':
"""
Inverse tangent
:param self: A number.
:return: The computed angle in radians.
"""
return arctan(x=self)
def arctan2(self, x) -> 'ProcessBuilder':
"""
Inverse tangent of two numbers
:param self: A number to be used as the dividend.
:param x: A number to be used as the divisor.
:return: The computed angle in radians.
"""
return arctan2(y=self, x=x)
def ard_normalized_radar_backscatter(self, elevation_model=UNSET, contributing_area=UNSET, ellipsoid_incidence_angle=UNSET, noise_removal=UNSET) -> 'ProcessBuilder':
"""
CARD4L compliant SAR NRB generation
:param self: The source data cube containing SAR input.
:param elevation_model: The digital elevation model to use. Set to `null` (the default) to allow the
back-end to choose, which will improve portability, but reduce reproducibility.
:param contributing_area: If set to `true`, a DEM-based local contributing area band named
`contributing_area` is added. The values are given in square meters.
:param ellipsoid_incidence_angle: If set to `true`, an ellipsoidal incidence angle band named
`ellipsoid_incidence_angle` is added. The values are given in degrees.
:param noise_removal: If set to `false`, no noise removal is applied. Defaults to `true`, which removes
noise.
:return: Backscatter values expressed as gamma0 in linear scale. In addition to the bands
`contributing_area` and `ellipsoid_incidence_angle` that can optionally be added with corresponding
parameters, the following bands are always added to the data cube: - `mask`: A data mask that
indicates which values are valid (1), invalid (0) or contain no-data (null). - `local_incidence_angle`:
A band with DEM-based local incidence angles in degrees. The data returned is CARD4L compliant with
corresponding metadata.
"""
return ard_normalized_radar_backscatter(data=self, elevation_model=elevation_model, contributing_area=contributing_area, ellipsoid_incidence_angle=ellipsoid_incidence_angle, noise_removal=noise_removal)
def ard_surface_reflectance(self, atmospheric_correction_method, cloud_detection_method, elevation_model=UNSET, atmospheric_correction_options=UNSET, cloud_detection_options=UNSET) -> 'ProcessBuilder':
"""
CARD4L compliant Surface Reflectance generation
:param self: The source data cube containing multi-spectral optical top of the atmosphere (TOA)
reflectances. There must be a single dimension of type `bands` available.
:param atmospheric_correction_method: The atmospheric correction method to use.
:param cloud_detection_method: The cloud detection method to use. Each method supports detecting
different atmospheric disturbances such as clouds, cloud shadows, aerosols, haze, ozone and/or water
vapour in optical imagery.
:param elevation_model: The digital elevation model to use. Set to `null` (the default) to allow the
back-end to choose, which will improve portability, but reduce reproducibility.
:param atmospheric_correction_options: Proprietary options for the atmospheric correction method.
Specifying proprietary options will reduce portability.
:param cloud_detection_options: Proprietary options for the cloud detection method. Specifying
proprietary options will reduce portability.
:return: Data cube containing bottom of atmosphere reflectances for each spectral band in the source
data cube, with atmospheric disturbances like clouds and cloud shadows removed. No-data values (null)
are directly set in the bands. Depending on the methods used, several additional bands will be added to
the data cube: Data cube containing bottom of atmosphere reflectances for each spectral band in the
source data cube, with atmospheric disturbances like clouds and cloud shadows removed. Depending on the
methods used, several additional bands will be added to the data cube: - `date` (optional): Specifies
per-pixel acquisition timestamps. - `incomplete-testing` (required): Identifies pixels with a value of
1 for which the per-pixel tests (at least saturation, cloud and cloud shadows, see CARD4L specification
for details) have not all been successfully completed. Otherwise, the value is 0. - `saturation`
(required) / `saturation_{band}` (optional): Indicates where pixels in the input spectral bands are
saturated (1) or not (0). If the saturation is given per band, the band names are `saturation_{band}`
with `{band}` being the band name from the source data cube. - `cloud`, `shadow` (both
required),`aerosol`, `haze`, `ozone`, `water_vapor` (all optional): Indicates the probability of pixels
being an atmospheric disturbance such as clouds. All bands have values between 0 (clear) and 1, which
describes the probability that it is an atmospheric disturbance. - `snow-ice` (optional): Points to a
file that indicates whether a pixel is assessed as being snow/ice (1) or not (0). All values describe
the probability and must be between 0 and 1. - `land-water` (optional): Indicates whether a pixel is
assessed as being land (1) or water (0). All values describe the probability and must be between 0 and
1. - `incidence-angle` (optional): Specifies per-pixel incidence angles in degrees. - `azimuth`
(optional): Specifies per-pixel azimuth angles in degrees. - `sun-azimuth:` (optional): Specifies per-
pixel sun azimuth angles in degrees. - `sun-elevation` (optional): Specifies per-pixel sun elevation
angles in degrees. - `terrain-shadow` (optional): Indicates with a value of 1 whether a pixel is not
directly illuminated due to terrain shadowing. Otherwise, the value is 0. - `terrain-occlusion`
(optional): Indicates with a value of 1 whether a pixel is not visible to the sensor due to terrain
occlusion during off-nadir viewing. Otherwise, the value is 0. - `terrain-illumination` (optional):
Contains coefficients used for terrain illumination correction are provided for each pixel. The data
returned is CARD4L compliant with corresponding metadata.
"""
return ard_surface_reflectance(data=self, atmospheric_correction_method=atmospheric_correction_method, cloud_detection_method=cloud_detection_method, elevation_model=elevation_model, atmospheric_correction_options=atmospheric_correction_options, cloud_detection_options=cloud_detection_options)
def array_append(self, value) -> 'ProcessBuilder':
"""
Append a value to an array
:param self: An array.
:param value: Value to append to the array.
:return: The new array with the value being appended.
"""
return array_append(data=self, value=value)
def array_apply(self, process, context=UNSET) -> 'ProcessBuilder':
"""
Apply a process to each array element
:param self: An array.
:param process: A process that accepts and returns a single value and is applied on each individual
value in the array. The process may consist of multiple sub-processes and could, for example, consist
of processes such as ``abs()`` or ``linear_scale_range()``.
:param context: Additional data to be passed to the process.
:return: An array with the newly computed values. The number of elements are the same as for the
original array.
"""
return array_apply(data=self, process=process, context=context)
def array_concat(self, array2) -> 'ProcessBuilder':
"""
Merge two arrays
:param self: The first array.
:param array2: The second array.
:return: The merged array.
"""
return array_concat(array1=self, array2=array2)
def array_contains(self, value) -> 'ProcessBuilder':
"""
Check whether the array contains a given value
:param self: List to find the value in.
:param value: Value to find in `data`.
:return: `true` if the list contains the value, false` otherwise.
"""
return array_contains(data=self, value=value)
def array_create(self=UNSET, repeat=UNSET) -> 'ProcessBuilder':
"""
Create an array
:param self: A (native) array to fill the newly created array with. Defaults to an empty array.
:param repeat: The number of times the (native) array specified in `data` is repeatedly added after
each other to the new array being created. Defaults to `1`.
:return: The newly created array.
"""
return array_create(data=self, repeat=repeat)
def array_create_labeled(self, labels) -> 'ProcessBuilder':
"""
Create a labeled array
:param self: An array of values to be used.
:param labels: An array of labels to be used.
:return: The newly created labeled array.
"""
return array_create_labeled(data=self, labels=labels)
def array_element(self, index=UNSET, label=UNSET, return_nodata=UNSET) -> 'ProcessBuilder':
"""
Get an element from an array
:param self: An array.
:param index: The zero-based index of the element to retrieve.
:param label: The label of the element to retrieve. Throws an `ArrayNotLabeled` exception, if the given
array is not a labeled array and this parameter is set.
:param return_nodata: By default this process throws an `ArrayElementNotAvailable` exception if the
index or label is invalid. If you want to return `null` instead, set this flag to `true`.
:return: The value of the requested element.
"""
return array_element(data=self, index=index, label=label, return_nodata=return_nodata)
def array_filter(self, condition, context=UNSET) -> 'ProcessBuilder':
"""
Filter an array based on a condition
:param self: An array.
:param condition: A condition that is evaluated against each value, index and/or label in the array.
Only the array elements for which the condition returns `true` are preserved.
:param context: Additional data to be passed to the condition.
:return: An array filtered by the specified condition. The number of elements are less than or equal
compared to the original array.
"""
return array_filter(data=self, condition=condition, context=context)
def array_find(self, value) -> 'ProcessBuilder':
"""
Get the index for a value in an array
:param self: List to find the value in.
:param value: Value to find in `data`.
:return: The index of the first element with the specified value. If no element was found, `null` is
returned.
"""
return array_find(data=self, value=value)
def array_find_label(self, label) -> 'ProcessBuilder':
"""
Get the index for a label in a labeled array
:param self: List to find the label in.
:param label: Label to find in `data`.
:return: The index of the element with the specified label assigned. If no such label was found, `null`
is returned.
"""
return array_find_label(data=self, label=label)
def array_interpolate_linear(self) -> 'ProcessBuilder':
"""
One-dimensional linear interpolation for arrays
:param self: An array of numbers and no-data values. If the given array is a labeled array, the labels
must have a natural/inherent label order and the process expects the labels to be sorted accordingly.
This is the default behavior in openEO for spatial and temporal dimensions.
:return: An array with no-data values being replaced with interpolated values. If not at least 2
numerical values are available in the array, the array stays the same.
"""
return array_interpolate_linear(data=self)
def array_labels(self) -> 'ProcessBuilder':
"""
Get the labels for an array
:param self: An array with labels.
:return: The labels as an array.
"""
return array_labels(data=self)
def array_modify(self, values, index, length=UNSET) -> 'ProcessBuilder':
"""
Change the content of an array (insert, remove, update)
:param self: An array.
:param values: The values to fill the array with.
:param index: The index of the element to insert the value(s) before. If the index is greater than the
number of elements, the process throws an `ArrayElementNotAvailable` exception. To insert after the
last element, there are two options: 1. Use the simpler processes ``array_append()`` to append a
single value or ``array_concat`` to append multiple values. 2. Specify the number of elements in the
array. You can retrieve the number of elements with the process ``count()``, having the parameter
`condition` set to `true`.
:param length: The number of elements to replace. This parameter has no effect in case the given
`index` does not exist in the array given.
:return: An array with values added, updated or removed.
"""
return array_modify(data=self, values=values, index=index, length=length)
def arsinh(self) -> 'ProcessBuilder':
"""
Inverse hyperbolic sine
:param self: A number.
:return: The computed angle in radians.
"""
return arsinh(x=self)
def artanh(self) -> 'ProcessBuilder':
"""
Inverse hyperbolic tangent
:param self: A number.
:return: The computed angle in radians.
"""
return artanh(x=self)
def atmospheric_correction(self, method, elevation_model=UNSET, options=UNSET) -> 'ProcessBuilder':
"""
Apply atmospheric correction
:param self: Data cube containing multi-spectral optical top of atmosphere reflectances to be
corrected.
:param method: The atmospheric correction method to use. To get reproducible results, you have to set a
specific method. Set to `null` to allow the back-end to choose, which will improve portability, but
reduce reproducibility as you *may* get different results if you run the processes multiple times.
:param elevation_model: The digital elevation model to use. Set to `null` (the default) to allow the
back-end to choose, which will improve portability, but reduce reproducibility.
:param options: Proprietary options for the atmospheric correction method. Specifying proprietary
options will reduce portability.
:return: Data cube containing bottom of atmosphere reflectances.
"""
return atmospheric_correction(data=self, method=method, elevation_model=elevation_model, options=options)
def between(self, min, max, exclude_max=UNSET) -> 'ProcessBuilder':
"""
Between comparison
:param self: The value to check.
:param min: Lower boundary (inclusive) to check against.
:param max: Upper boundary (inclusive) to check against.
:param exclude_max: Exclude the upper boundary `max` if set to `true`. Defaults to `false`.
:return: `true` if `x` is between the specified bounds, otherwise `false`.
"""
return between(x=self, min=min, max=max, exclude_max=exclude_max)
def ceil(self) -> 'ProcessBuilder':
"""
Round fractions up
:param self: A number to round up.
:return: The number rounded up.
"""
return ceil(x=self)
def climatological_normal(self, period, climatology_period=UNSET) -> 'ProcessBuilder':
"""
Compute climatology normals
:param self: A data cube with exactly one temporal dimension. The data cube must span at least the
temporal interval specified in the parameter `climatology-period`. Seasonal periods may span two
consecutive years, e.g. temporal winter that includes months December, January and February. If the
required months before the actual climate period are available, the season is taken into account. If
not available, the first season is not taken into account and the seasonal mean is based on one year
less than the other seasonal normals. The incomplete season at the end of the last year is never taken
into account.
:param period: The time intervals to aggregate the average value for. The following pre-defined
frequencies are supported: * `day`: Day of the year * `month`: Month of the year * `climatology-
period`: The period specified in the `climatology-period`. * `season`: Three month periods of the
calendar seasons (December - February, March - May, June - August, September - November). * `tropical-
season`: Six month periods of the tropical seasons (November - April, May - October).
:param climatology_period: The climatology period as a closed temporal interval. The first element of
the array is the first year to be fully included in the temporal interval. The second element is the
last year to be fully included in the temporal interval. The default period is from 1981 until 2010
(both inclusive).
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except for the resolution and dimension labels of the temporal
dimension. The temporal dimension has the following dimension labels: * `day`: `001` - `365` *
`month`: `01` - `12` * `climatology-period`: `climatology-period` * `season`: `djf` (December -
February), `mam` (March - May), `jja` (June - August), `son` (September - November) * `tropical-
season`: `ndjfma` (November - April), `mjjaso` (May - October)
"""
return climatological_normal(data=self, period=period, climatology_period=climatology_period)
def clip(self, min, max) -> 'ProcessBuilder':
"""
Clip a value between a minimum and a maximum
:param self: A number.
:param min: Minimum value. If the value is lower than this value, the process will return the value of
this parameter.
:param max: Maximum value. If the value is greater than this value, the process will return the value
of this parameter.
:return: The value clipped to the specified range.
"""
return clip(x=self, min=min, max=max)
def cloud_detection(self, method, options=UNSET) -> 'ProcessBuilder':
"""
Create cloud masks
:param self: The source data cube containing multi-spectral optical top of the atmosphere (TOA)
reflectances on which to perform cloud detection.
:param method: The cloud detection method to use. To get reproducible results, you have to set a
specific method. Set to `null` to allow the back-end to choose, which will improve portability, but
reduce reproducibility as you *may* get different results if you run the processes multiple times.
:param options: Proprietary options for the cloud detection method. Specifying proprietary options will
reduce portability.
:return: A data cube with bands for the atmospheric disturbances. Each of the masks contains values
between 0 and 1. The data cube has the same spatial and temporal dimensions as the source data cube and
a dimension that contains a dimension label for each of the supported/considered atmospheric
disturbance.
"""
return cloud_detection(data=self, method=method, options=options)
def constant(self) -> 'ProcessBuilder':
"""
Define a constant value
:param self: The value of the constant.
:return: The value of the constant.
"""
return constant(x=self)
def cos(self) -> 'ProcessBuilder':
"""
Cosine
:param self: An angle in radians.
:return: The computed cosine of `x`.
"""
return cos(x=self)
def cosh(self) -> 'ProcessBuilder':
"""
Hyperbolic cosine
:param self: An angle in radians.
:return: The computed hyperbolic cosine of `x`.
"""
return cosh(x=self)
def count(self, condition=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Count the number of elements
:param self: An array with elements of any data type.
:param condition: A condition consists of one or more processes, which in the end return a boolean
value. It is evaluated against each element in the array. An element is counted only if the condition
returns `true`. Defaults to count valid elements in a list (see ``is_valid()``). Setting this parameter
to boolean `true` counts all elements in the list.
:param context: Additional data to be passed to the condition.
:return: The counted number of elements.
"""
return count(data=self, condition=condition, context=context)
def create_raster_cube(self) -> 'ProcessBuilder':
"""
Create an empty raster data cube
:return: An empty raster data cube with zero dimensions.
"""
return create_raster_cube()
def cummax(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Cumulative maxima
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following
elements.
:return: An array with the computed cumulative maxima.
"""
return cummax(data=self, ignore_nodata=ignore_nodata)
def cummax(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Cumulative maxima
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following
elements.
:return: An array with the computed cumulative maxima.
"""
return cummax(data=self, ignore_nodata=ignore_nodata)
def cummin(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Cumulative minima
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following
elements.
:return: An array with the computed cumulative minima.
"""
return cummin(data=self, ignore_nodata=ignore_nodata)
def cummin(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Cumulative minima
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following
elements.
:return: An array with the computed cumulative minima.
"""
return cummin(data=self, ignore_nodata=ignore_nodata)
def cumproduct(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Cumulative products
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following
elements.
:return: An array with the computed cumulative products.
"""
return cumproduct(data=self, ignore_nodata=ignore_nodata)
def cumproduct(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Cumulative products
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following
elements.
:return: An array with the computed cumulative products.
"""
return cumproduct(data=self, ignore_nodata=ignore_nodata)
def cumsum(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Cumulative sums
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following
elements.
:return: An array with the computed cumulative sums.
"""
return cumsum(data=self, ignore_nodata=ignore_nodata)
def cumsum(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Cumulative sums
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following
elements.
:return: An array with the computed cumulative sums.
"""
return cumsum(data=self, ignore_nodata=ignore_nodata)
def date_shift(self, value, unit) -> 'ProcessBuilder':
"""
Manipulates dates and times by addition or subtraction
:param self: The date (and optionally time) to manipulate. If the given date doesn't include the time,
the process assumes that the time component is `00:00:00Z` (i.e. midnight, in UTC). The millisecond
part of the time is optional and defaults to `0` if not given.
:param value: The period of time in the unit given that is added (positive numbers) or subtracted
(negative numbers). The value `0` doesn't have any effect.
:param unit: The unit for the value given. The following pre-defined units are available: -
millisecond: Milliseconds - second: Seconds - leap seconds are ignored in computations. - minute:
Minutes - hour: Hours - day: Days - changes only the the day part of a date - week: Weeks (equivalent
to 7 days) - month: Months - year: Years Manipulations with the unit `year`, `month`, `week` or `day`
do never change the time. If any of the manipulations result in an invalid date or time, the
corresponding part is rounded down to the next valid date or time respectively. For example, adding a
month to `2020-01-31` would result in `2020-02-29`.
:return: The manipulated date. If a time component was given in the parameter `date`, the time
component is returned with the date.
"""
return date_shift(date=self, value=value, unit=unit)
def debug(self, code=UNSET, level=UNSET, message=UNSET) -> 'ProcessBuilder':
"""
Publish debugging information
:param self: Data to publish.
:param code: An identifier to help identify the log entry in a bunch of other log entries.
:param level: The severity level of this message, defaults to `info`. Note that the level `error`
forces the computation to be stopped!
:param message: A message to send in addition to the data.
:return: Returns the data as passed to the `data` parameter.
"""
return debug(data=self, code=code, level=level, message=message)
def debug(self, code=UNSET, level=UNSET, message=UNSET) -> 'ProcessBuilder':
"""
Publish debugging information
:param self: Data to publish.
:param code: An identifier to help identify the log entry in a bunch of other log entries.
:param level: The severity level of this message, defaults to `info`. Note that the level `error`
forces the computation to be stopped!
:param message: A message to send in addition to the data.
:return: The data as passed to the `data` parameter without any modification.
"""
return debug(data=self, code=code, level=level, message=message)
def dimension_labels(self, dimension) -> 'ProcessBuilder':
"""
Get the dimension labels
:param self: The data cube.
:param dimension: The name of the dimension to get the labels for.
:return: The labels as an array.
"""
return dimension_labels(data=self, dimension=dimension)
def divide(self, y) -> 'ProcessBuilder':
"""
Division of two numbers
:param self: The dividend.
:param y: The divisor.
:return: The computed result.
"""
return divide(x=self, y=y)
def drop_dimension(self, name) -> 'ProcessBuilder':
"""
Remove a dimension
:param self: The data cube to drop a dimension from.
:param name: Name of the dimension to drop.
:return: A data cube without the specified dimension. The number of dimensions decreases by one, but
the dimension properties (name, type, labels, reference system and resolution) for all other dimensions
remain unchanged.
"""
return drop_dimension(data=self, name=name)
def e(self) -> 'ProcessBuilder':
"""
Euler's number (e)
:return: The numerical value of Euler's number.
"""
return e()
def eq(self, y, delta=UNSET, case_sensitive=UNSET) -> 'ProcessBuilder':
"""
Equal to comparison
:param self: First operand.
:param y: Second operand.
:param delta: Only applicable for comparing two numbers. If this optional parameter is set to a
positive non-zero number the equality of two numbers is checked against a delta value. This is
especially useful to circumvent problems with floating-point inaccuracy in machine-based computation.
This option is basically an alias for the following computation: `lte(abs(minus([x, y]), delta)`
:param case_sensitive: Only applicable for comparing two strings. Case sensitive comparison can be
disabled by setting this parameter to `false`.
:return: `true` if `x` is equal to `y`, `null` if any operand is `null`, otherwise `false`.
"""
return eq(x=self, y=y, delta=delta, case_sensitive=case_sensitive)
def exp(self) -> 'ProcessBuilder':
"""
Exponentiation to the base e
:param self: The numerical exponent.
:return: The computed value for *e* raised to the power of `p`.
"""
return exp(p=self)
def extrema(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Minimum and maximum values
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that an array with two `null` values is
returned if any value is such a value.
:return: An array containing the minimum and maximum values for the specified numbers. The first
element is the minimum, the second element is the maximum. If the input array is empty both elements
are set to `null`.
"""
return extrema(data=self, ignore_nodata=ignore_nodata)
def filter_bands(self, bands=UNSET, wavelengths=UNSET) -> 'ProcessBuilder':
"""
Filter the bands by names
:param self: A data cube with bands.
:param bands: A list of band names. Either the unique band name (metadata field `name` in bands) or one
of the common band names (metadata field `common_name` in bands). If the unique band name and the
common name conflict, the unique band name has a higher priority. The order of the specified array
defines the order of the bands in the data cube. If multiple bands match a common name, all matched
bands are included in the original order.
:param wavelengths: A list of sub-lists with each sub-list consisting of two elements. The first
element is the minimum wavelength and the second element is the maximum wavelength. Wavelengths are
specified in micrometers (μm). The order of the specified array defines the order of the bands in the
data cube. If multiple bands match the wavelengths, all matched bands are included in the original
order.
:return: A data cube limited to a subset of its original bands. The dimensions and dimension properties
(name, type, labels, reference system and resolution) remain unchanged, except that the dimension of
type `bands` has less (or the same) dimension labels.
"""
return filter_bands(data=self, bands=bands, wavelengths=wavelengths)
def filter_bbox(self, extent) -> 'ProcessBuilder':
"""
Spatial filter using a bounding box
:param self: A data cube.
:param extent: A bounding box, which may include a vertical axis (see `base` and `height`).
:return: A data cube restricted to the bounding box. The dimensions and dimension properties (name,
type, labels, reference system and resolution) remain unchanged, except that the spatial dimensions
have less (or the same) dimension labels.
"""
return filter_bbox(data=self, extent=extent)
def filter_labels(self, condition, dimension, context=UNSET) -> 'ProcessBuilder':
"""
Filter dimension labels based on a condition
:param self: A data cube.
:param condition: A condition that is evaluated against each dimension label in the specified
dimension. A dimension label and the corresponding data is preserved for the given dimension, if the
condition returns `true`.
:param dimension: The name of the dimension to filter on. Fails with a `DimensionNotAvailable` error if
the specified dimension does not exist.
:param context: Additional data to be passed to the condition.
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except that the given dimension has less (or the same)
dimension labels.
"""
return filter_labels(data=self, condition=condition, dimension=dimension, context=context)
def filter_labels(self, condition, dimension, context=UNSET) -> 'ProcessBuilder':
"""
Filter dimension labels based on a condition
:param self: A data cube.
:param condition: A condition that is evaluated against each dimension label in the specified
dimension. A dimension label and the corresponding data is preserved for the given dimension, if the
condition returns `true`.
:param dimension: The name of the dimension to filter on. Fails with a `DimensionNotAvailable`
exception if the specified dimension does not exist.
:param context: Additional data to be passed to the condition.
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except that the given dimension has less (or the same)
dimension labels.
"""
return filter_labels(data=self, condition=condition, dimension=dimension, context=context)
def filter_spatial(self, geometries) -> 'ProcessBuilder':
"""
Spatial filter using geometries
:param self: A data cube.
:param geometries: One or more geometries used for filtering, specified as GeoJSON.
:return: A data cube restricted to the specified geometries. The dimensions and dimension properties
(name, type, labels, reference system and resolution) remain unchanged, except that the spatial
dimensions have less (or the same) dimension labels.
"""
return filter_spatial(data=self, geometries=geometries)
def filter_temporal(self, extent, dimension=UNSET) -> 'ProcessBuilder':
"""
Temporal filter for a temporal intervals
:param self: A data cube.
:param extent: Left-closed temporal interval, i.e. an array with exactly two elements: 1. The first
element is the start of the temporal interval. The specified instance in time is **included** in the
interval. 2. The second element is the end of the temporal interval. The specified instance in time is
**excluded** from the interval. The specified temporal strings follow [RFC 3339](https://www.rfc-
editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the boundaries to `null`,
but never both.
:param dimension: The name of the temporal dimension to filter on. If no specific dimension is
specified or it is set to `null`, the filter applies to all temporal dimensions. Fails with a
`DimensionNotAvailable` exception if the specified dimension does not exist.
:return: A data cube restricted to the specified temporal extent. The dimensions and dimension
properties (name, type, labels, reference system and resolution) remain unchanged, except that the
temporal dimensions (determined by `dimensions` parameter) may have less dimension labels.
"""
return filter_temporal(data=self, extent=extent, dimension=dimension)
def first(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
First element
:param self: An array with elements of any data type.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if the first value is
such a value.
:return: The first element of the input array.
"""
return first(data=self, ignore_nodata=ignore_nodata)
def floor(self) -> 'ProcessBuilder':
"""
Round fractions down
:param self: A number to round down.
:return: The number rounded down.
"""
return floor(x=self)
def gt(self, y) -> 'ProcessBuilder':
"""
Greater than comparison
:param self: First operand.
:param y: Second operand.
:return: `true` if `x` is strictly greater than `y` or `null` if any operand is `null`, otherwise
`false`.
"""
return gt(x=self, y=y)
def gte(self, y) -> 'ProcessBuilder':
"""
Greater than or equal to comparison
:param self: First operand.
:param y: Second operand.
:return: `true` if `x` is greater than or equal to `y`, `null` if any operand is `null`, otherwise
`false`.
"""
return gte(x=self, y=y)
def if_(self, accept, reject=UNSET) -> 'ProcessBuilder':
"""
If-Then-Else conditional
:param self: A boolean value.
:param accept: A value that is returned if the boolean value is `true`.
:param reject: A value that is returned if the boolean value is **not** `true`. Defaults to `null`.
:return: Either the `accept` or `reject` argument depending on the given boolean value.
"""
return if_(value=self, accept=accept, reject=reject)
def int(self) -> 'ProcessBuilder':
"""
Integer part of a number
:param self: A number.
:return: Integer part of the number.
"""
return int(x=self)
def is_infinite(self) -> 'ProcessBuilder':
"""
Value is an infinite number
:param self: The data to check.
:return: `true` if the data is an infinite number, otherwise `false`.
"""
return is_infinite(x=self)
def is_nan(self) -> 'ProcessBuilder':
"""
Value is not a number
:param self: The data to check.
:return: `true` if the data is not a number, otherwise `false`.
"""
return is_nan(x=self)
def is_nodata(self) -> 'ProcessBuilder':
"""
Value is not a no-data value
:param self: The data to check.
:return: `true` if the data is a no-data value, otherwise `false`.
"""
return is_nodata(x=self)
def is_valid(self) -> 'ProcessBuilder':
"""
Value is valid data
:param self: The data to check.
:return: `true` if the data is valid, otherwise `false`.
"""
return is_valid(x=self)
def last(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Last element
:param self: An array with elements of any data type.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if the last value is
such a value.
:return: The last element of the input array.
"""
return last(data=self, ignore_nodata=ignore_nodata)
def linear_scale_range(self, inputMin, inputMax, outputMin=UNSET, outputMax=UNSET) -> 'ProcessBuilder':
"""
Linear transformation between two ranges
:param self: A number to transform. The number gets clipped to the bounds specified in `inputMin` and
`inputMax`.
:param inputMin: Minimum value the input can obtain.
:param inputMax: Maximum value the input can obtain.
:param outputMin: Minimum value of the desired output range.
:param outputMax: Maximum value of the desired output range.
:return: The transformed number.
"""
return linear_scale_range(x=self, inputMin=inputMin, inputMax=inputMax, outputMin=outputMin, outputMax=outputMax)
def ln(self) -> 'ProcessBuilder':
"""
Natural logarithm
:param self: A number to compute the natural logarithm for.
:return: The computed natural logarithm.
"""
return ln(x=self)
def load_collection(self, spatial_extent, temporal_extent, bands=UNSET, properties=UNSET) -> 'ProcessBuilder':
"""
Load a collection
:param self: The collection id.
:param spatial_extent: Limits the data to load from the collection to the specified bounding box or
polygons. The process puts a pixel into the data cube if the point at the pixel center intersects with
the bounding box or any of the polygons (as defined in the Simple Features standard by the OGC). The
GeoJSON can be one of the following feature types: * A `Polygon` or `MultiPolygon` geometry, * a
`Feature` with a `Polygon` or `MultiPolygon` geometry, * a `FeatureCollection` containing at least one
`Feature` with `Polygon` or `MultiPolygon` geometries, or * a `GeometryCollection` containing `Polygon`
or `MultiPolygon` geometries. To maximize interoperability, `GeometryCollection` should be avoided in
favour of one of the alternatives above. Set this parameter to `null` to set no limit for the spatial
extent. Be careful with this when loading large datasets! It is recommended to use this parameter
instead of using ``filter_bbox()`` or ``filter_spatial()`` directly after loading unbounded data.
:param temporal_extent: Limits the data to load from the collection to the specified left-closed
temporal interval. Applies to all temporal dimensions. The interval has to be specified as an array
with exactly two elements: 1. The first element is the start of the temporal interval. The specified
instance in time is **included** in the interval. 2. The second element is the end of the temporal
interval. The specified instance in time is **excluded** from the interval. The specified temporal
strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Also supports open intervals by
setting one of the boundaries to `null`, but never both. Set this parameter to `null` to set no limit
for the temporal extent. Be careful with this when loading large datasets! It is recommended to use
this parameter instead of using ``filter_temporal()`` directly after loading unbounded data.
:param bands: Only adds the specified bands into the data cube so that bands that don't match the list
of band names are not available. Applies to all dimensions of type `bands`. Either the unique band
name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in
bands) can be specified. If the unique band name and the common name conflict, the unique band name has
a higher priority. The order of the specified array defines the order of the bands in the data cube.
If multiple bands match a common name, all matched bands are included in the original order. It is
recommended to use this parameter instead of using ``filter_bands()`` directly after loading unbounded
data.
:param properties: Limits the data by metadata properties to include only data in the data cube which
all given conditions return `true` for (AND operation). Specify key-value-pairs with the key being the
name of the metadata property, which can be retrieved with the openEO Data Discovery for Collections.
The value must a condition (user-defined process) to be evaluated against the collection metadata, see
the example.
:return: A data cube for further processing. The dimensions and dimension properties (name, type,
labels, reference system and resolution) correspond to the collection's metadata, but the dimension
labels are restricted as specified in the parameters.
"""
return load_collection(id=self, spatial_extent=spatial_extent, temporal_extent=temporal_extent, bands=bands, properties=properties)
def load_result(self) -> 'ProcessBuilder':
"""
Load batch job results
:param self: The id of a batch job with results.
:return: A data cube for further processing.
"""
return load_result(id=self)
def load_result(self) -> 'ProcessBuilder':
"""
Load batch job results
:param self: The id of a batch job with results.
:return: A data cube for further processing.
"""
return load_result(id=self)
def load_uploaded_files(self, format, options=UNSET) -> 'ProcessBuilder':
"""
Load files from the user workspace
:param self: The files to read. Folders can't be specified, instead specify all files. An error is
thrown if a file can't be read.
:param format: The file format to read from. It must be one of the values that the server reports as
supported input file formats, which usually correspond to the short GDAL/OGR codes. If the format is
not suitable for loading the data, a `FormatUnsuitable` exception will be thrown. This parameter is
*case insensitive*.
:param options: The file format parameters to be used to read the files. Must correspond to the
parameters that the server reports as supported parameters for the chosen `format`. The parameter names
and valid values usually correspond to the GDAL/OGR format options.
:return: A data cube for further processing.
"""
return load_uploaded_files(paths=self, format=format, options=options)
def load_uploaded_files(self, format, options=UNSET) -> 'ProcessBuilder':
"""
Load files from the user workspace
:param self: The files to read. Folders can't be specified, specify all files instead. An exception is
thrown if a file can't be read.
:param format: The file format to read from. It must be one of the values that the server reports as
supported input file formats, which usually correspond to the short GDAL/OGR codes. If the format is
not suitable for loading the data, a `FormatUnsuitable` exception will be thrown. This parameter is
*case insensitive*.
:param options: The file format parameters to be used to read the files. Must correspond to the
parameters that the server reports as supported parameters for the chosen `format`. The parameter names
and valid values usually correspond to the GDAL/OGR format options.
:return: A data cube for further processing.
"""
return load_uploaded_files(paths=self, format=format, options=options)
def log(self, base) -> 'ProcessBuilder':
"""
Logarithm to a base
:param self: A number to compute the logarithm for.
:param base: The numerical base.
:return: The computed logarithm.
"""
return log(x=self, base=base)
def lt(self, y) -> 'ProcessBuilder':
"""
Less than comparison
:param self: First operand.
:param y: Second operand.
:return: `true` if `x` is strictly less than `y`, `null` if any operand is `null`, otherwise `false`.
"""
return lt(x=self, y=y)
def lte(self, y) -> 'ProcessBuilder':
"""
Less than or equal to comparison
:param self: First operand.
:param y: Second operand.
:return: `true` if `x` is less than or equal to `y`, `null` if any operand is `null`, otherwise
`false`.
"""
return lte(x=self, y=y)
def mask(self, mask, replacement=UNSET) -> 'ProcessBuilder':
"""
Apply a raster mask
:param self: A raster data cube.
:param mask: A mask as a raster data cube. Every pixel in `data` must have a corresponding element in
`mask`.
:param replacement: The value used to replace masked values with.
:return: A masked raster data cube with the same dimensions. The dimension properties (name, type,
labels, reference system and resolution) remain unchanged.
"""
return mask(data=self, mask=mask, replacement=replacement)
def mask_polygon(self, mask, replacement=UNSET, inside=UNSET) -> 'ProcessBuilder':
"""
Apply a polygon mask
:param self: A raster data cube.
:param mask: A GeoJSON object containing at least one polygon. The provided feature types can be one of
the following: * A `Polygon` or `MultiPolygon` geometry, * a `Feature` with a `Polygon` or
`MultiPolygon` geometry, * a `FeatureCollection` containing at least one `Feature` with `Polygon` or
`MultiPolygon` geometries, or * a `GeometryCollection` containing `Polygon` or `MultiPolygon`
geometries. To maximize interoperability, `GeometryCollection` should be avoided in favour of one of
the alternatives above.
:param replacement: The value used to replace masked values with.
:param inside: If set to `true` all pixels for which the point at the pixel center **does** intersect
with any polygon are replaced.
:return: A masked raster data cube with the same dimensions. The dimension properties (name, type,
labels, reference system and resolution) remain unchanged.
"""
return mask_polygon(data=self, mask=mask, replacement=replacement, inside=inside)
def max(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Maximum value
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The maximum value.
"""
return max(data=self, ignore_nodata=ignore_nodata)
def mean(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Arithmetic mean (average)
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The computed arithmetic mean.
"""
return mean(data=self, ignore_nodata=ignore_nodata)
def median(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Statistical median
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The computed statistical median.
"""
return median(data=self, ignore_nodata=ignore_nodata)
def merge_cubes(self, cube2, overlap_resolver=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Merge two data cubes
:param self: The first data cube.
:param cube2: The second data cube.
:param overlap_resolver: A reduction operator that resolves the conflict if the data overlaps. The
reducer must return a value of the same data type as the input values are. The reduction operator may
be a single process such as ``multiply()`` or consist of multiple sub-processes. `null` (the default)
can be specified if no overlap resolver is required.
:param context: Additional data to be passed to the overlap resolver.
:return: The merged data cube. See the process description for details regarding the dimensions and
dimension properties (name, type, labels, reference system and resolution).
"""
return merge_cubes(cube1=self, cube2=cube2, overlap_resolver=overlap_resolver, context=context)
def min(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Minimum value
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The minimum value.
"""
return min(data=self, ignore_nodata=ignore_nodata)
def mod(self, y) -> 'ProcessBuilder':
"""
Modulo
:param self: A number to be used as the dividend.
:param y: A number to be used as the divisor.
:return: The remainder after division.
"""
return mod(x=self, y=y)
def multiply(self, y) -> 'ProcessBuilder':
"""
Multiplication of two numbers
:param self: The multiplier.
:param y: The multiplicand.
:return: The computed product of the two numbers.
"""
return multiply(x=self, y=y)
def nan(self) -> 'ProcessBuilder':
"""
Not a Number (NaN)
:return: Returns `NaN`.
"""
return nan()
def ndvi(self, nir=UNSET, red=UNSET, target_band=UNSET) -> 'ProcessBuilder':
"""
Normalized Difference Vegetation Index
:param self: A raster data cube with two bands that have the common names `red` and `nir` assigned.
:param nir: The name of the NIR band. Defaults to the band that has the common name `nir` assigned.
Either the unique band name (metadata field `name` in bands) or one of the common band names (metadata
field `common_name` in bands) can be specified. If the unique band name and the common name conflict,
the unique band name has a higher priority.
:param red: The name of the red band. Defaults to the band that has the common name `red` assigned.
Either the unique band name (metadata field `name` in bands) or one of the common band names (metadata
field `common_name` in bands) can be specified. If the unique band name and the common name conflict,
the unique band name has a higher priority.
:param target_band: By default, the dimension of type `bands` is dropped. To keep the dimension specify
a new band name in this parameter so that a new dimension label with the specified name will be added
for the computed values.
:return: A raster data cube containing the computed NDVI values. The structure of the data cube differs
depending on the value passed to `target_band`: * `target_band` is `null`: The data cube does not
contain the dimension of type `bands`, the number of dimensions decreases by one. The dimension
properties (name, type, labels, reference system and resolution) for all other dimensions remain
unchanged. * `target_band` is a string: The data cube keeps the same dimensions. The dimension
properties remain unchanged, but the number of dimension labels for the dimension of type `bands`
increases by one. The additional label is named as specified in `target_band`.
"""
return ndvi(data=self, nir=nir, red=red, target_band=target_band)
def neq(self, y, delta=UNSET, case_sensitive=UNSET) -> 'ProcessBuilder':
"""
Not equal to comparison
:param self: First operand.
:param y: Second operand.
:param delta: Only applicable for comparing two numbers. If this optional parameter is set to a
positive non-zero number the non-equality of two numbers is checked against a delta value. This is
especially useful to circumvent problems with floating-point inaccuracy in machine-based computation.
This option is basically an alias for the following computation: `gt(abs(minus([x, y]), delta)`
:param case_sensitive: Only applicable for comparing two strings. Case sensitive comparison can be
disabled by setting this parameter to `false`.
:return: `true` if `x` is *not* equal to `y`, `null` if any operand is `null`, otherwise `false`.
"""
return neq(x=self, y=y, delta=delta, case_sensitive=case_sensitive)
def normalized_difference(self, y) -> 'ProcessBuilder':
"""
Normalized difference
:param self: The value for the first band.
:param y: The value for the second band.
:return: The computed normalized difference.
"""
return normalized_difference(x=self, y=y)
def not_(self) -> 'ProcessBuilder':
"""
Inverting a boolean
:param self: Boolean value to invert.
:return: Inverted boolean value.
"""
return not_(x=self)
def or_(self, y) -> 'ProcessBuilder':
"""
Logical OR
:param self: A boolean value.
:param y: A boolean value.
:return: Boolean result of the logical OR.
"""
return or_(x=self, y=y)
def order(self, asc=UNSET, nodata=UNSET) -> 'ProcessBuilder':
"""
Create a permutation
:param self: An array to compute the order for.
:param asc: The default sort order is ascending, with smallest values first. To sort in reverse
(descending) order, set this parameter to `false`.
:param nodata: Controls the handling of no-data values (`null`). By default, they are removed. If set
to `true`, missing values in the data are put last; if set to `false`, they are put first.
:return: The computed permutation.
"""
return order(data=self, asc=asc, nodata=nodata)
def pi(self) -> 'ProcessBuilder':
"""
Pi (π)
:return: The numerical value of Pi.
"""
return pi()
def power(self, p) -> 'ProcessBuilder':
"""
Exponentiation
:param self: The numerical base.
:param p: The numerical exponent.
:return: The computed value for `base` raised to the power of `p`.
"""
return power(base=self, p=p)
def product(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Compute the product by multiplying numbers
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The computed product of the sequence of numbers.
"""
return product(data=self, ignore_nodata=ignore_nodata)
def quantiles(self, probabilities=UNSET, q=UNSET, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Quantiles
:param self: An array of numbers.
:param probabilities: A list of probabilities to calculate quantiles for. The probabilities must be
between 0 and 1.
:param q: Intervals to calculate quantiles for. Calculates q-quantiles with (nearly) equal-sized
intervals.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that an array with `null` values is returned
if any element is such a value.
:return: An array with the computed quantiles. The list has either * as many elements as the given
list of `probabilities` had or * *`q`-1* elements. If the input array is empty the resulting array is
filled with as many `null` values as required according to the list above. See the 'Empty array'
example for an example.
"""
return quantiles(data=self, probabilities=probabilities, q=q, ignore_nodata=ignore_nodata)
def rearrange(self, order) -> 'ProcessBuilder':
"""
Rearrange an array based on a permutation
:param self: The array to rearrange.
:param order: The permutation used for rearranging.
:return: The rearranged array.
"""
return rearrange(data=self, order=order)
def reduce_dimension(self, reducer, dimension, context=UNSET) -> 'ProcessBuilder':
"""
Reduce dimensions
:param self: A data cube.
:param reducer: A reducer to apply on the specified dimension. A reducer is a single process such as
``mean()`` or a set of processes, which computes a single value for a list of values, see the category
'reducer' for such processes.
:param dimension: The name of the dimension over which to reduce. Fails with a `DimensionNotAvailable`
exception if the specified dimension does not exist.
:param context: Additional data to be passed to the reducer.
:return: A data cube with the newly computed values. It is missing the given dimension, the number of
dimensions decreases by one. The dimension properties (name, type, labels, reference system and
resolution) for all other dimensions remain unchanged.
"""
return reduce_dimension(data=self, reducer=reducer, dimension=dimension, context=context)
def reduce_dimension_binary(self, reducer, dimension, context=UNSET) -> 'ProcessBuilder':
"""
Reduce dimensions using binary reduction
:param self: A data cube.
:param reducer: A reduction operator to be applied consecutively on pairs of values. It must be both
associative and commutative as the execution may be executed in parallel and therefore the order of
execution is arbitrary. The reduction operator may be a single process such as ``multiply()`` or
consist of multiple sub-processes.
:param dimension: The name of the dimension over which to reduce. Fails with a `DimensionNotAvailable`
error if the specified dimension does not exist.
:param context: Additional data to be passed to the reducer.
:return: A data cube with the newly computed values. It is missing the given dimension, the number of
dimensions decreases by one. The dimension properties (name, type, labels, reference system and
resolution) for all other dimensions remain unchanged.
"""
return reduce_dimension_binary(data=self, reducer=reducer, dimension=dimension, context=context)
def reduce_spatial(self, reducer, context=UNSET) -> 'ProcessBuilder':
"""
Reduce spatial dimensions 'x' and 'y'
:param self: A data cube.
:param reducer: A reducer to apply on the horizontal spatial dimensions. A reducer is a single process
such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the
category 'reducer' for such processes.
:param context: Additional data to be passed to the reducer.
:return: A data cube with the newly computed values. It is missing the horizontal spatial dimensions,
the number of dimensions decreases by two. The dimension properties (name, type, labels, reference
system and resolution) for all other dimensions remain unchanged.
"""
return reduce_spatial(data=self, reducer=reducer, context=context)
def rename_dimension(self, source, target) -> 'ProcessBuilder':
"""
Rename a dimension
:param self: The data cube.
:param source: The current name of the dimension. Fails with a `DimensionNotAvailable` exception if the
specified dimension does not exist.
:param target: A new Name for the dimension. Fails with a `DimensionExists` exception if a dimension
with the specified name exists.
:return: A data cube with the same dimensions, but the name of one of the dimensions changes. The old
name can not be referred to any longer. The dimension properties (name, type, labels, reference system
and resolution) remain unchanged.
"""
return rename_dimension(data=self, source=source, target=target)
def rename_labels(self, dimension, target, source=UNSET) -> 'ProcessBuilder':
"""
Rename dimension labels
:param self: The data cube.
:param dimension: The name of the dimension to rename the labels for.
:param target: The new names for the labels. The dimension labels in the data cube are expected to be
enumerated if the parameter `target` is not specified. If a target dimension label already exists in
the data cube, a `LabelExists` exception is thrown.
:param source: The names of the labels as they are currently in the data cube. The array defines an
unsorted and potentially incomplete list of labels that should be renamed to the names available in the
corresponding array elements in the parameter `target`. If one of the source dimension labels doesn't
exist, the `LabelNotAvailable` exception is thrown. By default, the array is empty so that the
dimension labels in the data cube are expected to be enumerated.
:return: The data cube with the same dimensions. The dimension properties (name, type, labels,
reference system and resolution) remain unchanged, except that for the given dimension the labels
change. The old labels can not be referred to any longer. The number of labels remains the same.
"""
return rename_labels(data=self, dimension=dimension, target=target, source=source)
def resample_cube_spatial(self, target, method=UNSET) -> 'ProcessBuilder':
"""
Resample the spatial dimensions to match a target data cube
:param self: A data cube.
:param target: A data cube that describes the spatial target resolution.
:param method: Resampling method to use. The following options are available and are meant to align
with [`gdalwarp`](https://gdal.org/programs/gdalwarp.html#cmdoption-gdalwarp-r): * `average`: average
(mean) resampling, computes the weighted average of all valid pixels * `bilinear`: bilinear resampling
* `cubic`: cubic resampling * `cubicspline`: cubic spline resampling * `lanczos`: Lanczos windowed sinc
resampling * `max`: maximum resampling, selects the maximum value from all valid pixels * `med`: median
resampling, selects the median value of all valid pixels * `min`: minimum resampling, selects the
minimum value from all valid pixels * `mode`: mode resampling, selects the value which appears most
often of all the sampled points * `near`: nearest neighbour resampling (default) * `q1`: first quartile
resampling, selects the first quartile value of all valid pixels * `q3`: third quartile resampling,
selects the third quartile value of all valid pixels * `rms` root mean square (quadratic mean) of all
valid pixels * `sum`: compute the weighted sum of all valid pixels Valid pixels are determined based
on the function ``is_valid()``.
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except for the resolution and dimension labels of the spatial
dimensions.
"""
return resample_cube_spatial(data=self, target=target, method=method)
def resample_cube_temporal(self, target, method, dimension=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Resample a temporal dimension to match a target data cube
:param self: A data cube.
:param target: A data cube that describes the temporal target resolution.
:param method: A resampling method to be applied, could be a reducer for downsampling or other methods
for upsampling. A reducer is a single process such as ``mean()`` or a set of processes, which computes
a single value for a list of values, see the category 'reducer' for such processes.
:param dimension: The name of the temporal dimension to resample, which must exist with this name in
both data cubes. If the dimension is not set or is set to `null`, the data cube is expected to only
have one temporal dimension. Fails with a `TooManyDimensions` error if it has more dimensions. Fails
with a `DimensionNotAvailable` error if the specified dimension does not exist.
:param context: Additional data to be passed to the process specified for the parameter `method`.
:return: A raster data cube with the same dimensions and the same dimension properties (name, type,
labels, reference system and resolution) for all non-temporal dimensions. For the temporal dimension
the name and type remain unchanged, but the reference system changes and the labels and resolution may
change.
"""
return resample_cube_temporal(data=self, target=target, method=method, dimension=dimension, context=context)
def resample_cube_temporal(self, target, dimension=UNSET, valid_within=UNSET) -> 'ProcessBuilder':
"""
Resample temporal dimensions to match a target data cube
:param self: A data cube with one or more temporal dimensions.
:param target: A data cube that describes the temporal target resolution.
:param dimension: The name of the temporal dimension to resample, which must exist with this name in
both data cubes. If the dimension is not set or is set to `null`, the process resamples all temporal
dimensions that exist with the same names in both data cubes. The following exceptions may occur: * A
dimension is given, but it does not exist in any of the data cubes: `DimensionNotAvailable` * A
dimension is given, but one of them is not temporal: `DimensionMismatch` * No specific dimension name
is given and there are no temporal dimensions with the same name in the data: `DimensionMismatch`
:param valid_within: Setting this parameter to a numerical value enables that the process searches for
valid values within the given period of days before and after the target timestamps. Valid values are
determined based on the function ``is_valid()``. For example, the limit of `7` for the target
timestamps `2020-01-15 12:00:00` looks for a nearest neighbor after `2020-01-08 12:00:00` and before
`2020-01-22 12:00:00`. If no valid value is found within the given period, the value will be set to no-
data (`null`).
:return: A raster data cube with the same dimensions and the same dimension properties (name, type,
labels, reference system and resolution) for all non-temporal dimensions. For the temporal dimension,
the name and type remain unchanged, but the dimension labels, resolution and reference system may
change.
"""
return resample_cube_temporal(data=self, target=target, dimension=dimension, valid_within=valid_within)
def resample_spatial(self, resolution=UNSET, projection=UNSET, method=UNSET, align=UNSET) -> 'ProcessBuilder':
"""
Resample and warp the spatial dimensions
:param self: A raster data cube.
:param resolution: Resamples the data cube to the target resolution, which can be specified either as
separate values for x and y or as a single value for both axes. Specified in the units of the target
projection. Doesn't change the resolution by default (`0`).
:param projection: Warps the data cube to the target projection, specified as as [EPSG
code](http://www.epsg-registry.org/), [WKT2 (ISO 19162)
string](http://docs.opengeospatial.org/is/18-010r7/18-010r7.html), [PROJ definition
(deprecated)](https://proj.org/usage/quickstart.html). By default (`null`), the projection is not
changed.
:param method: Resampling method to use. The following options are available and are meant to align
with [`gdalwarp`](https://gdal.org/programs/gdalwarp.html#cmdoption-gdalwarp-r): * `average`: average
(mean) resampling, computes the weighted average of all valid pixels * `bilinear`: bilinear resampling
* `cubic`: cubic resampling * `cubicspline`: cubic spline resampling * `lanczos`: Lanczos windowed sinc
resampling * `max`: maximum resampling, selects the maximum value from all valid pixels * `med`: median
resampling, selects the median value of all valid pixels * `min`: minimum resampling, selects the
minimum value from all valid pixels * `mode`: mode resampling, selects the value which appears most
often of all the sampled points * `near`: nearest neighbour resampling (default) * `q1`: first quartile
resampling, selects the first quartile value of all valid pixels * `q3`: third quartile resampling,
selects the third quartile value of all valid pixels * `rms` root mean square (quadratic mean) of all
valid pixels * `sum`: compute the weighted sum of all valid pixels Valid pixels are determined based
on the function ``is_valid()``.
:param align: Specifies to which corner of the spatial extent the new resampled data is aligned to.
:return: A raster data cube with values warped onto the new projection. It has the same dimensions and
the same dimension properties (name, type, labels, reference system and resolution) for all non-spatial
or vertical spatial dimensions. For the horizontal spatial dimensions the name and type remain
unchanged, but reference system, labels and resolution may change depending on the given parameters.
"""
return resample_spatial(data=self, resolution=resolution, projection=projection, method=method, align=align)
def round(self, p=UNSET) -> 'ProcessBuilder':
"""
Round to a specified precision
:param self: A number to round.
:param p: A positive number specifies the number of digits after the decimal point to round to. A
negative number means rounding to a power of ten, so for example *-2* rounds to the nearest hundred.
Defaults to *0*.
:return: The rounded number.
"""
return round(x=self, p=p)
def run_udf(self, udf, runtime, version=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Run a UDF
:param self: The data to be passed to the UDF as an array or raster data cube.
:param udf: Either source code, an absolute URL or a path to a UDF script.
:param runtime: A UDF runtime identifier available at the back-end.
:param version: An UDF runtime version. If set to `null`, the default runtime version specified for
each runtime is used.
:param context: Additional data such as configuration options to be passed to the UDF.
:return: The data processed by the UDF. * Returns a raster data cube, if a raster data cube is passed
for `data`. Details on the dimensions and dimension properties (name, type, labels, reference system
and resolution) depend on the UDF. * If an array is passed for `data`, the returned value can be of any
data type, but is exactly what the UDF returns.
"""
return run_udf(data=self, udf=udf, runtime=runtime, version=version, context=context)
def run_udf_externally(self, url, context=UNSET) -> 'ProcessBuilder':
"""
Run an externally hosted UDF container
:param self: The data to be passed to the UDF as array or raster data cube.
:param url: URL to a remote UDF service.
:param context: Additional data such as configuration options that should be passed to the UDF.
:return: The data processed by the UDF service. * Returns a raster data cube, if a raster data cube is
passed for `data`. Details on the dimensions and dimension properties (name, type, labels, reference
system and resolution) depend on the UDF. * If an array is passed for `data`, the returned value can be
of any data type, but is exactly what the UDF returns.
"""
return run_udf_externally(data=self, url=url, context=context)
def run_udf_externally(self, url, context=UNSET) -> 'ProcessBuilder':
"""
Run an externally hosted UDF container
:param self: The data to be passed to the UDF as an array or raster data cube.
:param url: Absolute URL to a remote UDF service.
:param context: Additional data such as configuration options to be passed to the UDF.
:return: The data processed by the UDF service. * Returns a raster data cube if a raster data cube is
passed for `data`. Details on the dimensions and dimension properties (name, type, labels, reference
system and resolution) depend on the UDF. * If an array is passed for `data`, the returned value can be
of any data type, but is exactly what the UDF returns.
"""
return run_udf_externally(data=self, url=url, context=context)
def sar_backscatter(self, coefficient=UNSET, elevation_model=UNSET, mask=UNSET, contributing_area=UNSET, local_incidence_angle=UNSET, ellipsoid_incidence_angle=UNSET, noise_removal=UNSET) -> 'ProcessBuilder':
"""
Computes backscatter from SAR input
:param self: The source data cube containing SAR input.
:param coefficient: Select the radiometric correction coefficient. The following options are available:
* `beta0`: radar brightness * `sigma0-ellipsoid`: ground area computed with ellipsoid earth model *
`sigma0-terrain`: ground area computed with terrain earth model * `gamma0-ellipsoid`: ground area
computed with ellipsoid earth model in sensor line of sight * `gamma0-terrain`: ground area computed
with terrain earth model in sensor line of sight (default) * `null`: non-normalized backscatter
:param elevation_model: The digital elevation model to use. Set to `null` (the default) to allow the
back-end to choose, which will improve portability, but reduce reproducibility.
:param mask: If set to `true`, a data mask is added to the bands with the name `mask`. It indicates
which values are valid (1), invalid (0) or contain no-data (null).
:param contributing_area: If set to `true`, a DEM-based local contributing area band named
`contributing_area` is added. The values are given in square meters.
:param local_incidence_angle: If set to `true`, a DEM-based local incidence angle band named
`local_incidence_angle` is added. The values are given in degrees.
:param ellipsoid_incidence_angle: If set to `true`, an ellipsoidal incidence angle band named
`ellipsoid_incidence_angle` is added. The values are given in degrees.
:param noise_removal: If set to `false`, no noise removal is applied. Defaults to `true`, which removes
noise.
:return: Backscatter values corresponding to the chosen parametrization. The values are given in linear
scale.
"""
return sar_backscatter(data=self, coefficient=coefficient, elevation_model=elevation_model, mask=mask, contributing_area=contributing_area, local_incidence_angle=local_incidence_angle, ellipsoid_incidence_angle=ellipsoid_incidence_angle, noise_removal=noise_removal)
def save_result(self, format, options=UNSET) -> 'ProcessBuilder':
"""
Save processed data to storage
:param self: The data to save.
:param format: The file format to save to. It must be one of the values that the server reports as
supported output file formats, which usually correspond to the short GDAL/OGR codes. If the format is
not suitable for storing the underlying data structure, a `FormatUnsuitable` exception will be thrown.
This parameter is *case insensitive*.
:param options: The file format parameters to be used to create the file(s). Must correspond to the
parameters that the server reports as supported parameters for the chosen `format`. The parameter names
and valid values usually correspond to the GDAL/OGR format options.
:return: `false` if saving failed, `true` otherwise.
"""
return save_result(data=self, format=format, options=options)
def sd(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Standard deviation
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The computed sample standard deviation.
"""
return sd(data=self, ignore_nodata=ignore_nodata)
def sgn(self) -> 'ProcessBuilder':
"""
Signum
:param self: A number.
:return: The computed signum value of `x`.
"""
return sgn(x=self)
def sin(self) -> 'ProcessBuilder':
"""
Sine
:param self: An angle in radians.
:return: The computed sine of `x`.
"""
return sin(x=self)
def sinh(self) -> 'ProcessBuilder':
"""
Hyperbolic sine
:param self: An angle in radians.
:return: The computed hyperbolic sine of `x`.
"""
return sinh(x=self)
def sort(self, asc=UNSET, nodata=UNSET) -> 'ProcessBuilder':
"""
Sort data
:param self: An array with data to sort.
:param asc: The default sort order is ascending, with smallest values first. To sort in reverse
(descending) order, set this parameter to `false`.
:param nodata: Controls the handling of no-data values (`null`). By default, they are removed. If set
to `true`, missing values in the data are put last; if set to `false`, they are put first.
:return: The sorted array.
"""
return sort(data=self, asc=asc, nodata=nodata)
def sqrt(self) -> 'ProcessBuilder':
"""
Square root
:param self: A number.
:return: The computed square root.
"""
return sqrt(x=self)
def subtract(self, y) -> 'ProcessBuilder':
"""
Subtraction of two numbers
:param self: The minuend.
:param y: The subtrahend.
:return: The computed result.
"""
return subtract(x=self, y=y)
def sum(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Compute the sum by adding up numbers
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The computed sum of the sequence of numbers.
"""
return sum(data=self, ignore_nodata=ignore_nodata)
def tan(self) -> 'ProcessBuilder':
"""
Tangent
:param self: An angle in radians.
:return: The computed tangent of `x`.
"""
return tan(x=self)
def tanh(self) -> 'ProcessBuilder':
"""
Hyperbolic tangent
:param self: An angle in radians.
:return: The computed hyperbolic tangent of `x`.
"""
return tanh(x=self)
def text_begins(self, pattern, case_sensitive=UNSET) -> 'ProcessBuilder':
"""
Text begins with another text
:param self: Text in which to find something at the beginning.
:param pattern: Text to find at the beginning of `data`. Regular expressions are not supported.
:param case_sensitive: Case sensitive comparison can be disabled by setting this parameter to `false`.
:return: `true` if `data` begins with `pattern`, false` otherwise.
"""
return text_begins(data=self, pattern=pattern, case_sensitive=case_sensitive)
def text_contains(self, pattern, case_sensitive=UNSET) -> 'ProcessBuilder':
"""
Text contains another text
:param self: Text in which to find something in.
:param pattern: Text to find in `data`. Regular expressions are not supported.
:param case_sensitive: Case sensitive comparison can be disabled by setting this parameter to `false`.
:return: `true` if `data` contains the `pattern`, false` otherwise.
"""
return text_contains(data=self, pattern=pattern, case_sensitive=case_sensitive)
def text_ends(self, pattern, case_sensitive=UNSET) -> 'ProcessBuilder':
"""
Text ends with another text
:param self: Text in which to find something at the end.
:param pattern: Text to find at the end of `data`. Regular expressions are not supported.
:param case_sensitive: Case sensitive comparison can be disabled by setting this parameter to `false`.
:return: `true` if `data` ends with `pattern`, false` otherwise.
"""
return text_ends(data=self, pattern=pattern, case_sensitive=case_sensitive)
def text_merge(self, separator=UNSET) -> 'ProcessBuilder':
"""
Concatenate elements to a single text
:param self: A set of elements. Numbers, boolean values and null values get converted to their (lower
case) string representation. For example: `1` (integer), `-1.5` (number), `true` / `false` (boolean
values)
:param separator: A separator to put between each of the individual texts. Defaults to an empty string.
:return: A string containing a string representation of all the array elements in the same order, with
the separator between each element.
"""
return text_merge(data=self, separator=separator)
def trim_cube(self) -> 'ProcessBuilder':
"""
Remove dimension labels with no-data values
:param self: A raster data cube to trim.
:return: A trimmed raster data cube with the same dimensions. The dimension properties name, type,
reference system and resolution remain unchanged. The number of dimension labels may decrease.
"""
return trim_cube(data=self)
def variance(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Variance
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The computed sample variance.
"""
return variance(data=self, ignore_nodata=ignore_nodata)
def xor(self, y) -> 'ProcessBuilder':
"""
Logical XOR (exclusive or)
:param self: A boolean value.
:param y: A boolean value.
:return: Boolean result of the logical XOR.
"""
return xor(x=self, y=y)
# Public shortcut
process = ProcessBuilder.process
# Private shortcut that has lower chance to collide with a process argument named `process`
_process = ProcessBuilder.process
def absolute(x) -> ProcessBuilder:
"""
Absolute value
:param x: A number.
:return: The computed absolute value.
"""
return _process('absolute', x=x)
def add(x, y) -> ProcessBuilder:
"""
Addition of two numbers
:param x: The first summand.
:param y: The second summand.
:return: The computed sum of the two numbers.
"""
return _process('add', x=x, y=y)
def add_dimension(data, name, label, type=UNSET) -> ProcessBuilder:
"""
Add a new dimension
:param data: A data cube to add the dimension to.
:param name: Name for the dimension.
:param label: A dimension label.
:param type: The type of dimension, defaults to `other`.
:return: The data cube with a newly added dimension. The new dimension has exactly one dimension label. All
other dimensions remain unchanged.
"""
return _process('add_dimension', data=data, name=name, label=label, type=type)
def aggregate_spatial(data, geometries, reducer, target_dimension=UNSET, context=UNSET) -> ProcessBuilder:
"""
Zonal statistics for geometries
:param data: A raster data cube. The data cube must have been reduced to only contain two spatial
dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a
time series. Otherwise, this process fails with the `TooManyDimensions` exception. The data cube
implicitly gets restricted to the bounds of the geometries as if ``filter_spatial()`` would have been used
with the same values for the corresponding parameters immediately before this process.
:param geometries: Geometries as GeoJSON on which the aggregation will be based. One value will be
computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple
values will be computed, one value per contained `Feature`. For example, a single value will be computed
for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two polygons. -
For **polygons**, the process considers all pixels for which the point at the pixel center intersects with
the corresponding polygon (as defined in the Simple Features standard by the OGC). - For **points**, the
process considers the closest pixel center. - For **lines** (line strings), the process considers all the
pixels whose centers are closest to at least one point on the line. Thus, pixels may be part of multiple
geometries and be part of multiple aggregations. To maximize interoperability, a nested
`GeometryCollection` should be avoided. Furthermore, a `GeometryCollection` composed of a single type of
geometries should be avoided in favour of the corresponding multi-part type (e.g. `MultiPolygon`).
:param reducer: A reducer to be applied on all values of each geometry. A reducer is a single process such
as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category
'reducer' for such processes.
:param target_dimension: The new dimension name to be used for storing the results. Defaults to `result`.
:param context: Additional data to be passed to the reducer.
:return: A vector data cube with the computed results and restricted to the bounds of the geometries. The
computed value is used for the dimension with the name that was specified in the parameter
`target_dimension`. The computation also stores information about the total count of pixels (valid +
invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are
added as a new dimension with a dimension name derived from `target_dimension` by adding the suffix
`_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.
"""
return _process('aggregate_spatial', data=data, geometries=geometries, reducer=reducer, target_dimension=target_dimension, context=context)
def aggregate_spatial_binary(data, geometries, reducer, target_dimension=UNSET, context=UNSET) -> ProcessBuilder:
"""
Zonal statistics for geometries by binary aggregation
:param data: A raster data cube. The data cube implicitly gets restricted to the bounds of the geometries
as if ``filter_spatial()`` would have been used with the same values for the corresponding parameters
immediately before this process.
:param geometries: Geometries as GeoJSON on which the aggregation will be based.
:param reducer: A reduction operator to be applied consecutively on tuples of values. It must be both
associative and commutative as the execution may be executed in parallel and therefore the order of
execution is arbitrary. The reduction operator may be a single process such as ``multiply()`` or consist of
multiple sub-processes.
:param target_dimension: The new dimension name to be used for storing the results. Defaults to `result`.
:param context: Additional data to be passed to the reducer.
:return: A vector data cube with the computed results and restricted to the bounds of the geometries. The
computed value is stored in dimension with the name that was specified in the parameter `target_dimension`.
The computation also stores information about the total count of pixels (valid + invalid pixels) and the
number of valid pixels (see ``is_valid()``) for each geometry. These values are stored as new dimension
with a dimension name derived from `target_dimension` by adding the suffix `_meta`. The new dimension has
the dimension labels `total_count` and `valid_count`.
"""
return _process('aggregate_spatial_binary', data=data, geometries=geometries, reducer=reducer, target_dimension=target_dimension, context=context)
def aggregate_spatial_window(data, reducer, size, boundary=UNSET, align=UNSET, context=UNSET) -> ProcessBuilder:
"""
Zonal statistics for rectangular windows
:param data: A raster data cube with exactly two horizontal spatial dimensions and an arbitrary number of
additional dimensions. The process is applied to all additional dimensions individually.
:param reducer: A reducer to be applied on the list of values, which contain all pixels covered by the
window. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single
value for a list of values, see the category 'reducer' for such processes.
:param size: Window size in pixels along the horizontal spatial dimensions. The first value corresponds to
the `x` axis, the second value corresponds to the `y` axis.
:param boundary: Behavior to apply if the number of values for the axes `x` and `y` is not a multiple of
the corresponding value in the `size` parameter. Options are: - `pad` (default): pad the data cube with
the no-data value `null` to fit the required window size. - `trim`: trim the data cube to fit the required
window size. Set the parameter `align` to specifies to which corner the data is aligned to.
:param align: If the data requires padding or trimming (see parameter `boundary`), specifies to which
corner of the spatial extent the data is aligned to. For example, if the data is aligned to the upper left,
the process pads/trims at the lower-right.
:param context: Additional data to be passed to the reducer.
:return: A data cube with the newly computed values and the same dimensions. The resolution will change
depending on the chosen values for the `size` and `boundary` parameter. It usually decreases for the
dimensions which have the corresponding parameter `size` set to values greater than 1. The dimension
labels will be set to the coordinate at the center of the window. The other dimension properties (name,
type and reference system) remain unchanged.
"""
return _process('aggregate_spatial_window', data=data, reducer=reducer, size=size, boundary=boundary, align=align, context=context)
def aggregate_temporal(data, intervals, reducer, labels=UNSET, dimension=UNSET, context=UNSET) -> ProcessBuilder:
"""
Temporal aggregations
:param data: A data cube.
:param intervals: Left-closed temporal intervals, which are allowed to overlap. Each temporal interval in
the array has exactly two elements: 1. The first element is the start of the temporal interval. The
specified instance in time is **included** in the interval. 2. The second element is the end of the
temporal interval. The specified instance in time is **excluded** from the interval. The specified
temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Although [RFC 3339
prohibits the hour to be '24'](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.7), **this process
allows the value '24' for the hour** of an end time in order to make it possible that left-closed time
intervals can fully cover the day.
:param reducer: A reducer to be applied for the values contained in each interval. A reducer is a single
process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see
the category 'reducer' for such processes. Intervals may not contain any values, which for most reducers
leads to no-data (`null`) values by default.
:param labels: Distinct labels for the intervals, which can contain dates and/or times. Is only required to
be specified if the values for the start of the temporal intervals are not distinct and thus the default
labels would not be unique. The number of labels and the number of groups need to be equal.
:param dimension: The name of the temporal dimension for aggregation. All data along the dimension is
passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is
expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more
dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.
:param context: Additional data to be passed to the reducer.
:return: A new data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except for the resolution and dimension labels of the given
temporal dimension.
"""
return _process('aggregate_temporal', data=data, intervals=intervals, reducer=reducer, labels=labels, dimension=dimension, context=context)
def aggregate_temporal_period(data, period, reducer, dimension=UNSET, context=UNSET) -> ProcessBuilder:
"""
Temporal aggregations based on calendar hierarchies
:param data: A data cube.
:param period: The time intervals to aggregate. The following pre-defined values are available: * `hour`:
Hour of the day * `day`: Day of the year * `week`: Week of the year * `dekad`: Ten day periods, counted per
year with three periods per month (day 1 - 10, 11 - 20 and 21 - end of month). The third dekad of the month
can range from 8 to 11 days. For example, the fourth dekad is Feb, 1 - Feb, 10 each year. * `month`: Month
of the year * `season`: Three month periods of the calendar seasons (December - February, March - May, June
- August, September - November). * `tropical-season`: Six month periods of the tropical seasons (November -
April, May - October). * `year`: Proleptic years * `decade`: Ten year periods ([0-to-9
decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a 0 to the next year
ending in a 9. * `decade-ad`: Ten year periods ([1-to-0
decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the anno Domini (AD)
calendar era, from a year ending in a 1 to the next year ending in a 0.
:param reducer: A reducer to be applied for the values contained in each period. A reducer is a single
process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see
the category 'reducer' for such processes. Periods may not contain any values, which for most reducers
leads to no-data (`null`) values by default.
:param dimension: The name of the temporal dimension for aggregation. All data along the dimension is
passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is
expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more
dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.
:param context: Additional data to be passed to the reducer.
:return: A new data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except for the resolution and dimension labels of the given
temporal dimension. The specified temporal dimension has the following dimension labels (`YYYY` = four-
digit year, `MM` = two-digit month, `DD` two-digit day of month): * `hour`: `YYYY-MM-DD-00` - `YYYY-MM-
DD-23` * `day`: `YYYY-001` - `YYYY-365` * `week`: `YYYY-01` - `YYYY-52` * `dekad`: `YYYY-00` - `YYYY-36` *
`month`: `YYYY-01` - `YYYY-12` * `season`: `YYYY-djf` (December - February), `YYYY-mam` (March - May),
`YYYY-jja` (June - August), `YYYY-son` (September - November). * `tropical-season`: `YYYY-ndjfma` (November
- April), `YYYY-mjjaso` (May - October). * `year`: `YYYY` * `decade`: `YYY0` * `decade-ad`: `YYY1`
"""
return _process('aggregate_temporal_period', data=data, period=period, reducer=reducer, dimension=dimension, context=context)
def all(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Are all of the values true?
:param data: A set of boolean values.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
:return: Boolean result of the logical operation.
"""
return _process('all', data=data, ignore_nodata=ignore_nodata)
def and_(x, y) -> ProcessBuilder:
"""
Logical AND
:param x: A boolean value.
:param y: A boolean value.
:return: Boolean result of the logical AND.
"""
return _process('and', x=x, y=y)
def anomaly(data, normals, period) -> ProcessBuilder:
"""
Compute anomalies
:param data: A data cube with exactly one temporal dimension and the following dimension labels for the
given period (`YYYY` = four-digit year, `MM` = two-digit month, `DD` two-digit day of month): * `hour`:
`YYYY-MM-DD-00` - `YYYY-MM-DD-23` * `day`: `YYYY-001` - `YYYY-365` * `week`: `YYYY-01` - `YYYY-52` *
`dekad`: `YYYY-00` - `YYYY-36` * `month`: `YYYY-01` - `YYYY-12` * `season`: `YYYY-djf` (December -
February), `YYYY-mam` (March - May), `YYYY-jja` (June - August), `YYYY-son` (September - November). *
`tropical-season`: `YYYY-ndjfma` (November - April), `YYYY-mjjaso` (May - October). * `year`: `YYYY` *
`decade`: `YYY0` * `decade-ad`: `YYY1` * `single-period` / `climatology-period`: Any
``aggregate_temporal_period()`` can compute such a data cube.
:param normals: A data cube with normals, e.g. daily, monthly or yearly values computed from a process such
as ``climatological_normal()``. Must contain exactly one temporal dimension with the following dimension
labels for the given period: * `hour`: `00` - `23` * `day`: `001` - `365` * `week`: `01` - `52` * `dekad`:
`00` - `36` * `month`: `01` - `12` * `season`: `djf` (December - February), `mam` (March - May), `jja`
(June - August), `son` (September - November) * `tropical-season`: `ndjfma` (November - April), `mjjaso`
(May - October) * `year`: Four-digit year numbers * `decade`: Four-digit year numbers, the last digit being
a `0` * `decade-ad`: Four-digit year numbers, the last digit being a `1` * `single-period` / `climatology-
period`: A single dimension label with any name is expected.
:param period: Specifies the time intervals available in the normals data cube. The following options are
available: * `hour`: Hour of the day * `day`: Day of the year * `week`: Week of the year * `dekad`: Ten
day periods, counted per year with three periods per month (day 1 - 10, 11 - 20 and 21 - end of month). The
third dekad of the month can range from 8 to 11 days. For example, the fourth dekad is Feb, 1 - Feb, 10
each year. * `month`: Month of the year * `season`: Three month periods of the calendar seasons (December -
February, March - May, June - August, September - November). * `tropical-season`: Six month periods of the
tropical seasons (November - April, May - October). * `year`: Proleptic years * `decade`: Ten year periods
([0-to-9 decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a 0 to the
next year ending in a 9. * `decade-ad`: Ten year periods ([1-to-0
decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the anno Domini (AD)
calendar era, from a year ending in a 1 to the next year ending in a 0. * `single-period` / `climatology-
period`: A single period of arbitrary length
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged.
"""
return _process('anomaly', data=data, normals=normals, period=period)
def any(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Is at least one value true?
:param data: A set of boolean values.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
:return: Boolean result of the logical operation.
"""
return _process('any', data=data, ignore_nodata=ignore_nodata)
def apply(data, process, context=UNSET) -> ProcessBuilder:
"""
Apply a process to each pixel
:param data: A data cube.
:param process: A process that accepts and returns a single value and is applied on each individual value
in the data cube. The process may consist of multiple sub-processes and could, for example, consist of
processes such as ``abs()`` or ``linear_scale_range()``.
:param context: Additional data to be passed to the process.
:return: A data cube with the newly computed values and the same dimensions. The dimension properties
(name, type, labels, reference system and resolution) remain unchanged.
"""
return _process('apply', data=data, process=process, context=context)
def apply_dimension(data, process, dimension, target_dimension=UNSET, context=UNSET) -> ProcessBuilder:
"""
Apply a process to pixels along a dimension
:param data: A data cube.
:param process: Process to be applied on all pixel values. The specified process needs to accept an array
and must return an array with at least one element. A process may consist of multiple sub-processes.
:param dimension: The name of the source dimension to apply the process on. Fails with a
`DimensionNotAvailable` exception if the specified dimension does not exist.
:param target_dimension: The name of the target dimension or `null` (the default) to use the source
dimension specified in the parameter `dimension`. By specifying a target dimension, the source dimension
is removed. The target dimension with the specified name and the type `other` (see ``add_dimension()``) is
created, if it doesn't exist yet.
:param context: Additional data to be passed to the process.
:return: A data cube with the newly computed values. All dimensions stay the same, except for the
dimensions specified in corresponding parameters. There are three cases how the dimensions can change: 1.
The source dimension is the target dimension: - The (number of) dimensions remain unchanged as the
source dimension is the target dimension. - The source dimension properties name and type remain
unchanged. - The dimension labels, the reference system and the resolution are preserved only if the
number of pixel values in the source dimension is equal to the number of values computed by the process.
Otherwise, all other dimension properties change as defined in the list below. 2. The source dimension is
not the target dimension and the latter exists: - The number of dimensions decreases by one as the
source dimension is dropped. - The target dimension properties name and type remain unchanged. All other
dimension properties change as defined in the list below. 3. The source dimension is not the target
dimension and the latter does not exist: - The number of dimensions remain unchanged, but the source
dimension is replaced with the target dimension. - The target dimension has the specified name and the
type other. All other dimension properties are set as defined in the list below. Unless otherwise stated
above, for the given (target) dimension the following applies: - the number of dimension labels is equal
to the number of values computed by the process, - the dimension labels are incrementing integers starting
from zero, - the resolution changes, and - the reference system is undefined.
"""
return _process('apply_dimension', data=data, process=process, dimension=dimension, target_dimension=target_dimension, context=context)
def apply_kernel(data, kernel, factor=UNSET, border=UNSET, replace_invalid=UNSET) -> ProcessBuilder:
"""
Apply a spatial convolution with a kernel
:param data: A data cube.
:param kernel: Kernel as a two-dimensional array of weights. The inner level of the nested array aligns
with the `x` axis and the outer level aligns with the `y` axis. Each level of the kernel must have an
uneven number of elements, otherwise the process throws a `KernelDimensionsUneven` exception.
:param factor: A factor that is multiplied to each value after the kernel has been applied. This is
basically a shortcut for explicitly multiplying each value by a factor afterwards, which is often required
for some kernel-based algorithms such as the Gaussian blur.
:param border: Determines how the data is extended when the kernel overlaps with the borders. Defaults to
fill the border with zeroes. The following options are available: * *numeric value* - fill with a user-
defined constant number `n`: `nnnnnn|abcdefgh|nnnnnn` (default, with `n` = 0) * `replicate` - repeat the
value from the pixel at the border: `aaaaaa|abcdefgh|hhhhhh` * `reflect` - mirror/reflect from the border:
`fedcba|abcdefgh|hgfedc` * `reflect_pixel` - mirror/reflect from the center of the pixel at the border:
`gfedcb|abcdefgh|gfedcb` * `wrap` - repeat/wrap the image: `cdefgh|abcdefgh|abcdef`
:param replace_invalid: This parameter specifies the value to replace non-numerical or infinite numerical
values with. By default, those values are replaced with zeroes.
:return: A data cube with the newly computed values and the same dimensions. The dimension properties
(name, type, labels, reference system and resolution) remain unchanged.
"""
return _process('apply_kernel', data=data, kernel=kernel, factor=factor, border=border, replace_invalid=replace_invalid)
def apply_neighborhood(data, process, size, overlap=UNSET, context=UNSET) -> ProcessBuilder:
"""
Apply a process to pixels in a n-dimensional neighborhood
:param data: A data cube.
:param process: Process to be applied on all neighborhoods.
:param size: Neighborhood sizes along each dimension. This object maps dimension names to either a
physical measure (e.g. 100 m, 10 days) or pixels (e.g. 32 pixels). For dimensions not specified, the
default is to provide all values. Be aware that including all values from overly large dimensions may not
be processed at once.
:param overlap: Overlap of neighborhoods along each dimension to avoid border effects. For instance a
temporal dimension can add 1 month before and after a neighborhood. In the spatial dimensions, this is
often a number of pixels. The overlap specified is added before and after, so an overlap of 8 pixels will
add 8 pixels on both sides of the window, so 16 in total. Be aware that large overlaps increase the need
for computational resources and modifying overlapping data in subsequent operations have no effect.
:param context: Additional data to be passed to the process.
:return: A data cube with the newly computed values and the same dimensions. The dimension properties
(name, type, labels, reference system and resolution) remain unchanged.
"""
return _process('apply_neighborhood', data=data, process=process, size=size, overlap=overlap, context=context)
def arccos(x) -> ProcessBuilder:
"""
Inverse cosine
:param x: A number.
:return: The computed angle in radians.
"""
return _process('arccos', x=x)
def arcosh(x) -> ProcessBuilder:
"""
Inverse hyperbolic cosine
:param x: A number.
:return: The computed angle in radians.
"""
return _process('arcosh', x=x)
def arcsin(x) -> ProcessBuilder:
"""
Inverse sine
:param x: A number.
:return: The computed angle in radians.
"""
return _process('arcsin', x=x)
def arctan(x) -> ProcessBuilder:
"""
Inverse tangent
:param x: A number.
:return: The computed angle in radians.
"""
return _process('arctan', x=x)
def arctan2(y, x) -> ProcessBuilder:
"""
Inverse tangent of two numbers
:param y: A number to be used as the dividend.
:param x: A number to be used as the divisor.
:return: The computed angle in radians.
"""
return _process('arctan2', y=y, x=x)
def ard_normalized_radar_backscatter(data, elevation_model=UNSET, contributing_area=UNSET, ellipsoid_incidence_angle=UNSET, noise_removal=UNSET) -> ProcessBuilder:
"""
CARD4L compliant SAR NRB generation
:param data: The source data cube containing SAR input.
:param elevation_model: The digital elevation model to use. Set to `null` (the default) to allow the back-
end to choose, which will improve portability, but reduce reproducibility.
:param contributing_area: If set to `true`, a DEM-based local contributing area band named
`contributing_area` is added. The values are given in square meters.
:param ellipsoid_incidence_angle: If set to `true`, an ellipsoidal incidence angle band named
`ellipsoid_incidence_angle` is added. The values are given in degrees.
:param noise_removal: If set to `false`, no noise removal is applied. Defaults to `true`, which removes
noise.
:return: Backscatter values expressed as gamma0 in linear scale. In addition to the bands
`contributing_area` and `ellipsoid_incidence_angle` that can optionally be added with corresponding
parameters, the following bands are always added to the data cube: - `mask`: A data mask that indicates
which values are valid (1), invalid (0) or contain no-data (null). - `local_incidence_angle`: A band with
DEM-based local incidence angles in degrees. The data returned is CARD4L compliant with corresponding
metadata.
"""
return _process('ard_normalized_radar_backscatter', data=data, elevation_model=elevation_model, contributing_area=contributing_area, ellipsoid_incidence_angle=ellipsoid_incidence_angle, noise_removal=noise_removal)
def ard_surface_reflectance(data, atmospheric_correction_method, cloud_detection_method, elevation_model=UNSET, atmospheric_correction_options=UNSET, cloud_detection_options=UNSET) -> ProcessBuilder:
"""
CARD4L compliant Surface Reflectance generation
:param data: The source data cube containing multi-spectral optical top of the atmosphere (TOA)
reflectances. There must be a single dimension of type `bands` available.
:param atmospheric_correction_method: The atmospheric correction method to use.
:param cloud_detection_method: The cloud detection method to use. Each method supports detecting different
atmospheric disturbances such as clouds, cloud shadows, aerosols, haze, ozone and/or water vapour in
optical imagery.
:param elevation_model: The digital elevation model to use. Set to `null` (the default) to allow the back-
end to choose, which will improve portability, but reduce reproducibility.
:param atmospheric_correction_options: Proprietary options for the atmospheric correction method.
Specifying proprietary options will reduce portability.
:param cloud_detection_options: Proprietary options for the cloud detection method. Specifying proprietary
options will reduce portability.
:return: Data cube containing bottom of atmosphere reflectances for each spectral band in the source data
cube, with atmospheric disturbances like clouds and cloud shadows removed. No-data values (null) are
directly set in the bands. Depending on the methods used, several additional bands will be added to the
data cube: Data cube containing bottom of atmosphere reflectances for each spectral band in the source
data cube, with atmospheric disturbances like clouds and cloud shadows removed. Depending on the methods
used, several additional bands will be added to the data cube: - `date` (optional): Specifies per-pixel
acquisition timestamps. - `incomplete-testing` (required): Identifies pixels with a value of 1 for which
the per-pixel tests (at least saturation, cloud and cloud shadows, see CARD4L specification for details)
have not all been successfully completed. Otherwise, the value is 0. - `saturation` (required) /
`saturation_{band}` (optional): Indicates where pixels in the input spectral bands are saturated (1) or not
(0). If the saturation is given per band, the band names are `saturation_{band}` with `{band}` being the
band name from the source data cube. - `cloud`, `shadow` (both required),`aerosol`, `haze`, `ozone`,
`water_vapor` (all optional): Indicates the probability of pixels being an atmospheric disturbance such as
clouds. All bands have values between 0 (clear) and 1, which describes the probability that it is an
atmospheric disturbance. - `snow-ice` (optional): Points to a file that indicates whether a pixel is
assessed as being snow/ice (1) or not (0). All values describe the probability and must be between 0 and 1.
- `land-water` (optional): Indicates whether a pixel is assessed as being land (1) or water (0). All values
describe the probability and must be between 0 and 1. - `incidence-angle` (optional): Specifies per-pixel
incidence angles in degrees. - `azimuth` (optional): Specifies per-pixel azimuth angles in degrees. - `sun-
azimuth:` (optional): Specifies per-pixel sun azimuth angles in degrees. - `sun-elevation` (optional):
Specifies per-pixel sun elevation angles in degrees. - `terrain-shadow` (optional): Indicates with a value
of 1 whether a pixel is not directly illuminated due to terrain shadowing. Otherwise, the value is 0. -
`terrain-occlusion` (optional): Indicates with a value of 1 whether a pixel is not visible to the sensor
due to terrain occlusion during off-nadir viewing. Otherwise, the value is 0. - `terrain-illumination`
(optional): Contains coefficients used for terrain illumination correction are provided for each pixel.
The data returned is CARD4L compliant with corresponding metadata.
"""
return _process('ard_surface_reflectance', data=data, atmospheric_correction_method=atmospheric_correction_method, cloud_detection_method=cloud_detection_method, elevation_model=elevation_model, atmospheric_correction_options=atmospheric_correction_options, cloud_detection_options=cloud_detection_options)
def array_append(data, value) -> ProcessBuilder:
"""
Append a value to an array
:param data: An array.
:param value: Value to append to the array.
:return: The new array with the value being appended.
"""
return _process('array_append', data=data, value=value)
def array_apply(data, process, context=UNSET) -> ProcessBuilder:
"""
Apply a process to each array element
:param data: An array.
:param process: A process that accepts and returns a single value and is applied on each individual value
in the array. The process may consist of multiple sub-processes and could, for example, consist of
processes such as ``abs()`` or ``linear_scale_range()``.
:param context: Additional data to be passed to the process.
:return: An array with the newly computed values. The number of elements are the same as for the original
array.
"""
return _process('array_apply', data=data, process=process, context=context)
def array_concat(array1, array2) -> ProcessBuilder:
"""
Merge two arrays
:param array1: The first array.
:param array2: The second array.
:return: The merged array.
"""
return _process('array_concat', array1=array1, array2=array2)
def array_contains(data, value) -> ProcessBuilder:
"""
Check whether the array contains a given value
:param data: List to find the value in.
:param value: Value to find in `data`.
:return: `true` if the list contains the value, false` otherwise.
"""
return _process('array_contains', data=data, value=value)
def array_create(data=UNSET, repeat=UNSET) -> ProcessBuilder:
"""
Create an array
:param data: A (native) array to fill the newly created array with. Defaults to an empty array.
:param repeat: The number of times the (native) array specified in `data` is repeatedly added after each
other to the new array being created. Defaults to `1`.
:return: The newly created array.
"""
return _process('array_create', data=data, repeat=repeat)
def array_create_labeled(data, labels) -> ProcessBuilder:
"""
Create a labeled array
:param data: An array of values to be used.
:param labels: An array of labels to be used.
:return: The newly created labeled array.
"""
return _process('array_create_labeled', data=data, labels=labels)
def array_element(data, index=UNSET, label=UNSET, return_nodata=UNSET) -> ProcessBuilder:
"""
Get an element from an array
:param data: An array.
:param index: The zero-based index of the element to retrieve.
:param label: The label of the element to retrieve. Throws an `ArrayNotLabeled` exception, if the given
array is not a labeled array and this parameter is set.
:param return_nodata: By default this process throws an `ArrayElementNotAvailable` exception if the index
or label is invalid. If you want to return `null` instead, set this flag to `true`.
:return: The value of the requested element.
"""
return _process('array_element', data=data, index=index, label=label, return_nodata=return_nodata)
def array_filter(data, condition, context=UNSET) -> ProcessBuilder:
"""
Filter an array based on a condition
:param data: An array.
:param condition: A condition that is evaluated against each value, index and/or label in the array. Only
the array elements for which the condition returns `true` are preserved.
:param context: Additional data to be passed to the condition.
:return: An array filtered by the specified condition. The number of elements are less than or equal
compared to the original array.
"""
return _process('array_filter', data=data, condition=condition, context=context)
def array_find(data, value) -> ProcessBuilder:
"""
Get the index for a value in an array
:param data: List to find the value in.
:param value: Value to find in `data`.
:return: The index of the first element with the specified value. If no element was found, `null` is
returned.
"""
return _process('array_find', data=data, value=value)
def array_find_label(data, label) -> ProcessBuilder:
"""
Get the index for a label in a labeled array
:param data: List to find the label in.
:param label: Label to find in `data`.
:return: The index of the element with the specified label assigned. If no such label was found, `null` is
returned.
"""
return _process('array_find_label', data=data, label=label)
def array_interpolate_linear(data) -> ProcessBuilder:
"""
One-dimensional linear interpolation for arrays
:param data: An array of numbers and no-data values. If the given array is a labeled array, the labels
must have a natural/inherent label order and the process expects the labels to be sorted accordingly. This
is the default behavior in openEO for spatial and temporal dimensions.
:return: An array with no-data values being replaced with interpolated values. If not at least 2 numerical
values are available in the array, the array stays the same.
"""
return _process('array_interpolate_linear', data=data)
def array_labels(data) -> ProcessBuilder:
"""
Get the labels for an array
:param data: An array with labels.
:return: The labels as an array.
"""
return _process('array_labels', data=data)
def array_modify(data, values, index, length=UNSET) -> ProcessBuilder:
"""
Change the content of an array (insert, remove, update)
:param data: An array.
:param values: The values to fill the array with.
:param index: The index of the element to insert the value(s) before. If the index is greater than the
number of elements, the process throws an `ArrayElementNotAvailable` exception. To insert after the last
element, there are two options: 1. Use the simpler processes ``array_append()`` to append a single value
or ``array_concat`` to append multiple values. 2. Specify the number of elements in the array. You can
retrieve the number of elements with the process ``count()``, having the parameter `condition` set to
`true`.
:param length: The number of elements to replace. This parameter has no effect in case the given `index`
does not exist in the array given.
:return: An array with values added, updated or removed.
"""
return _process('array_modify', data=data, values=values, index=index, length=length)
def arsinh(x) -> ProcessBuilder:
"""
Inverse hyperbolic sine
:param x: A number.
:return: The computed angle in radians.
"""
return _process('arsinh', x=x)
def artanh(x) -> ProcessBuilder:
"""
Inverse hyperbolic tangent
:param x: A number.
:return: The computed angle in radians.
"""
return _process('artanh', x=x)
def atmospheric_correction(data, method, elevation_model=UNSET, options=UNSET) -> ProcessBuilder:
"""
Apply atmospheric correction
:param data: Data cube containing multi-spectral optical top of atmosphere reflectances to be corrected.
:param method: The atmospheric correction method to use. To get reproducible results, you have to set a
specific method. Set to `null` to allow the back-end to choose, which will improve portability, but reduce
reproducibility as you *may* get different results if you run the processes multiple times.
:param elevation_model: The digital elevation model to use. Set to `null` (the default) to allow the back-
end to choose, which will improve portability, but reduce reproducibility.
:param options: Proprietary options for the atmospheric correction method. Specifying proprietary options
will reduce portability.
:return: Data cube containing bottom of atmosphere reflectances.
"""
return _process('atmospheric_correction', data=data, method=method, elevation_model=elevation_model, options=options)
def between(x, min, max, exclude_max=UNSET) -> ProcessBuilder:
"""
Between comparison
:param x: The value to check.
:param min: Lower boundary (inclusive) to check against.
:param max: Upper boundary (inclusive) to check against.
:param exclude_max: Exclude the upper boundary `max` if set to `true`. Defaults to `false`.
:return: `true` if `x` is between the specified bounds, otherwise `false`.
"""
return _process('between', x=x, min=min, max=max, exclude_max=exclude_max)
def ceil(x) -> ProcessBuilder:
"""
Round fractions up
:param x: A number to round up.
:return: The number rounded up.
"""
return _process('ceil', x=x)
def climatological_normal(data, period, climatology_period=UNSET) -> ProcessBuilder:
"""
Compute climatology normals
:param data: A data cube with exactly one temporal dimension. The data cube must span at least the temporal
interval specified in the parameter `climatology-period`. Seasonal periods may span two consecutive years,
e.g. temporal winter that includes months December, January and February. If the required months before the
actual climate period are available, the season is taken into account. If not available, the first season
is not taken into account and the seasonal mean is based on one year less than the other seasonal normals.
The incomplete season at the end of the last year is never taken into account.
:param period: The time intervals to aggregate the average value for. The following pre-defined frequencies
are supported: * `day`: Day of the year * `month`: Month of the year * `climatology-period`: The period
specified in the `climatology-period`. * `season`: Three month periods of the calendar seasons (December -
February, March - May, June - August, September - November). * `tropical-season`: Six month periods of the
tropical seasons (November - April, May - October).
:param climatology_period: The climatology period as a closed temporal interval. The first element of the
array is the first year to be fully included in the temporal interval. The second element is the last year
to be fully included in the temporal interval. The default period is from 1981 until 2010 (both inclusive).
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except for the resolution and dimension labels of the temporal
dimension. The temporal dimension has the following dimension labels: * `day`: `001` - `365` * `month`:
`01` - `12` * `climatology-period`: `climatology-period` * `season`: `djf` (December - February), `mam`
(March - May), `jja` (June - August), `son` (September - November) * `tropical-season`: `ndjfma` (November
- April), `mjjaso` (May - October)
"""
return _process('climatological_normal', data=data, period=period, climatology_period=climatology_period)
def clip(x, min, max) -> ProcessBuilder:
"""
Clip a value between a minimum and a maximum
:param x: A number.
:param min: Minimum value. If the value is lower than this value, the process will return the value of this
parameter.
:param max: Maximum value. If the value is greater than this value, the process will return the value of
this parameter.
:return: The value clipped to the specified range.
"""
return _process('clip', x=x, min=min, max=max)
def cloud_detection(data, method, options=UNSET) -> ProcessBuilder:
"""
Create cloud masks
:param data: The source data cube containing multi-spectral optical top of the atmosphere (TOA)
reflectances on which to perform cloud detection.
:param method: The cloud detection method to use. To get reproducible results, you have to set a specific
method. Set to `null` to allow the back-end to choose, which will improve portability, but reduce
reproducibility as you *may* get different results if you run the processes multiple times.
:param options: Proprietary options for the cloud detection method. Specifying proprietary options will
reduce portability.
:return: A data cube with bands for the atmospheric disturbances. Each of the masks contains values between
0 and 1. The data cube has the same spatial and temporal dimensions as the source data cube and a dimension
that contains a dimension label for each of the supported/considered atmospheric disturbance.
"""
return _process('cloud_detection', data=data, method=method, options=options)
def constant(x) -> ProcessBuilder:
"""
Define a constant value
:param x: The value of the constant.
:return: The value of the constant.
"""
return _process('constant', x=x)
def cos(x) -> ProcessBuilder:
"""
Cosine
:param x: An angle in radians.
:return: The computed cosine of `x`.
"""
return _process('cos', x=x)
def cosh(x) -> ProcessBuilder:
"""
Hyperbolic cosine
:param x: An angle in radians.
:return: The computed hyperbolic cosine of `x`.
"""
return _process('cosh', x=x)
def count(data, condition=UNSET, context=UNSET) -> ProcessBuilder:
"""
Count the number of elements
:param data: An array with elements of any data type.
:param condition: A condition consists of one or more processes, which in the end return a boolean value.
It is evaluated against each element in the array. An element is counted only if the condition returns
`true`. Defaults to count valid elements in a list (see ``is_valid()``). Setting this parameter to boolean
`true` counts all elements in the list.
:param context: Additional data to be passed to the condition.
:return: The counted number of elements.
"""
return _process('count', data=data, condition=condition, context=context)
def create_raster_cube() -> ProcessBuilder:
"""
Create an empty raster data cube
:return: An empty raster data cube with zero dimensions.
"""
return _process('create_raster_cube', )
def cummax(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Cumulative maxima
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is set for all the following elements.
:return: An array with the computed cumulative maxima.
"""
return _process('cummax', data=data, ignore_nodata=ignore_nodata)
def cummax(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Cumulative maxima
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.
:return: An array with the computed cumulative maxima.
"""
return _process('cummax', data=data, ignore_nodata=ignore_nodata)
def cummin(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Cumulative minima
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is set for all the following elements.
:return: An array with the computed cumulative minima.
"""
return _process('cummin', data=data, ignore_nodata=ignore_nodata)
def cummin(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Cumulative minima
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.
:return: An array with the computed cumulative minima.
"""
return _process('cummin', data=data, ignore_nodata=ignore_nodata)
def cumproduct(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Cumulative products
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is set for all the following elements.
:return: An array with the computed cumulative products.
"""
return _process('cumproduct', data=data, ignore_nodata=ignore_nodata)
def cumproduct(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Cumulative products
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.
:return: An array with the computed cumulative products.
"""
return _process('cumproduct', data=data, ignore_nodata=ignore_nodata)
def cumsum(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Cumulative sums
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is set for all the following elements.
:return: An array with the computed cumulative sums.
"""
return _process('cumsum', data=data, ignore_nodata=ignore_nodata)
def cumsum(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Cumulative sums
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.
:return: An array with the computed cumulative sums.
"""
return _process('cumsum', data=data, ignore_nodata=ignore_nodata)
def date_shift(date, value, unit) -> ProcessBuilder:
"""
Manipulates dates and times by addition or subtraction
:param date: The date (and optionally time) to manipulate. If the given date doesn't include the time, the
process assumes that the time component is `00:00:00Z` (i.e. midnight, in UTC). The millisecond part of the
time is optional and defaults to `0` if not given.
:param value: The period of time in the unit given that is added (positive numbers) or subtracted (negative
numbers). The value `0` doesn't have any effect.
:param unit: The unit for the value given. The following pre-defined units are available: - millisecond:
Milliseconds - second: Seconds - leap seconds are ignored in computations. - minute: Minutes - hour: Hours
- day: Days - changes only the the day part of a date - week: Weeks (equivalent to 7 days) - month: Months
- year: Years Manipulations with the unit `year`, `month`, `week` or `day` do never change the time. If
any of the manipulations result in an invalid date or time, the corresponding part is rounded down to the
next valid date or time respectively. For example, adding a month to `2020-01-31` would result in
`2020-02-29`.
:return: The manipulated date. If a time component was given in the parameter `date`, the time component is
returned with the date.
"""
return _process('date_shift', date=date, value=value, unit=unit)
def debug(data, code=UNSET, level=UNSET, message=UNSET) -> ProcessBuilder:
"""
Publish debugging information
:param data: Data to publish.
:param code: An identifier to help identify the log entry in a bunch of other log entries.
:param level: The severity level of this message, defaults to `info`. Note that the level `error` forces
the computation to be stopped!
:param message: A message to send in addition to the data.
:return: Returns the data as passed to the `data` parameter.
"""
return _process('debug', data=data, code=code, level=level, message=message)
def debug(data, code=UNSET, level=UNSET, message=UNSET) -> ProcessBuilder:
"""
Publish debugging information
:param data: Data to publish.
:param code: An identifier to help identify the log entry in a bunch of other log entries.
:param level: The severity level of this message, defaults to `info`. Note that the level `error` forces
the computation to be stopped!
:param message: A message to send in addition to the data.
:return: The data as passed to the `data` parameter without any modification.
"""
return _process('debug', data=data, code=code, level=level, message=message)
def dimension_labels(data, dimension) -> ProcessBuilder:
"""
Get the dimension labels
:param data: The data cube.
:param dimension: The name of the dimension to get the labels for.
:return: The labels as an array.
"""
return _process('dimension_labels', data=data, dimension=dimension)
def divide(x, y) -> ProcessBuilder:
"""
Division of two numbers
:param x: The dividend.
:param y: The divisor.
:return: The computed result.
"""
return _process('divide', x=x, y=y)
def drop_dimension(data, name) -> ProcessBuilder:
"""
Remove a dimension
:param data: The data cube to drop a dimension from.
:param name: Name of the dimension to drop.
:return: A data cube without the specified dimension. The number of dimensions decreases by one, but the
dimension properties (name, type, labels, reference system and resolution) for all other dimensions remain
unchanged.
"""
return _process('drop_dimension', data=data, name=name)
def e() -> ProcessBuilder:
"""
Euler's number (e)
:return: The numerical value of Euler's number.
"""
return _process('e', )
def eq(x, y, delta=UNSET, case_sensitive=UNSET) -> ProcessBuilder:
"""
Equal to comparison
:param x: First operand.
:param y: Second operand.
:param delta: Only applicable for comparing two numbers. If this optional parameter is set to a positive
non-zero number the equality of two numbers is checked against a delta value. This is especially useful to
circumvent problems with floating-point inaccuracy in machine-based computation. This option is basically
an alias for the following computation: `lte(abs(minus([x, y]), delta)`
:param case_sensitive: Only applicable for comparing two strings. Case sensitive comparison can be disabled
by setting this parameter to `false`.
:return: `true` if `x` is equal to `y`, `null` if any operand is `null`, otherwise `false`.
"""
return _process('eq', x=x, y=y, delta=delta, case_sensitive=case_sensitive)
def exp(p) -> ProcessBuilder:
"""
Exponentiation to the base e
:param p: The numerical exponent.
:return: The computed value for *e* raised to the power of `p`.
"""
return _process('exp', p=p)
def extrema(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Minimum and maximum values
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that an array with two `null` values is returned if any
value is such a value.
:return: An array containing the minimum and maximum values for the specified numbers. The first element is
the minimum, the second element is the maximum. If the input array is empty both elements are set to
`null`.
"""
return _process('extrema', data=data, ignore_nodata=ignore_nodata)
def filter_bands(data, bands=UNSET, wavelengths=UNSET) -> ProcessBuilder:
"""
Filter the bands by names
:param data: A data cube with bands.
:param bands: A list of band names. Either the unique band name (metadata field `name` in bands) or one of
the common band names (metadata field `common_name` in bands). If the unique band name and the common name
conflict, the unique band name has a higher priority. The order of the specified array defines the order
of the bands in the data cube. If multiple bands match a common name, all matched bands are included in the
original order.
:param wavelengths: A list of sub-lists with each sub-list consisting of two elements. The first element is
the minimum wavelength and the second element is the maximum wavelength. Wavelengths are specified in
micrometers (μm). The order of the specified array defines the order of the bands in the data cube. If
multiple bands match the wavelengths, all matched bands are included in the original order.
:return: A data cube limited to a subset of its original bands. The dimensions and dimension properties
(name, type, labels, reference system and resolution) remain unchanged, except that the dimension of type
`bands` has less (or the same) dimension labels.
"""
return _process('filter_bands', data=data, bands=bands, wavelengths=wavelengths)
def filter_bbox(data, extent) -> ProcessBuilder:
"""
Spatial filter using a bounding box
:param data: A data cube.
:param extent: A bounding box, which may include a vertical axis (see `base` and `height`).
:return: A data cube restricted to the bounding box. The dimensions and dimension properties (name, type,
labels, reference system and resolution) remain unchanged, except that the spatial dimensions have less (or
the same) dimension labels.
"""
return _process('filter_bbox', data=data, extent=extent)
def filter_labels(data, condition, dimension, context=UNSET) -> ProcessBuilder:
"""
Filter dimension labels based on a condition
:param data: A data cube.
:param condition: A condition that is evaluated against each dimension label in the specified dimension. A
dimension label and the corresponding data is preserved for the given dimension, if the condition returns
`true`.
:param dimension: The name of the dimension to filter on. Fails with a `DimensionNotAvailable` error if the
specified dimension does not exist.
:param context: Additional data to be passed to the condition.
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except that the given dimension has less (or the same) dimension
labels.
"""
return _process('filter_labels', data=data, condition=condition, dimension=dimension, context=context)
def filter_labels(data, condition, dimension, context=UNSET) -> ProcessBuilder:
"""
Filter dimension labels based on a condition
:param data: A data cube.
:param condition: A condition that is evaluated against each dimension label in the specified dimension. A
dimension label and the corresponding data is preserved for the given dimension, if the condition returns
`true`.
:param dimension: The name of the dimension to filter on. Fails with a `DimensionNotAvailable` exception if
the specified dimension does not exist.
:param context: Additional data to be passed to the condition.
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except that the given dimension has less (or the same) dimension
labels.
"""
return _process('filter_labels', data=data, condition=condition, dimension=dimension, context=context)
def filter_spatial(data, geometries) -> ProcessBuilder:
"""
Spatial filter using geometries
:param data: A data cube.
:param geometries: One or more geometries used for filtering, specified as GeoJSON.
:return: A data cube restricted to the specified geometries. The dimensions and dimension properties (name,
type, labels, reference system and resolution) remain unchanged, except that the spatial dimensions have
less (or the same) dimension labels.
"""
return _process('filter_spatial', data=data, geometries=geometries)
def filter_temporal(data, extent, dimension=UNSET) -> ProcessBuilder:
"""
Temporal filter for a temporal intervals
:param data: A data cube.
:param extent: Left-closed temporal interval, i.e. an array with exactly two elements: 1. The first
element is the start of the temporal interval. The specified instance in time is **included** in the
interval. 2. The second element is the end of the temporal interval. The specified instance in time is
**excluded** from the interval. The specified temporal strings follow [RFC 3339](https://www.rfc-
editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the boundaries to `null`, but
never both.
:param dimension: The name of the temporal dimension to filter on. If no specific dimension is specified or
it is set to `null`, the filter applies to all temporal dimensions. Fails with a `DimensionNotAvailable`
exception if the specified dimension does not exist.
:return: A data cube restricted to the specified temporal extent. The dimensions and dimension properties
(name, type, labels, reference system and resolution) remain unchanged, except that the temporal dimensions
(determined by `dimensions` parameter) may have less dimension labels.
"""
return _process('filter_temporal', data=data, extent=extent, dimension=dimension)
def first(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
First element
:param data: An array with elements of any data type.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if the first value is such a
value.
:return: The first element of the input array.
"""
return _process('first', data=data, ignore_nodata=ignore_nodata)
def floor(x) -> ProcessBuilder:
"""
Round fractions down
:param x: A number to round down.
:return: The number rounded down.
"""
return _process('floor', x=x)
def gt(x, y) -> ProcessBuilder:
"""
Greater than comparison
:param x: First operand.
:param y: Second operand.
:return: `true` if `x` is strictly greater than `y` or `null` if any operand is `null`, otherwise `false`.
"""
return _process('gt', x=x, y=y)
def gte(x, y) -> ProcessBuilder:
"""
Greater than or equal to comparison
:param x: First operand.
:param y: Second operand.
:return: `true` if `x` is greater than or equal to `y`, `null` if any operand is `null`, otherwise `false`.
"""
return _process('gte', x=x, y=y)
def if_(value, accept, reject=UNSET) -> ProcessBuilder:
"""
If-Then-Else conditional
:param value: A boolean value.
:param accept: A value that is returned if the boolean value is `true`.
:param reject: A value that is returned if the boolean value is **not** `true`. Defaults to `null`.
:return: Either the `accept` or `reject` argument depending on the given boolean value.
"""
return _process('if', value=value, accept=accept, reject=reject)
def int(x) -> ProcessBuilder:
"""
Integer part of a number
:param x: A number.
:return: Integer part of the number.
"""
return _process('int', x=x)
def is_infinite(x) -> ProcessBuilder:
"""
Value is an infinite number
:param x: The data to check.
:return: `true` if the data is an infinite number, otherwise `false`.
"""
return _process('is_infinite', x=x)
def is_nan(x) -> ProcessBuilder:
"""
Value is not a number
:param x: The data to check.
:return: `true` if the data is not a number, otherwise `false`.
"""
return _process('is_nan', x=x)
def is_nodata(x) -> ProcessBuilder:
"""
Value is not a no-data value
:param x: The data to check.
:return: `true` if the data is a no-data value, otherwise `false`.
"""
return _process('is_nodata', x=x)
def is_valid(x) -> ProcessBuilder:
"""
Value is valid data
:param x: The data to check.
:return: `true` if the data is valid, otherwise `false`.
"""
return _process('is_valid', x=x)
def last(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Last element
:param data: An array with elements of any data type.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if the last value is such a value.
:return: The last element of the input array.
"""
return _process('last', data=data, ignore_nodata=ignore_nodata)
def linear_scale_range(x, inputMin, inputMax, outputMin=UNSET, outputMax=UNSET) -> ProcessBuilder:
"""
Linear transformation between two ranges
:param x: A number to transform. The number gets clipped to the bounds specified in `inputMin` and
`inputMax`.
:param inputMin: Minimum value the input can obtain.
:param inputMax: Maximum value the input can obtain.
:param outputMin: Minimum value of the desired output range.
:param outputMax: Maximum value of the desired output range.
:return: The transformed number.
"""
return _process('linear_scale_range', x=x, inputMin=inputMin, inputMax=inputMax, outputMin=outputMin, outputMax=outputMax)
def ln(x) -> ProcessBuilder:
"""
Natural logarithm
:param x: A number to compute the natural logarithm for.
:return: The computed natural logarithm.
"""
return _process('ln', x=x)
def load_collection(id, spatial_extent, temporal_extent, bands=UNSET, properties=UNSET) -> ProcessBuilder:
"""
Load a collection
:param id: The collection id.
:param spatial_extent: Limits the data to load from the collection to the specified bounding box or
polygons. The process puts a pixel into the data cube if the point at the pixel center intersects with the
bounding box or any of the polygons (as defined in the Simple Features standard by the OGC). The GeoJSON
can be one of the following feature types: * A `Polygon` or `MultiPolygon` geometry, * a `Feature` with a
`Polygon` or `MultiPolygon` geometry, * a `FeatureCollection` containing at least one `Feature` with
`Polygon` or `MultiPolygon` geometries, or * a `GeometryCollection` containing `Polygon` or `MultiPolygon`
geometries. To maximize interoperability, `GeometryCollection` should be avoided in favour of one of the
alternatives above. Set this parameter to `null` to set no limit for the spatial extent. Be careful with
this when loading large datasets! It is recommended to use this parameter instead of using
``filter_bbox()`` or ``filter_spatial()`` directly after loading unbounded data.
:param temporal_extent: Limits the data to load from the collection to the specified left-closed temporal
interval. Applies to all temporal dimensions. The interval has to be specified as an array with exactly two
elements: 1. The first element is the start of the temporal interval. The specified instance in time is
**included** in the interval. 2. The second element is the end of the temporal interval. The specified
instance in time is **excluded** from the interval. The specified temporal strings follow [RFC
3339](https://www.rfc-editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the
boundaries to `null`, but never both. Set this parameter to `null` to set no limit for the temporal
extent. Be careful with this when loading large datasets! It is recommended to use this parameter instead
of using ``filter_temporal()`` directly after loading unbounded data.
:param bands: Only adds the specified bands into the data cube so that bands that don't match the list of
band names are not available. Applies to all dimensions of type `bands`. Either the unique band name
(metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands)
can be specified. If the unique band name and the common name conflict, the unique band name has a higher
priority. The order of the specified array defines the order of the bands in the data cube. If multiple
bands match a common name, all matched bands are included in the original order. It is recommended to use
this parameter instead of using ``filter_bands()`` directly after loading unbounded data.
:param properties: Limits the data by metadata properties to include only data in the data cube which all
given conditions return `true` for (AND operation). Specify key-value-pairs with the key being the name of
the metadata property, which can be retrieved with the openEO Data Discovery for Collections. The value
must a condition (user-defined process) to be evaluated against the collection metadata, see the example.
:return: A data cube for further processing. The dimensions and dimension properties (name, type, labels,
reference system and resolution) correspond to the collection's metadata, but the dimension labels are
restricted as specified in the parameters.
"""
return _process('load_collection', id=id, spatial_extent=spatial_extent, temporal_extent=temporal_extent, bands=bands, properties=properties)
def load_result(id) -> ProcessBuilder:
"""
Load batch job results
:param id: The id of a batch job with results.
:return: A data cube for further processing.
"""
return _process('load_result', id=id)
def load_result(id) -> ProcessBuilder:
"""
Load batch job results
:param id: The id of a batch job with results.
:return: A data cube for further processing.
"""
return _process('load_result', id=id)
def load_uploaded_files(paths, format, options=UNSET) -> ProcessBuilder:
"""
Load files from the user workspace
:param paths: The files to read. Folders can't be specified, instead specify all files. An error is thrown
if a file can't be read.
:param format: The file format to read from. It must be one of the values that the server reports as
supported input file formats, which usually correspond to the short GDAL/OGR codes. If the format is not
suitable for loading the data, a `FormatUnsuitable` exception will be thrown. This parameter is *case
insensitive*.
:param options: The file format parameters to be used to read the files. Must correspond to the parameters
that the server reports as supported parameters for the chosen `format`. The parameter names and valid
values usually correspond to the GDAL/OGR format options.
:return: A data cube for further processing.
"""
return _process('load_uploaded_files', paths=paths, format=format, options=options)
def load_uploaded_files(paths, format, options=UNSET) -> ProcessBuilder:
"""
Load files from the user workspace
:param paths: The files to read. Folders can't be specified, specify all files instead. An exception is
thrown if a file can't be read.
:param format: The file format to read from. It must be one of the values that the server reports as
supported input file formats, which usually correspond to the short GDAL/OGR codes. If the format is not
suitable for loading the data, a `FormatUnsuitable` exception will be thrown. This parameter is *case
insensitive*.
:param options: The file format parameters to be used to read the files. Must correspond to the parameters
that the server reports as supported parameters for the chosen `format`. The parameter names and valid
values usually correspond to the GDAL/OGR format options.
:return: A data cube for further processing.
"""
return _process('load_uploaded_files', paths=paths, format=format, options=options)
def log(x, base) -> ProcessBuilder:
"""
Logarithm to a base
:param x: A number to compute the logarithm for.
:param base: The numerical base.
:return: The computed logarithm.
"""
return _process('log', x=x, base=base)
def lt(x, y) -> ProcessBuilder:
"""
Less than comparison
:param x: First operand.
:param y: Second operand.
:return: `true` if `x` is strictly less than `y`, `null` if any operand is `null`, otherwise `false`.
"""
return _process('lt', x=x, y=y)
def lte(x, y) -> ProcessBuilder:
"""
Less than or equal to comparison
:param x: First operand.
:param y: Second operand.
:return: `true` if `x` is less than or equal to `y`, `null` if any operand is `null`, otherwise `false`.
"""
return _process('lte', x=x, y=y)
def mask(data, mask, replacement=UNSET) -> ProcessBuilder:
"""
Apply a raster mask
:param data: A raster data cube.
:param mask: A mask as a raster data cube. Every pixel in `data` must have a corresponding element in
`mask`.
:param replacement: The value used to replace masked values with.
:return: A masked raster data cube with the same dimensions. The dimension properties (name, type, labels,
reference system and resolution) remain unchanged.
"""
return _process('mask', data=data, mask=mask, replacement=replacement)
def mask_polygon(data, mask, replacement=UNSET, inside=UNSET) -> ProcessBuilder:
"""
Apply a polygon mask
:param data: A raster data cube.
:param mask: A GeoJSON object containing at least one polygon. The provided feature types can be one of the
following: * A `Polygon` or `MultiPolygon` geometry, * a `Feature` with a `Polygon` or `MultiPolygon`
geometry, * a `FeatureCollection` containing at least one `Feature` with `Polygon` or `MultiPolygon`
geometries, or * a `GeometryCollection` containing `Polygon` or `MultiPolygon` geometries. To maximize
interoperability, `GeometryCollection` should be avoided in favour of one of the alternatives above.
:param replacement: The value used to replace masked values with.
:param inside: If set to `true` all pixels for which the point at the pixel center **does** intersect with
any polygon are replaced.
:return: A masked raster data cube with the same dimensions. The dimension properties (name, type, labels,
reference system and resolution) remain unchanged.
"""
return _process('mask_polygon', data=data, mask=mask, replacement=replacement, inside=inside)
def max(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Maximum value
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if any value is such a value.
:return: The maximum value.
"""
return _process('max', data=data, ignore_nodata=ignore_nodata)
def mean(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Arithmetic mean (average)
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if any value is such a value.
:return: The computed arithmetic mean.
"""
return _process('mean', data=data, ignore_nodata=ignore_nodata)
def median(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Statistical median
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if any value is such a value.
:return: The computed statistical median.
"""
return _process('median', data=data, ignore_nodata=ignore_nodata)
def merge_cubes(cube1, cube2, overlap_resolver=UNSET, context=UNSET) -> ProcessBuilder:
"""
Merge two data cubes
:param cube1: The first data cube.
:param cube2: The second data cube.
:param overlap_resolver: A reduction operator that resolves the conflict if the data overlaps. The reducer
must return a value of the same data type as the input values are. The reduction operator may be a single
process such as ``multiply()`` or consist of multiple sub-processes. `null` (the default) can be specified
if no overlap resolver is required.
:param context: Additional data to be passed to the overlap resolver.
:return: The merged data cube. See the process description for details regarding the dimensions and
dimension properties (name, type, labels, reference system and resolution).
"""
return _process('merge_cubes', cube1=cube1, cube2=cube2, overlap_resolver=overlap_resolver, context=context)
def min(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Minimum value
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if any value is such a value.
:return: The minimum value.
"""
return _process('min', data=data, ignore_nodata=ignore_nodata)
def mod(x, y) -> ProcessBuilder:
"""
Modulo
:param x: A number to be used as the dividend.
:param y: A number to be used as the divisor.
:return: The remainder after division.
"""
return _process('mod', x=x, y=y)
def multiply(x, y) -> ProcessBuilder:
"""
Multiplication of two numbers
:param x: The multiplier.
:param y: The multiplicand.
:return: The computed product of the two numbers.
"""
return _process('multiply', x=x, y=y)
def nan() -> ProcessBuilder:
"""
Not a Number (NaN)
:return: Returns `NaN`.
"""
return _process('nan', )
def ndvi(data, nir=UNSET, red=UNSET, target_band=UNSET) -> ProcessBuilder:
"""
Normalized Difference Vegetation Index
:param data: A raster data cube with two bands that have the common names `red` and `nir` assigned.
:param nir: The name of the NIR band. Defaults to the band that has the common name `nir` assigned. Either
the unique band name (metadata field `name` in bands) or one of the common band names (metadata field
`common_name` in bands) can be specified. If the unique band name and the common name conflict, the unique
band name has a higher priority.
:param red: The name of the red band. Defaults to the band that has the common name `red` assigned. Either
the unique band name (metadata field `name` in bands) or one of the common band names (metadata field
`common_name` in bands) can be specified. If the unique band name and the common name conflict, the unique
band name has a higher priority.
:param target_band: By default, the dimension of type `bands` is dropped. To keep the dimension specify a
new band name in this parameter so that a new dimension label with the specified name will be added for the
computed values.
:return: A raster data cube containing the computed NDVI values. The structure of the data cube differs
depending on the value passed to `target_band`: * `target_band` is `null`: The data cube does not contain
the dimension of type `bands`, the number of dimensions decreases by one. The dimension properties (name,
type, labels, reference system and resolution) for all other dimensions remain unchanged. * `target_band`
is a string: The data cube keeps the same dimensions. The dimension properties remain unchanged, but the
number of dimension labels for the dimension of type `bands` increases by one. The additional label is
named as specified in `target_band`.
"""
return _process('ndvi', data=data, nir=nir, red=red, target_band=target_band)
def neq(x, y, delta=UNSET, case_sensitive=UNSET) -> ProcessBuilder:
"""
Not equal to comparison
:param x: First operand.
:param y: Second operand.
:param delta: Only applicable for comparing two numbers. If this optional parameter is set to a positive
non-zero number the non-equality of two numbers is checked against a delta value. This is especially useful
to circumvent problems with floating-point inaccuracy in machine-based computation. This option is
basically an alias for the following computation: `gt(abs(minus([x, y]), delta)`
:param case_sensitive: Only applicable for comparing two strings. Case sensitive comparison can be disabled
by setting this parameter to `false`.
:return: `true` if `x` is *not* equal to `y`, `null` if any operand is `null`, otherwise `false`.
"""
return _process('neq', x=x, y=y, delta=delta, case_sensitive=case_sensitive)
def normalized_difference(x, y) -> ProcessBuilder:
"""
Normalized difference
:param x: The value for the first band.
:param y: The value for the second band.
:return: The computed normalized difference.
"""
return _process('normalized_difference', x=x, y=y)
def not_(x) -> ProcessBuilder:
"""
Inverting a boolean
:param x: Boolean value to invert.
:return: Inverted boolean value.
"""
return _process('not', x=x)
def or_(x, y) -> ProcessBuilder:
"""
Logical OR
:param x: A boolean value.
:param y: A boolean value.
:return: Boolean result of the logical OR.
"""
return _process('or', x=x, y=y)
def order(data, asc=UNSET, nodata=UNSET) -> ProcessBuilder:
"""
Create a permutation
:param data: An array to compute the order for.
:param asc: The default sort order is ascending, with smallest values first. To sort in reverse
(descending) order, set this parameter to `false`.
:param nodata: Controls the handling of no-data values (`null`). By default, they are removed. If set to
`true`, missing values in the data are put last; if set to `false`, they are put first.
:return: The computed permutation.
"""
return _process('order', data=data, asc=asc, nodata=nodata)
def pi() -> ProcessBuilder:
"""
Pi (π)
:return: The numerical value of Pi.
"""
return _process('pi', )
def power(base, p) -> ProcessBuilder:
"""
Exponentiation
:param base: The numerical base.
:param p: The numerical exponent.
:return: The computed value for `base` raised to the power of `p`.
"""
return _process('power', base=base, p=p)
def product(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Compute the product by multiplying numbers
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if any value is such a value.
:return: The computed product of the sequence of numbers.
"""
return _process('product', data=data, ignore_nodata=ignore_nodata)
def quantiles(data, probabilities=UNSET, q=UNSET, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Quantiles
:param data: An array of numbers.
:param probabilities: A list of probabilities to calculate quantiles for. The probabilities must be between
0 and 1.
:param q: Intervals to calculate quantiles for. Calculates q-quantiles with (nearly) equal-sized intervals.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that an array with `null` values is returned if any
element is such a value.
:return: An array with the computed quantiles. The list has either * as many elements as the given list of
`probabilities` had or * *`q`-1* elements. If the input array is empty the resulting array is filled with
as many `null` values as required according to the list above. See the 'Empty array' example for an
example.
"""
return _process('quantiles', data=data, probabilities=probabilities, q=q, ignore_nodata=ignore_nodata)
def rearrange(data, order) -> ProcessBuilder:
"""
Rearrange an array based on a permutation
:param data: The array to rearrange.
:param order: The permutation used for rearranging.
:return: The rearranged array.
"""
return _process('rearrange', data=data, order=order)
def reduce_dimension(data, reducer, dimension, context=UNSET) -> ProcessBuilder:
"""
Reduce dimensions
:param data: A data cube.
:param reducer: A reducer to apply on the specified dimension. A reducer is a single process such as
``mean()`` or a set of processes, which computes a single value for a list of values, see the category
'reducer' for such processes.
:param dimension: The name of the dimension over which to reduce. Fails with a `DimensionNotAvailable`
exception if the specified dimension does not exist.
:param context: Additional data to be passed to the reducer.
:return: A data cube with the newly computed values. It is missing the given dimension, the number of
dimensions decreases by one. The dimension properties (name, type, labels, reference system and resolution)
for all other dimensions remain unchanged.
"""
return _process('reduce_dimension', data=data, reducer=reducer, dimension=dimension, context=context)
def reduce_dimension_binary(data, reducer, dimension, context=UNSET) -> ProcessBuilder:
"""
Reduce dimensions using binary reduction
:param data: A data cube.
:param reducer: A reduction operator to be applied consecutively on pairs of values. It must be both
associative and commutative as the execution may be executed in parallel and therefore the order of
execution is arbitrary. The reduction operator may be a single process such as ``multiply()`` or consist of
multiple sub-processes.
:param dimension: The name of the dimension over which to reduce. Fails with a `DimensionNotAvailable`
error if the specified dimension does not exist.
:param context: Additional data to be passed to the reducer.
:return: A data cube with the newly computed values. It is missing the given dimension, the number of
dimensions decreases by one. The dimension properties (name, type, labels, reference system and resolution)
for all other dimensions remain unchanged.
"""
return _process('reduce_dimension_binary', data=data, reducer=reducer, dimension=dimension, context=context)
def reduce_spatial(data, reducer, context=UNSET) -> ProcessBuilder:
"""
Reduce spatial dimensions 'x' and 'y'
:param data: A data cube.
:param reducer: A reducer to apply on the horizontal spatial dimensions. A reducer is a single process such
as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category
'reducer' for such processes.
:param context: Additional data to be passed to the reducer.
:return: A data cube with the newly computed values. It is missing the horizontal spatial dimensions, the
number of dimensions decreases by two. The dimension properties (name, type, labels, reference system and
resolution) for all other dimensions remain unchanged.
"""
return _process('reduce_spatial', data=data, reducer=reducer, context=context)
def rename_dimension(data, source, target) -> ProcessBuilder:
"""
Rename a dimension
:param data: The data cube.
:param source: The current name of the dimension. Fails with a `DimensionNotAvailable` exception if the
specified dimension does not exist.
:param target: A new Name for the dimension. Fails with a `DimensionExists` exception if a dimension with
the specified name exists.
:return: A data cube with the same dimensions, but the name of one of the dimensions changes. The old name
can not be referred to any longer. The dimension properties (name, type, labels, reference system and
resolution) remain unchanged.
"""
return _process('rename_dimension', data=data, source=source, target=target)
def rename_labels(data, dimension, target, source=UNSET) -> ProcessBuilder:
"""
Rename dimension labels
:param data: The data cube.
:param dimension: The name of the dimension to rename the labels for.
:param target: The new names for the labels. The dimension labels in the data cube are expected to be
enumerated if the parameter `target` is not specified. If a target dimension label already exists in the
data cube, a `LabelExists` exception is thrown.
:param source: The names of the labels as they are currently in the data cube. The array defines an
unsorted and potentially incomplete list of labels that should be renamed to the names available in the
corresponding array elements in the parameter `target`. If one of the source dimension labels doesn't
exist, the `LabelNotAvailable` exception is thrown. By default, the array is empty so that the dimension
labels in the data cube are expected to be enumerated.
:return: The data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except that for the given dimension the labels change. The old
labels can not be referred to any longer. The number of labels remains the same.
"""
return _process('rename_labels', data=data, dimension=dimension, target=target, source=source)
def resample_cube_spatial(data, target, method=UNSET) -> ProcessBuilder:
"""
Resample the spatial dimensions to match a target data cube
:param data: A data cube.
:param target: A data cube that describes the spatial target resolution.
:param method: Resampling method to use. The following options are available and are meant to align with
[`gdalwarp`](https://gdal.org/programs/gdalwarp.html#cmdoption-gdalwarp-r): * `average`: average (mean)
resampling, computes the weighted average of all valid pixels * `bilinear`: bilinear resampling * `cubic`:
cubic resampling * `cubicspline`: cubic spline resampling * `lanczos`: Lanczos windowed sinc resampling *
`max`: maximum resampling, selects the maximum value from all valid pixels * `med`: median resampling,
selects the median value of all valid pixels * `min`: minimum resampling, selects the minimum value from
all valid pixels * `mode`: mode resampling, selects the value which appears most often of all the sampled
points * `near`: nearest neighbour resampling (default) * `q1`: first quartile resampling, selects the
first quartile value of all valid pixels * `q3`: third quartile resampling, selects the third quartile
value of all valid pixels * `rms` root mean square (quadratic mean) of all valid pixels * `sum`: compute
the weighted sum of all valid pixels Valid pixels are determined based on the function ``is_valid()``.
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except for the resolution and dimension labels of the spatial
dimensions.
"""
return _process('resample_cube_spatial', data=data, target=target, method=method)
def resample_cube_temporal(data, target, method, dimension=UNSET, context=UNSET) -> ProcessBuilder:
"""
Resample a temporal dimension to match a target data cube
:param data: A data cube.
:param target: A data cube that describes the temporal target resolution.
:param method: A resampling method to be applied, could be a reducer for downsampling or other methods for
upsampling. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single
value for a list of values, see the category 'reducer' for such processes.
:param dimension: The name of the temporal dimension to resample, which must exist with this name in both
data cubes. If the dimension is not set or is set to `null`, the data cube is expected to only have one
temporal dimension. Fails with a `TooManyDimensions` error if it has more dimensions. Fails with a
`DimensionNotAvailable` error if the specified dimension does not exist.
:param context: Additional data to be passed to the process specified for the parameter `method`.
:return: A raster data cube with the same dimensions and the same dimension properties (name, type, labels,
reference system and resolution) for all non-temporal dimensions. For the temporal dimension the name and
type remain unchanged, but the reference system changes and the labels and resolution may change.
"""
return _process('resample_cube_temporal', data=data, target=target, method=method, dimension=dimension, context=context)
def resample_cube_temporal(data, target, dimension=UNSET, valid_within=UNSET) -> ProcessBuilder:
"""
Resample temporal dimensions to match a target data cube
:param data: A data cube with one or more temporal dimensions.
:param target: A data cube that describes the temporal target resolution.
:param dimension: The name of the temporal dimension to resample, which must exist with this name in both
data cubes. If the dimension is not set or is set to `null`, the process resamples all temporal dimensions
that exist with the same names in both data cubes. The following exceptions may occur: * A dimension is
given, but it does not exist in any of the data cubes: `DimensionNotAvailable` * A dimension is given, but
one of them is not temporal: `DimensionMismatch` * No specific dimension name is given and there are no
temporal dimensions with the same name in the data: `DimensionMismatch`
:param valid_within: Setting this parameter to a numerical value enables that the process searches for
valid values within the given period of days before and after the target timestamps. Valid values are
determined based on the function ``is_valid()``. For example, the limit of `7` for the target timestamps
`2020-01-15 12:00:00` looks for a nearest neighbor after `2020-01-08 12:00:00` and before `2020-01-22
12:00:00`. If no valid value is found within the given period, the value will be set to no-data (`null`).
:return: A raster data cube with the same dimensions and the same dimension properties (name, type, labels,
reference system and resolution) for all non-temporal dimensions. For the temporal dimension, the name and
type remain unchanged, but the dimension labels, resolution and reference system may change.
"""
return _process('resample_cube_temporal', data=data, target=target, dimension=dimension, valid_within=valid_within)
def resample_spatial(data, resolution=UNSET, projection=UNSET, method=UNSET, align=UNSET) -> ProcessBuilder:
"""
Resample and warp the spatial dimensions
:param data: A raster data cube.
:param resolution: Resamples the data cube to the target resolution, which can be specified either as
separate values for x and y or as a single value for both axes. Specified in the units of the target
projection. Doesn't change the resolution by default (`0`).
:param projection: Warps the data cube to the target projection, specified as as [EPSG
code](http://www.epsg-registry.org/), [WKT2 (ISO 19162)
string](http://docs.opengeospatial.org/is/18-010r7/18-010r7.html), [PROJ definition
(deprecated)](https://proj.org/usage/quickstart.html). By default (`null`), the projection is not changed.
:param method: Resampling method to use. The following options are available and are meant to align with
[`gdalwarp`](https://gdal.org/programs/gdalwarp.html#cmdoption-gdalwarp-r): * `average`: average (mean)
resampling, computes the weighted average of all valid pixels * `bilinear`: bilinear resampling * `cubic`:
cubic resampling * `cubicspline`: cubic spline resampling * `lanczos`: Lanczos windowed sinc resampling *
`max`: maximum resampling, selects the maximum value from all valid pixels * `med`: median resampling,
selects the median value of all valid pixels * `min`: minimum resampling, selects the minimum value from
all valid pixels * `mode`: mode resampling, selects the value which appears most often of all the sampled
points * `near`: nearest neighbour resampling (default) * `q1`: first quartile resampling, selects the
first quartile value of all valid pixels * `q3`: third quartile resampling, selects the third quartile
value of all valid pixels * `rms` root mean square (quadratic mean) of all valid pixels * `sum`: compute
the weighted sum of all valid pixels Valid pixels are determined based on the function ``is_valid()``.
:param align: Specifies to which corner of the spatial extent the new resampled data is aligned to.
:return: A raster data cube with values warped onto the new projection. It has the same dimensions and the
same dimension properties (name, type, labels, reference system and resolution) for all non-spatial or
vertical spatial dimensions. For the horizontal spatial dimensions the name and type remain unchanged, but
reference system, labels and resolution may change depending on the given parameters.
"""
return _process('resample_spatial', data=data, resolution=resolution, projection=projection, method=method, align=align)
def round(x, p=UNSET) -> ProcessBuilder:
"""
Round to a specified precision
:param x: A number to round.
:param p: A positive number specifies the number of digits after the decimal point to round to. A negative
number means rounding to a power of ten, so for example *-2* rounds to the nearest hundred. Defaults to
*0*.
:return: The rounded number.
"""
return _process('round', x=x, p=p)
def run_udf(data, udf, runtime, version=UNSET, context=UNSET) -> ProcessBuilder:
"""
Run a UDF
:param data: The data to be passed to the UDF as an array or raster data cube.
:param udf: Either source code, an absolute URL or a path to a UDF script.
:param runtime: A UDF runtime identifier available at the back-end.
:param version: An UDF runtime version. If set to `null`, the default runtime version specified for each
runtime is used.
:param context: Additional data such as configuration options to be passed to the UDF.
:return: The data processed by the UDF. * Returns a raster data cube, if a raster data cube is passed for
`data`. Details on the dimensions and dimension properties (name, type, labels, reference system and
resolution) depend on the UDF. * If an array is passed for `data`, the returned value can be of any data
type, but is exactly what the UDF returns.
"""
return _process('run_udf', data=data, udf=udf, runtime=runtime, version=version, context=context)
def run_udf_externally(data, url, context=UNSET) -> ProcessBuilder:
"""
Run an externally hosted UDF container
:param data: The data to be passed to the UDF as array or raster data cube.
:param url: URL to a remote UDF service.
:param context: Additional data such as configuration options that should be passed to the UDF.
:return: The data processed by the UDF service. * Returns a raster data cube, if a raster data cube is
passed for `data`. Details on the dimensions and dimension properties (name, type, labels, reference system
and resolution) depend on the UDF. * If an array is passed for `data`, the returned value can be of any
data type, but is exactly what the UDF returns.
"""
return _process('run_udf_externally', data=data, url=url, context=context)
def run_udf_externally(data, url, context=UNSET) -> ProcessBuilder:
"""
Run an externally hosted UDF container
:param data: The data to be passed to the UDF as an array or raster data cube.
:param url: Absolute URL to a remote UDF service.
:param context: Additional data such as configuration options to be passed to the UDF.
:return: The data processed by the UDF service. * Returns a raster data cube if a raster data cube is
passed for `data`. Details on the dimensions and dimension properties (name, type, labels, reference system
and resolution) depend on the UDF. * If an array is passed for `data`, the returned value can be of any
data type, but is exactly what the UDF returns.
"""
return _process('run_udf_externally', data=data, url=url, context=context)
def sar_backscatter(data, coefficient=UNSET, elevation_model=UNSET, mask=UNSET, contributing_area=UNSET, local_incidence_angle=UNSET, ellipsoid_incidence_angle=UNSET, noise_removal=UNSET) -> ProcessBuilder:
"""
Computes backscatter from SAR input
:param data: The source data cube containing SAR input.
:param coefficient: Select the radiometric correction coefficient. The following options are available: *
`beta0`: radar brightness * `sigma0-ellipsoid`: ground area computed with ellipsoid earth model *
`sigma0-terrain`: ground area computed with terrain earth model * `gamma0-ellipsoid`: ground area computed
with ellipsoid earth model in sensor line of sight * `gamma0-terrain`: ground area computed with terrain
earth model in sensor line of sight (default) * `null`: non-normalized backscatter
:param elevation_model: The digital elevation model to use. Set to `null` (the default) to allow the back-
end to choose, which will improve portability, but reduce reproducibility.
:param mask: If set to `true`, a data mask is added to the bands with the name `mask`. It indicates which
values are valid (1), invalid (0) or contain no-data (null).
:param contributing_area: If set to `true`, a DEM-based local contributing area band named
`contributing_area` is added. The values are given in square meters.
:param local_incidence_angle: If set to `true`, a DEM-based local incidence angle band named
`local_incidence_angle` is added. The values are given in degrees.
:param ellipsoid_incidence_angle: If set to `true`, an ellipsoidal incidence angle band named
`ellipsoid_incidence_angle` is added. The values are given in degrees.
:param noise_removal: If set to `false`, no noise removal is applied. Defaults to `true`, which removes
noise.
:return: Backscatter values corresponding to the chosen parametrization. The values are given in linear
scale.
"""
return _process('sar_backscatter', data=data, coefficient=coefficient, elevation_model=elevation_model, mask=mask, contributing_area=contributing_area, local_incidence_angle=local_incidence_angle, ellipsoid_incidence_angle=ellipsoid_incidence_angle, noise_removal=noise_removal)
def save_result(data, format, options=UNSET) -> ProcessBuilder:
"""
Save processed data to storage
:param data: The data to save.
:param format: The file format to save to. It must be one of the values that the server reports as
supported output file formats, which usually correspond to the short GDAL/OGR codes. If the format is not
suitable for storing the underlying data structure, a `FormatUnsuitable` exception will be thrown. This
parameter is *case insensitive*.
:param options: The file format parameters to be used to create the file(s). Must correspond to the
parameters that the server reports as supported parameters for the chosen `format`. The parameter names and
valid values usually correspond to the GDAL/OGR format options.
:return: `false` if saving failed, `true` otherwise.
"""
return _process('save_result', data=data, format=format, options=options)
def sd(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Standard deviation
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if any value is such a value.
:return: The computed sample standard deviation.
"""
return _process('sd', data=data, ignore_nodata=ignore_nodata)
def sgn(x) -> ProcessBuilder:
"""
Signum
:param x: A number.
:return: The computed signum value of `x`.
"""
return _process('sgn', x=x)
def sin(x) -> ProcessBuilder:
"""
Sine
:param x: An angle in radians.
:return: The computed sine of `x`.
"""
return _process('sin', x=x)
def sinh(x) -> ProcessBuilder:
"""
Hyperbolic sine
:param x: An angle in radians.
:return: The computed hyperbolic sine of `x`.
"""
return _process('sinh', x=x)
def sort(data, asc=UNSET, nodata=UNSET) -> ProcessBuilder:
"""
Sort data
:param data: An array with data to sort.
:param asc: The default sort order is ascending, with smallest values first. To sort in reverse
(descending) order, set this parameter to `false`.
:param nodata: Controls the handling of no-data values (`null`). By default, they are removed. If set to
`true`, missing values in the data are put last; if set to `false`, they are put first.
:return: The sorted array.
"""
return _process('sort', data=data, asc=asc, nodata=nodata)
def sqrt(x) -> ProcessBuilder:
"""
Square root
:param x: A number.
:return: The computed square root.
"""
return _process('sqrt', x=x)
def subtract(x, y) -> ProcessBuilder:
"""
Subtraction of two numbers
:param x: The minuend.
:param y: The subtrahend.
:return: The computed result.
"""
return _process('subtract', x=x, y=y)
def sum(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Compute the sum by adding up numbers
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if any value is such a value.
:return: The computed sum of the sequence of numbers.
"""
return _process('sum', data=data, ignore_nodata=ignore_nodata)
def tan(x) -> ProcessBuilder:
"""
Tangent
:param x: An angle in radians.
:return: The computed tangent of `x`.
"""
return _process('tan', x=x)
def tanh(x) -> ProcessBuilder:
"""
Hyperbolic tangent
:param x: An angle in radians.
:return: The computed hyperbolic tangent of `x`.
"""
return _process('tanh', x=x)
def text_begins(data, pattern, case_sensitive=UNSET) -> ProcessBuilder:
"""
Text begins with another text
:param data: Text in which to find something at the beginning.
:param pattern: Text to find at the beginning of `data`. Regular expressions are not supported.
:param case_sensitive: Case sensitive comparison can be disabled by setting this parameter to `false`.
:return: `true` if `data` begins with `pattern`, false` otherwise.
"""
return _process('text_begins', data=data, pattern=pattern, case_sensitive=case_sensitive)
def text_contains(data, pattern, case_sensitive=UNSET) -> ProcessBuilder:
"""
Text contains another text
:param data: Text in which to find something in.
:param pattern: Text to find in `data`. Regular expressions are not supported.
:param case_sensitive: Case sensitive comparison can be disabled by setting this parameter to `false`.
:return: `true` if `data` contains the `pattern`, false` otherwise.
"""
return _process('text_contains', data=data, pattern=pattern, case_sensitive=case_sensitive)
def text_ends(data, pattern, case_sensitive=UNSET) -> ProcessBuilder:
"""
Text ends with another text
:param data: Text in which to find something at the end.
:param pattern: Text to find at the end of `data`. Regular expressions are not supported.
:param case_sensitive: Case sensitive comparison can be disabled by setting this parameter to `false`.
:return: `true` if `data` ends with `pattern`, false` otherwise.
"""
return _process('text_ends', data=data, pattern=pattern, case_sensitive=case_sensitive)
def text_merge(data, separator=UNSET) -> ProcessBuilder:
"""
Concatenate elements to a single text
:param data: A set of elements. Numbers, boolean values and null values get converted to their (lower case)
string representation. For example: `1` (integer), `-1.5` (number), `true` / `false` (boolean values)
:param separator: A separator to put between each of the individual texts. Defaults to an empty string.
:return: A string containing a string representation of all the array elements in the same order, with the
separator between each element.
"""
return _process('text_merge', data=data, separator=separator)
def trim_cube(data) -> ProcessBuilder:
"""
Remove dimension labels with no-data values
:param data: A raster data cube to trim.
:return: A trimmed raster data cube with the same dimensions. The dimension properties name, type,
reference system and resolution remain unchanged. The number of dimension labels may decrease.
"""
return _process('trim_cube', data=data)
def variance(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Variance
:param data: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if any value is such a value.
:return: The computed sample variance.
"""
return _process('variance', data=data, ignore_nodata=ignore_nodata)
def xor(x, y) -> ProcessBuilder:
"""
Logical XOR (exclusive or)
:param x: A boolean value.
:param y: A boolean value.
:return: Boolean result of the logical XOR.
"""
return _process('xor', x=x, y=y)
```
#### File: openeo/rest/imagecollectionclient.py
```python
import copy
import datetime
import logging
import pathlib
import typing
from typing import List, Dict, Union, Tuple
from shapely.geometry import Polygon, MultiPolygon, mapping
from openeo.imagecollection import ImageCollection
from openeo.internal.graphbuilder_040 import GraphBuilder
from openeo.metadata import CollectionMetadata
from openeo.rest import BandMathException
from openeo.rest.job import RESTJob
from openeo.rest.service import Service
from openeo.util import get_temporal_extent, legacy_alias, dict_no_none, guess_format
if hasattr(typing, 'TYPE_CHECKING') and typing.TYPE_CHECKING:
# Imports for type checking only (circular import issue at runtime). `hasattr` is Python 3.5 workaround #210
from openeo.rest.connection import Connection
_log = logging.getLogger(__name__)
class ImageCollectionClient(ImageCollection):
"""Class representing an Image Collection. (In the API as 'imagery')
Supports 0.4.
"""
def __init__(self, node_id: str, builder: GraphBuilder, session: 'Connection', metadata: CollectionMetadata = None):
self.node_id = node_id
self.builder= builder
self.session = session
self.graph = builder.processes
self.metadata = CollectionMetadata.get_or_create(metadata)
def __str__(self):
return "ImageCollection: %s" % self.node_id
@property
def _api_version(self):
return self.session.capabilities().api_version_check
@property
def connection(self):
return self.session
@classmethod
def load_collection(
cls, collection_id: str, session: 'Connection' = None,
spatial_extent: Union[Dict[str, float], None] = None,
temporal_extent: Union[List[Union[str,datetime.datetime,datetime.date]], None] = None,
bands: Union[List[str], None] = None,
fetch_metadata=True
):
"""
Create a new Image Collection/Raster Data cube.
:param collection_id: A collection id, should exist in the backend.
:param session: The session to use to connect with the backend.
:param spatial_extent: limit data to specified bounding box or polygons
:param temporal_extent: limit data to specified temporal interval
:param bands: only add the specified bands
:return:
"""
# TODO: rename function to load_collection for better similarity with corresponding process id?
builder = GraphBuilder()
process_id = 'load_collection'
normalized_temporal_extent = list(get_temporal_extent(extent=temporal_extent)) if temporal_extent is not None else None
arguments = {
'id': collection_id,
'spatial_extent': spatial_extent,
'temporal_extent': normalized_temporal_extent,
}
metadata = session.collection_metadata(collection_id) if fetch_metadata else None
if bands:
if isinstance(bands, str):
bands = [bands]
if metadata:
bands = [metadata.band_dimension.band_name(b, allow_common=False) for b in bands]
arguments['bands'] = bands
node_id = builder.process(process_id, arguments)
if bands:
metadata = metadata.filter_bands(bands)
return cls(node_id, builder, session, metadata=metadata)
create_collection = legacy_alias(load_collection, "create_collection")
@classmethod
def load_disk_collection(cls, session: 'Connection', file_format: str, glob_pattern: str, **options) -> 'ImageCollection':
"""
Loads image data from disk as an ImageCollection.
:param session: The session to use to connect with the backend.
:param file_format: the file format, e.g. 'GTiff'
:param glob_pattern: a glob pattern that matches the files to load from disk
:param options: options specific to the file format
:return: the data as an ImageCollection
"""
builder = GraphBuilder()
process_id = 'load_disk_data'
arguments = {
'format': file_format,
'glob_pattern': glob_pattern,
'options': options
}
node_id = builder.process(process_id, arguments)
return cls(node_id, builder, session, metadata={})
def _filter_temporal(self, start: str, end: str) -> 'ImageCollection':
return self.graph_add_process(
process_id='filter_temporal',
args={
'data': {'from_node': self.node_id},
'extent': [start, end]
}
)
def filter_bbox(self, west, east, north, south, crs=None, base=None, height=None) -> 'ImageCollection':
extent = {'west': west, 'east': east, 'north': north, 'south': south}
extent.update(dict_no_none(crs=crs, base=base, height=height))
return self.graph_add_process(
process_id='filter_bbox',
args={
'data': {'from_node': self.node_id},
'extent': extent
}
)
def filter_bands(self, bands: Union[List[Union[str, int]], str]) -> 'ImageCollection':
"""
Filter the imagery by the given bands
:param bands: list of band names, common names or band indices. Single band name can also be given as string.
:return a DataCube instance
"""
if isinstance(bands, str):
bands = [bands]
bands = [self.metadata.band_dimension.band_name(b) for b in bands]
im = self.graph_add_process(
process_id='filter_bands',
args={
'data': {'from_node': self.node_id},
'bands': [b for b in bands if b in self.metadata.band_names],
'common_names': [b for b in bands if b in self.metadata.band_common_names]
})
if im.metadata:
im.metadata = im.metadata.filter_bands(bands)
return im
band_filter = legacy_alias(filter_bands, "band_filter")
def band(self, band: Union[str, int]) -> 'ImageCollection':
"""Filter the imagery by the given bands
:param band: band name, band common name or band index.
:return An ImageCollection instance
"""
process_id = 'reduce'
band_index = self.metadata.get_band_index(band)
args = {
'data': {'from_node': self.node_id},
'dimension': self.metadata.band_dimension.name,
'reducer': {
'callback': {
'r1': {
'arguments': {
'data': {
'from_argument': 'data'
},
'index': band_index
},
'process_id': 'array_element',
'result': True
}
}
}
}
return self.graph_add_process(process_id, args)
def resample_spatial(self, resolution: Union[float, Tuple[float, float]],
projection: Union[int, str] = None, method: str = 'near', align: str = 'upper-left'):
return self.graph_add_process('resample_spatial', {
'data': {'from_node': self.node_id},
'resolution': resolution,
'projection': projection,
'method': method,
'align': align
})
def subtract(self, other:Union[ImageCollection,Union[int,float]]):
"""
Subtract other from this datacube, so the result is: this - other
The number of bands in both data cubes has to be the same.
:param other:
:return ImageCollection: this - other
"""
operator = "subtract"
if isinstance(other, int) or isinstance(other, float):
return self._reduce_bands_binary_const(operator, other)
elif isinstance(other, ImageCollection):
return self._reduce_bands_binary(operator, other)
else:
raise ValueError("Unsupported right-hand operand: " + str(other))
def divide(self, other:Union[ImageCollection,Union[int,float]]):
"""
Subtraction other from this datacube, so the result is: this - other
The number of bands in both data cubes has to be the same.
:param other:
:return ImageCollection: this - other
"""
operator = "divide"
if isinstance(other, int) or isinstance(other, float):
return self._reduce_bands_binary_const(operator, other)
elif isinstance(other, ImageCollection):
return self._reduce_bands_binary(operator, other)
else:
raise ValueError("Unsupported right-hand operand: " + str(other))
def product(self, other:Union[ImageCollection,Union[int,float]]):
"""
Multiply other with this datacube, so the result is: this * other
The number of bands in both data cubes has to be the same.
:param other:
:return ImageCollection: this - other
"""
operator = "product"
if isinstance(other, int) or isinstance(other, float):
return self._reduce_bands_binary_const(operator, other)
elif isinstance(other, ImageCollection):
return self._reduce_bands_binary(operator, other)
else:
raise ValueError("Unsupported right-hand operand: " + str(other))
def logical_or(self, other: ImageCollection):
"""
Apply element-wise logical `or` operation
:param other:
:return ImageCollection: logical_or(this, other)
"""
return self._reduce_bands_binary(operator='or', other=other,arg_name='expressions')
def logical_and(self, other: ImageCollection):
"""
Apply element-wise logical `and` operation
:param other:
:return ImageCollection: logical_and(this, other)
"""
return self._reduce_bands_binary(operator='and', other=other,arg_name='expressions')
def __invert__(self):
"""
:return:
"""
operator = 'not'
my_builder = self._get_band_graph_builder()
new_builder = None
extend_previous_callback_graph = my_builder is not None
# TODO: why does these `add_process` calls use "expression" instead of "data" like the other cases?
if not extend_previous_callback_graph:
new_builder = GraphBuilder()
# TODO merge both process graphs?
new_builder.add_process(operator, expression={'from_argument': 'data'}, result=True)
else:
new_builder = my_builder.copy()
current_result = new_builder.find_result_node_id()
new_builder.processes[current_result]['result'] = False
new_builder.add_process(operator, expression={'from_node': current_result}, result=True)
return self._create_reduced_collection(new_builder, extend_previous_callback_graph)
def __ne__(self, other: Union[ImageCollection, Union[int, float]]):
return self._reduce_bands_binary_xy('neq', other)
def __eq__(self, other:Union[ImageCollection,Union[int,float]]):
"""
Pixelwise comparison of this data cube with another cube or constant.
:param other: Another data cube, or a constant
:return:
"""
return self._reduce_bands_binary_xy('eq', other)
def __gt__(self, other:Union[ImageCollection,Union[int,float]]):
"""
Pairwise comparison of the bands in this data cube with the bands in the 'other' data cube.
The number of bands in both data cubes has to be the same.
:param other:
:return ImageCollection: this + other
"""
return self._reduce_bands_binary_xy('gt', other)
def __ge__(self, other:Union[ImageCollection,Union[int,float]]):
return self._reduce_bands_binary_xy('gte', other)
def __lt__(self, other:Union[ImageCollection,Union[int,float]]):
"""
Pairwise comparison of the bands in this data cube with the bands in the 'other' data cube.
The number of bands in both data cubes has to be the same.
:param other:
:return ImageCollection: this + other
"""
return self._reduce_bands_binary_xy('lt', other)
def __le__(self, other:Union[ImageCollection,Union[int,float]]):
return self._reduce_bands_binary_xy('lte',other)
def _create_reduced_collection(self, callback_graph_builder, extend_previous_callback_graph):
if not extend_previous_callback_graph:
# there was no previous reduce step
args = {
'data': {'from_node': self.node_id},
'dimension': self.metadata.band_dimension.name,
'reducer': {
'callback': callback_graph_builder.processes
}
}
return self.graph_add_process("reduce", args)
else:
process_graph_copy = self.builder.shallow_copy()
process_graph_copy.processes[self.node_id]['arguments']['reducer']['callback'] = callback_graph_builder.processes
# now current_node should be a reduce node, let's modify it
# TODO: properly update metadata of reduced cube? #metadatareducedimension
return ImageCollectionClient(self.node_id, process_graph_copy, self.session, metadata=self.metadata)
def __truediv__(self, other):
return self.divide(other)
def __sub__(self, other):
return self.subtract(other)
def __radd__(self, other):
return self.add(other)
def __add__(self, other):
return self.add(other)
def __neg__(self):
return self.product(-1)
def __mul__(self, other):
return self.product(other)
def __rmul__(self, other):
return self.product(other)
def __or__(self, other):
return self.logical_or(other)
def __and__(self, other):
return self.logical_and(other)
def add(self, other:Union[ImageCollection,Union[int,float]]):
"""
Pairwise addition of the bands in this data cube with the bands in the 'other' data cube.
The number of bands in both data cubes has to be the same.
:param other:
:return ImageCollection: this + other
"""
operator = "sum"
if isinstance(other, int) or isinstance(other, float):
return self._reduce_bands_binary_const(operator, other)
elif isinstance(other, ImageCollection):
return self._reduce_bands_binary(operator, other)
else:
raise ValueError("Unsupported right-hand operand: " + str(other))
def _reduce_bands_binary(self, operator, other: 'ImageCollectionClient',arg_name='data'):
# first we create the callback
my_builder = self._get_band_graph_builder()
other_builder = other._get_band_graph_builder()
merged = GraphBuilder.combine(
operator=operator,
first=my_builder or {'from_argument': 'data'},
second=other_builder or {'from_argument': 'data'},
arg_name=arg_name)
# callback is ready, now we need to properly set up the reduce process that will invoke it
if my_builder is None and other_builder is None:
# there was no previous reduce step, perhaps this is a cube merge?
# cube merge is happening when node id's differ, otherwise we can use regular reduce
if (self.node_id != other.node_id):
# we're combining data from two different datacubes: http://api.openeo.org/v/0.4.0/processreference/#merge_cubes
# set result node id's first, to keep track
my_builder = self.builder
my_builder.processes[self.node_id]['result'] = True
other_builder = other.builder
other_builder.processes[other.node_id]['result'] = True
cubes_merged = GraphBuilder.combine(operator="merge_cubes",
first=my_builder,
second=other_builder, arg_name="cubes")
node_id = cubes_merged.find_result_node_id()
the_node = cubes_merged.processes[node_id]
the_node["result"] = False
cubes = the_node["arguments"]["cubes"]
the_node["arguments"]["cube1"] = cubes[0]
the_node["arguments"]["cube2"] = cubes[1]
del the_node["arguments"]["cubes"]
#there can be only one process for now
cube_list = list(merged.processes.values())[0]["arguments"][arg_name]
assert len(cube_list) == 2
# it is really not clear if this is the agreed way to go
cube_list[0]["from_argument"] = "x"
cube_list[1]["from_argument"] = "y"
the_node["arguments"]["overlap_resolver"] = {
'callback': merged.processes
}
the_node["arguments"]["binary"] = True
return ImageCollectionClient(node_id, cubes_merged, self.session, metadata=self.metadata)
else:
args = {
'data': {'from_node': self.node_id},
'reducer': {
'callback': merged.processes
}
}
return self.graph_add_process("reduce", args)
else:
left_data_arg = self.builder.processes[self.node_id]["arguments"]["data"]
right_data_arg = other.builder.processes[other.node_id]["arguments"]["data"]
if left_data_arg != right_data_arg:
raise BandMathException("'Band math' between bands of different image collections is not supported yet.")
node_id = self.node_id
reducing_graph = self
if reducing_graph.graph[node_id]["process_id"] != "reduce":
node_id = other.node_id
reducing_graph = other
new_builder = reducing_graph.builder.shallow_copy()
new_builder.processes[node_id]['arguments']['reducer']['callback'] = merged.processes
# now current_node should be a reduce node, let's modify it
# TODO: properly update metadata of reduced cube? #metadatareducedimension
return ImageCollectionClient(node_id, new_builder, reducing_graph.session, metadata=self.metadata)
def _reduce_bands_binary_xy(self,operator,other:Union[ImageCollection,Union[int,float]]):
"""
Pixelwise comparison of this data cube with another cube or constant.
:param other: Another data cube, or a constant
:return:
"""
if isinstance(other, int) or isinstance(other, float):
my_builder = self._get_band_graph_builder()
new_builder = None
extend_previous_callback_graph = my_builder is not None
if not extend_previous_callback_graph:
new_builder = GraphBuilder()
# TODO merge both process graphs?
new_builder.add_process(operator, x={'from_argument': 'data'}, y = other, result=True)
else:
new_builder = my_builder.shallow_copy()
current_result = new_builder.find_result_node_id()
new_builder.processes[current_result]['result'] = False
new_builder.add_process(operator, x={'from_node': current_result}, y = other, result=True)
return self._create_reduced_collection(new_builder, extend_previous_callback_graph)
elif isinstance(other, ImageCollection):
return self._reduce_bands_binary(operator, other)
else:
raise ValueError("Unsupported right-hand operand: " + str(other))
def _reduce_bands_binary_const(self, operator, other:Union[int,float]):
my_builder = self._get_band_graph_builder()
new_builder = None
extend_previous_callback_graph = my_builder is not None
if not extend_previous_callback_graph:
new_builder = GraphBuilder()
# TODO merge both process graphs?
new_builder.add_process(operator, data=[{'from_argument': 'data'}, other], result=True)
else:
current_result = my_builder.find_result_node_id()
new_builder = my_builder.shallow_copy()
new_builder.processes[current_result]['result'] = False
new_builder.add_process(operator, data=[{'from_node': current_result}, other], result=True)
return self._create_reduced_collection(new_builder,extend_previous_callback_graph)
def _get_band_graph_builder(self):
current_node = self.graph[self.node_id]
if current_node["process_id"] == "reduce":
# TODO: check "dimension" of "reduce" in some way?
callback_graph = current_node["arguments"]["reducer"]["callback"]
return GraphBuilder.from_process_graph(callback_graph)
return None
def add_dimension(self, name: str, label: Union[str, int, float], type: str = "other"):
if type == "bands" and self.metadata.has_band_dimension():
# TODO: remove old "bands" dimension in appropriate places (see #metadatareducedimension)
_log.warning('Adding new "bands" dimension on top of existing one.')
return self.graph_add_process(
process_id='add_dimension',
args={
'data': {'from_node': self.node_id},
'name': name, 'value': label, 'type': type,
},
metadata=self.metadata.add_dimension(name, label, type)
)
def zonal_statistics(self, regions, func, scale=1000, interval="day") -> 'ImageCollection':
"""Calculates statistics for each zone specified in a file.
:param regions: GeoJSON or a path to a GeoJSON file containing the
regions. For paths you must specify the path to a
user-uploaded file without the user id in the path.
:param func: Statistical function to calculate for the specified
zones. example values: min, max, mean, median, mode
:param scale: A nominal scale in meters of the projection to work
in. Defaults to 1000.
:param interval: Interval to group the time series. Allowed values:
day, wee, month, year. Defaults to day.
:return: An ImageCollection instance
"""
regions_geojson = regions
if isinstance(regions,Polygon) or isinstance(regions,MultiPolygon):
regions_geojson = mapping(regions)
process_id = 'zonal_statistics'
args = {
'data': {'from_node': self.node_id},
'regions': regions_geojson,
'func': func,
'scale': scale,
'interval': interval
}
return self.graph_add_process(process_id, args)
def apply_dimension(self, code: str, runtime=None, version="latest", dimension='t', target_dimension=None) -> 'ImageCollection':
"""
Applies an n-ary process (i.e. takes an array of pixel values instead of a single pixel value) to a raster data cube.
In contrast, the process apply applies an unary process to all pixel values.
By default, apply_dimension applies the the process on all pixel values in the data cube as apply does, but the parameter dimension can be specified to work only on a particular dimension only. For example, if the temporal dimension is specified the process will work on a time series of pixel values.
The n-ary process must return as many elements in the returned array as there are in the input array. Otherwise a CardinalityChanged error must be returned.
:param code: UDF code or process identifier
:param runtime:
:param version:
:param dimension:
:return:
:raises: CardinalityChangedError
"""
process_id = 'apply_dimension'
if runtime:
callback = {
'udf': self._create_run_udf(code, runtime, version)
}
else:
callback = {
'process': {
"arguments": {
"data": {
"from_argument": "data"
}
},
"process_id": code,
"result": True
}
}
args = {
'data': {
'from_node': self.node_id
},
'dimension': self.metadata.assert_valid_dimension(dimension),
'process': {
'callback': callback
}
}
return self.graph_add_process(process_id, args)
def reduce_bands_udf(self, code: str, runtime="Python", version="latest") -> 'ImageCollection':
"""
Reduce "band" dimension with a UDF
"""
process_id = 'reduce'
args = {
'data': {
'from_node': self.node_id
},
'dimension': self.metadata.band_dimension.name,
'binary': False,
'reducer': {
'callback': {
'udf': self._create_run_udf(code, runtime, version)
}
}
}
return self.graph_add_process(process_id, args)
def _create_run_udf(self, code, runtime, version):
return {
"arguments": {
"data": {
"from_argument": "data"
},
"runtime": runtime,
"version": version,
"udf": code
},
"process_id": "run_udf",
"result": True
}
def reduce_temporal_udf(self, code: str, runtime="Python", version="latest"):
"""
Apply reduce (`reduce_dimension`) process with given UDF along temporal dimension.
:param code: The UDF code, compatible with the given runtime and version
:param runtime: The UDF runtime
:param version: The UDF runtime version
"""
process_id = 'reduce'
args = {
'data': {
'from_node': self.node_id
},
'dimension': self.metadata.temporal_dimension.name,
'binary': False,
'reducer': {
'callback': {
'udf': self._create_run_udf(code, runtime, version)
}
}
}
return self.graph_add_process(process_id, args)
reduce_tiles_over_time = legacy_alias(reduce_temporal_udf, "reduce_tiles_over_time")
def apply(self, process: str, data_argument='data',arguments={}) -> 'ImageCollection':
process_id = 'apply'
arguments[data_argument] = \
{
"from_argument": data_argument
}
args = {
'data': {'from_node': self.node_id},
'process':{
'callback':{
"unary":{
"arguments":arguments,
"process_id":process,
"result":True
}
}
}
}
return self.graph_add_process(process_id, args)
def _reduce_time(self, reduce_function = "max"):
process_id = 'reduce'
args = {
'data': {'from_node': self.node_id},
'dimension': self.metadata.temporal_dimension.name,
'reducer': {
'callback': {
'r1': {
'arguments': {
'data': {
'from_argument': 'data'
}
},
'process_id': reduce_function,
'result': True
}
}
}
}
return self.graph_add_process(process_id, args)
def min_time(self) -> 'ImageCollection':
"""Finds the minimum value of a time series for all bands of the input dataset.
:return: An ImageCollection instance
"""
return self._reduce_time(reduce_function="min")
def max_time(self) -> 'ImageCollection':
"""
Finds the maximum value of a time series for all bands of the input dataset.
:return: An ImageCollection instance
"""
return self._reduce_time(reduce_function="max")
def mean_time(self) -> 'ImageCollection':
"""Finds the mean value of a time series for all bands of the input dataset.
:return: An ImageCollection instance
"""
return self._reduce_time(reduce_function="mean")
def median_time(self) -> 'ImageCollection':
"""Finds the median value of a time series for all bands of the input dataset.
:return: An ImageCollection instance
"""
return self._reduce_time(reduce_function="median")
def count_time(self) -> 'ImageCollection':
"""Counts the number of images with a valid mask in a time series for all bands of the input dataset.
:return: An ImageCollection instance
"""
return self._reduce_time(reduce_function="count")
def ndvi(self, name="ndvi") -> 'ImageCollection':
""" Normalized Difference Vegetation Index (NDVI)
:param name: Name of the newly created band
:return: An ImageCollection instance
"""
process_id = 'ndvi'
args = {
'data': {'from_node': self.node_id},
'name': name
}
return self.graph_add_process(process_id, args)
def normalized_difference(self, other: ImageCollection) -> 'ImageCollection':
return self._reduce_bands_binary("normalized_difference", other)
def linear_scale_range(self, input_min, input_max, output_min, output_max) -> 'ImageCollection':
""" Color stretching
:param input_min: Minimum input value
:param input_max: Maximum input value
:param output_min: Minimum output value
:param output_max: Maximum output value
:return An ImageCollection instance
"""
process_id = 'linear_scale_range'
args = {
'x': {'from_node': self.node_id},
'inputMin': input_min,
'inputMax': input_max,
'outputMin': output_min,
'outputMax': output_max
}
return self.graph_add_process(process_id, args)
def mask(self, polygon: Union[Polygon, MultiPolygon,str]=None, srs=None, rastermask: 'ImageCollection'=None,
replacement=None) -> 'ImageCollection':
"""
Mask the image collection using either a polygon or a raster mask.
All pixels outside the polygon should be set to the nodata value.
All pixels inside, or intersecting the polygon should retain their original value.
All pixels are replaced for which the corresponding pixels in the mask are non-zero (for numbers) or True
(for boolean values).
The pixel values are replaced with the value specified for replacement, which defaults to None (no data).
No data values will be left untouched by the masking operation.
# TODO: just provide a single `mask` argument and detect the type: polygon or process graph
# TODO: also see `mask` vs `mask_polygon` processes in https://github.com/Open-EO/openeo-processes/pull/110
:param polygon: A polygon, provided as a :class:`shapely.geometry.Polygon` or :class:`shapely.geometry.MultiPolygon`, or a filename pointing to a valid vector file
:param srs: The reference system of the provided polygon, by default this is Lat Lon (EPSG:4326).
:param rastermask: the raster mask
:param replacement: the value to replace the masked pixels with
:raise: :class:`ValueError` if a polygon is supplied and its area is 0.
:return: A new ImageCollection, with the mask applied.
"""
mask = None
new_collection = None
if polygon is not None:
if isinstance(polygon, (str, pathlib.Path)):
# TODO: default to loading file client side?
# TODO: change read_vector to load_uploaded_files https://github.com/Open-EO/openeo-processes/pull/106
new_collection = self.graph_add_process('read_vector', args={
'filename': str(polygon)
})
mask = {
'from_node': new_collection.node_id
}
else:
if polygon.area == 0:
raise ValueError("Mask {m!s} has an area of {a!r}".format(m=polygon, a=polygon.area))
geojson = mapping(polygon)
if srs:
geojson['crs'] = {'type': 'name', 'properties': {'name': srs}}
mask = geojson
new_collection = self
elif rastermask is not None:
mask_node = rastermask.graph[rastermask.node_id]
mask_node['result']=True
new_collection = self._graph_merge(rastermask.graph)
#mask node id may have changed!
mask_id = new_collection.builder.find_result_node_id()
mask_node = new_collection.graph[mask_id]
mask_node['result']=False
mask = {
'from_node': mask_id
}
else:
raise AttributeError("mask process: either a polygon or a rastermask should be provided.")
process_id = 'mask'
args = {
'data': {'from_node': self.node_id},
'mask': mask
}
if replacement is not None:
args['replacement'] = replacement
return new_collection.graph_add_process(process_id, args)
def merge(self, other: 'ImageCollection', overlap_resolver: str = None) -> 'ImageCollection':
other_node = other.graph[other.node_id]
other_node['result'] = True
new_collection = self._graph_merge(other.graph)
# mask node id may have changed!
mask_id = new_collection.builder.find_result_node_id()
other_node = new_collection.graph[mask_id]
other_node['result'] = False
cube2 = {
'from_node': mask_id
}
args = {
'cube1': {'from_node': self.node_id},
'cube2': cube2
}
if overlap_resolver:
# Assume simple math operation
# TODO support general overlap resolvers.
assert isinstance(overlap_resolver, str)
args["overlap_resolver"] = {"callback": {"r1": {
"process_id": overlap_resolver,
"arguments": {"data": [{"from_argument": "x"}, {"from_argument": "y"}]},
"result": True,
}}}
args["binary"] = True
return new_collection.graph_add_process('merge_cubes', args)
def apply_kernel(self, kernel, factor=1.0, border = 0, replace_invalid=0) -> 'ImageCollection':
"""
Applies a focal operation based on a weighted kernel to each value of the specified dimensions in the data cube.
:param kernel: The kernel to be applied on the data cube. It should be a 2D numpy array.
:param factor: A factor that is multiplied to each value computed by the focal operation. This is basically a shortcut for explicitly multiplying each value by a factor afterwards, which is often required for some kernel-based algorithms such as the Gaussian blur.
:return: A data cube with the newly computed values. The resolution, cardinality and the number of dimensions are the same as for the original data cube.
"""
return self.graph_add_process('apply_kernel', {
'data': {'from_node': self.node_id},
'kernel':kernel.tolist(),
'factor':factor,
'border': border,
'replace_invalid': replace_invalid
})
####VIEW methods #######
def polygonal_mean_timeseries(self, polygon: Union[Polygon, MultiPolygon, str]) -> 'ImageCollection':
"""
Extract a mean time series for the given (multi)polygon. Its points are
expected to be in the EPSG:4326 coordinate
reference system.
:param polygon: The (multi)polygon; or a file path or HTTP URL to a GeoJSON file or shape file
:return: ImageCollection
"""
return self._polygonal_timeseries(polygon, "mean")
def polygonal_histogram_timeseries(self, polygon: Union[Polygon, MultiPolygon, str]) -> 'ImageCollection':
"""
Extract a histogram time series for the given (multi)polygon. Its points are
expected to be in the EPSG:4326 coordinate
reference system.
:param polygon: The (multi)polygon; or a file path or HTTP URL to a GeoJSON file or shape file
:return: ImageCollection
"""
return self._polygonal_timeseries(polygon, "histogram")
def polygonal_median_timeseries(self, polygon: Union[Polygon, MultiPolygon, str]) -> 'ImageCollection':
"""
Extract a median time series for the given (multi)polygon. Its points are
expected to be in the EPSG:4326 coordinate
reference system.
:param polygon: The (multi)polygon; or a file path or HTTP URL to a GeoJSON file or shape file
:return: ImageCollection
"""
return self._polygonal_timeseries(polygon, "median")
def polygonal_standarddeviation_timeseries(self, polygon: Union[Polygon, MultiPolygon, str]) -> 'ImageCollection':
"""
Extract a time series of standard deviations for the given (multi)polygon. Its points are
expected to be in the EPSG:4326 coordinate
reference system.
:param polygon: The (multi)polygon; or a file path or HTTP URL to a GeoJSON file or shape file
:return: ImageCollection
"""
return self._polygonal_timeseries(polygon, "sd")
def _polygonal_timeseries(self, polygon: Union[Polygon, MultiPolygon, str], func: str) -> 'ImageCollection':
def graph_add_aggregate_process(graph) -> 'ImageCollection':
process_id = 'aggregate_polygon'
args = {
'data': {'from_node': self.node_id},
'polygons': polygons,
'reducer': {
'callback': {
"unary": {
"arguments": {
"data": {
"from_argument": "data"
}
},
"process_id": func,
"result": True
}
}
}
}
return graph.graph_add_process(process_id, args)
if isinstance(polygon, str):
with_read_vector = self.graph_add_process('read_vector', args={
'filename': polygon
})
polygons = {
'from_node': with_read_vector.node_id
}
return graph_add_aggregate_process(with_read_vector)
else:
polygons = mapping(polygon)
return graph_add_aggregate_process(self)
def save_result(self, format: str = "GTIFF", options: dict = None):
return self.graph_add_process(
process_id="save_result",
args={
"data": {"from_node": self.node_id},
"format": format,
"options": options or {}
}
)
def download(self, outputfile: str = None, format: str = None, options: dict = None):
"""Download image collection, e.g. as GeoTIFF."""
if not format:
format = guess_format(outputfile) if outputfile else "GTiff"
newcollection = self.save_result(format=format, options=options)
newcollection.graph[newcollection.node_id]["result"] = True
return self.session.download(newcollection.graph, outputfile)
def tiled_viewing_service(self, type: str, **kwargs) -> Service:
self.graph[self.node_id]['result'] = True
return self.session.create_service(self.graph, type=type, **kwargs)
def execute_batch(
self,
outputfile: Union[str, pathlib.Path], out_format: str = None,
print=print, max_poll_interval=60, connection_retry_interval=30,
job_options=None, **format_options):
"""
Evaluate the process graph by creating a batch job, and retrieving the results when it is finished.
This method is mostly recommended if the batch job is expected to run in a reasonable amount of time.
For very long running jobs, you probably do not want to keep the client running.
:param job_options:
:param outputfile: The path of a file to which a result can be written
:param out_format: String Format of the job result.
:param format_options: String Parameters for the job result format
"""
job = self.send_job(out_format, job_options=job_options, **format_options)
return job.run_synchronous(
# TODO #135 support multi file result sets too
outputfile=outputfile,
print=print, max_poll_interval=max_poll_interval, connection_retry_interval=connection_retry_interval
)
def send_job(
self, out_format=None, title: str = None, description: str = None, plan: str = None, budget=None,
job_options=None, **format_options
) -> RESTJob:
"""
Sends a job to the backend and returns a Job instance. The job will still need to be started and managed explicitly.
The :func:`~openeo.imagecollection.ImageCollection.execute_batch` method allows you to run batch jobs without managing it.
:param out_format: String Format of the job result.
:param job_options: A dictionary containing (custom) job options
:param format_options: String Parameters for the job result format
:return: status: Job resulting job.
"""
img = self
if out_format:
# add `save_result` node
img = img.save_result(format=out_format, options=format_options)
img.graph[img.node_id]["result"] = True
return self.session.create_job(
process_graph=img.graph,
title=title, description=description, plan=plan, budget=budget, additional=job_options
)
def execute(self) -> Dict:
"""Executes the process graph of the imagery. """
newbuilder = self.builder.shallow_copy()
newbuilder.processes[self.node_id]['result'] = True
return self.session.execute(newbuilder.processes)
####### HELPER methods #######
def _graph_merge(self, other_graph:Dict):
newbuilder = self.builder.shallow_copy()
merged = newbuilder.merge(GraphBuilder.from_process_graph(other_graph))
# TODO: properly update metadata as well?
newCollection = ImageCollectionClient(self.node_id, merged, self.session, metadata=self.metadata)
return newCollection
def graph_add_process(self, process_id: str, args: dict,
metadata: CollectionMetadata = None) -> 'ImageCollectionClient':
"""
Returns a new imagecollection with an added process with the given process
id and a dictionary of arguments
:param process_id: String, Process Id of the added process.
:param args: Dict, Arguments of the process.
:return: new ImageCollectionClient instance
"""
#don't modify in place, return new builder
newbuilder = self.builder.shallow_copy()
id = newbuilder.process(process_id,args)
# TODO: properly update metadata as well?
newCollection = ImageCollectionClient(
node_id=id, builder=newbuilder, session=self.session, metadata=metadata or copy.copy(self.metadata)
)
return newCollection
def to_graphviz(self):
"""
Build a graphviz DiGraph from the process graph
:return:
"""
# pylint: disable=import-error, import-outside-toplevel
import graphviz
import pprint
graph = graphviz.Digraph(node_attr={"shape": "none", "fontname": "sans", "fontsize": "11"})
for name, process in self.graph.items():
args = process.get("arguments", {})
# Build label
label = '<<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">'
label += '<TR><TD COLSPAN="2" BGCOLOR="#eeeeee">{pid}</TD></TR>'.format(pid=process["process_id"])
label += "".join(
'''<TR><TD ALIGN="RIGHT">{arg}</TD>
<TD ALIGN="LEFT"><FONT FACE="monospace">{value}</FONT></TD></TR>'''.format(
arg=k, value=pprint.pformat(v)[:1000].replace('\n', '<BR/>')
) for k, v in sorted(args.items())
)
label += '</TABLE>>'
# Add node and edges to graph
graph.node(name, label=label)
for arg in args.values():
if isinstance(arg, dict) and "from_node" in arg:
graph.edge(arg["from_node"], name)
# TODO: add subgraph for "callback" arguments?
return graph
``` |
{
"source": "jonathonball/adventofcode",
"score": 4
} |
#### File: 2019/3/3-1.py
```python
import sys
import math
class Point:
def __init__(self, panel, location, wire, x, y):
self.panel = panel
self.location = location
self.wire = wire
self.x = x
self.y = y
def distance_from_center(self):
self.distance_from_center = math.fabs(self.x) + math.fabs(self.y)
class Panel:
def __init__(self):
self.data = {}
self.intersections = {}
self.total = 0
def location(self, x, y):
"""Return string with index for self.data"""
return str(x) + ":" + str(y)
def get(self, x, y):
"""Return a Point object or None"""
location = self.location(x, y)
try:
return self.data[location]
except KeyError:
return None
def set(self, x, y, wire):
"""Create a new wire segment or update an existing one"""
location = self.location(x, y)
existing = self.get(x, y)
if not existing:
point = Point(self, location, wire, x, y)
self.data[location] = point
return point
if existing.wire != wire:
existing.wire = "Intersection"
existing.distance_from_center()
return existing
def reset(self):
"""Reset for new wire"""
self.x = 0
self.y = 0
def add_wire(self, wire, data):
"""Parse vector data for new wire"""
self.reset()
for vector in data:
direction = vector[0]
magnitude = int(vector[1:])
for _ in range(magnitude):
if direction == "U":
self.y += 1
elif direction == "D":
self.y -= 1
elif direction == "L":
self.x -= 1
else:
self.x += 1
self.set(self.x, self.y, wire)
def gather_intersections(self):
"""Gather identified intersections into a list"""
self.intersections = [
self.data[key]
for key in self.data.keys() if self.data[key].wire == "Intersection"
]
def find_shortest_manhatten(self):
"""Find the intersection with the shortest distance from 0,0"""
shortest = self.intersections[0].distance_from_center
for intersection in self.intersections:
if intersection.distance_from_center < shortest:
shortest = intersection.distance_from_center
return shortest
panel = Panel()
wires = [ line.strip().split(',') for line in sys.stdin ]
for wire, data in enumerate(wires):
panel.add_wire(wire, data)
panel.gather_intersections()
print(panel.find_shortest_manhatten())
```
#### File: 2019/4/4-1.py
```python
class FuelDepotCracker:
def __init__(self):
self.minimum = 271973
self.maximum = 785961
self.position = self.minimum
def is_valid(self, value):
"""Returns boolean is valid fuel depot password?"""
has_duplicate = False
numbers = [n for n in str(value)]
for index in range(1, 6):
if numbers[index - 1] == numbers[index]:
has_duplicate = True
if numbers[index - 1] > numbers[index]:
return False
return has_duplicate
def check_values(self):
"""Iterates through all potential values to determine valid passwords"""
self.winners = []
for candidate in range(self.minimum, self.maximum + 1):
if self.is_valid(candidate):
self.winners.append(candidate)
def number_of_winners(self):
return len(self.winners)
cracker = FuelDepotCracker()
cracker.check_values()
print(cracker.number_of_winners())
```
#### File: 2020/3/3-1.py
```python
import sys
class Matrix:
def __init__(self, rise, run):
self.data = []
self.rise = rise
self.run = run
self.x = 0
self.y = 0
self.trees = 0
def import_data(self, stream):
for string in stream:
characters = string.strip()
row = self.create_row(characters)
self.data.append(row)
def create_row(self, data):
return [ character for character in data ]
def count_trees(self):
number_of_trees = 0
if self.is_tree():
number_of_trees += 1
while(self.move_down_slope()):
if self.is_tree():
number_of_trees += 1
self.trees = number_of_trees
def is_tree(self):
if self.data[self.y][self.x] == "#":
return True
return False
def column_width(self, index):
return len(self.data[index])
def move_down_slope(self):
number_of_rows = len(self.data)
self.y += self.rise
if self.y >= number_of_rows:
return False
number_of_columns = self.column_width(self.y)
self.x += self.run
if self.x >= number_of_columns:
self.x -= number_of_columns
return True
m = Matrix(1, 3)
m.import_data(sys.stdin)
m.count_trees()
print(m.trees)
```
#### File: 2020/4/4-1.py
```python
import sys
class Field:
valid_fields = {
"byr", # (Birth Year)
"iyr", # (Issue Year)
"eyr", # (Expiration Year)
"hgt", # (Height)
"hcl", # (Hair Color)
"ecl", # (Eye Color)
"pid", # (Passport ID)
"cid" # (Country ID)
}
def __init__(self, key, value):
self.key = key
self.value = value
def is_valid(self):
return self.key in self.valid_fields
def __str__(self):
return self.value
class Passport:
def __init__(self):
self.fields = {
"byr": None, # (Birth Year)
"iyr": None, # (Issue Year)
"eyr": None, # (Expiration Year)
"hgt": None, # (Height)
"hcl": None, # (Hair Color)
"ecl": None, # (Eye Color)
"pid": None, # (Passport ID)
}
self.required_keys = set(self.fields.keys())
def add_field(self, key, value):
field = Field(key, value)
if field.is_valid():
self.fields[key] = value
def is_valid(self):
for key in self.required_keys:
if self.fields[key] == None:
return False
return True
# Code execution starts here
passports = []
passport = Passport()
for line in sys.stdin:
if line == "\n":
if passport.is_valid():
passports.append(passport)
passport = Passport()
else:
new_fields = line.strip().split(" ")
for new_field in new_fields:
key, value = new_field.split(":")
passport.add_field(key, value)
print(len(passports))
``` |
{
"source": "jonathonball/video-sorting",
"score": 3
} |
#### File: video-sorting/videoindex/stat.py
```python
class Stat:
def __init__(self, value):
self.min = None
self.max = None
self.average = None
self.values = []
self.append(value)
def add(self, value):
if hasattr(value, '__iter__'):
self.values.extend(value)
else:
self.values.append(value)
self.min = min(self.values)
self.max = max(self.values)
``` |
{
"source": "jonathon-chew/RedditThreadTracker",
"score": 3
} |
#### File: jonathon-chew/RedditThreadTracker/main.py
```python
import praw
import os
from pprint import pprint
print("Logging into Reddit")
#Get into Reddit
reddit = praw.Reddit(client_id=' ',
client_secret=' ',
user_agent='< ')
print("Logged in to Reddit")
def newLookUp():
# Get the subreddit
desiredSubreddit = input("What is the name of the subreddit, excluding the /r: ")
desiredSubreddit = desiredSubreddit.strip()
# desiredSubreddit = "excel"
desiredSubreddit = reddit.subreddit(str(desiredSubreddit))
print("Got to the subReddit")
print("Getting the submission")
titleOfPost = input ("What is the title of the post? ")
titleOfPost = titleOfPost.strip()
origionalAuthor = input("Just to be sure please enter the name of the author here: ")
origionalAuthor = origionalAuthor.strip()
# titleOfPost = "Autofilling a referenced text"
# origionalAuthor = "EdlsslyFscntngGurl"
counter = 1
for submission in desiredSubreddit.top("year", limit=None):
print(f"I'm looking for the post {counter}")
counter = counter + 1
if titleOfPost == submission.title:
print("I've found the title of the post'")
if origionalAuthor == submission.author:
print("I've found the author's submission of this")
print("Getting comments")
submission.comments.replace_more(limit=100) # flatten tree
comments = submission.comments.list() # all comments
titleOfPost = titleOfPost.title()
fileName = titleOfPost + " Author: " + origionalAuthor + " Subreddit: " + str(desiredSubreddit) + ".txt"
os.chdir(" INITIAL FILE PLACE ")
with open (fileName, "w") as f:
print("Made the file")
f.write(f'{titleOfPost} written by {origionalAuthor} \n \n')
f.write(f"What they said origionally: {submission.selftext}")
counter = 1
for comment in comments:
if comment.author != "AutoModerator":
f.write(f' \n\n\n What people commented: {counter}) \n {comment.body}')
counter = counter + 1
break
def reLookUp():
#set the counter to show on the file how many comments
counter = 1
#list all files that you are watching
listOfFiles = []
os.chdir(" INITIAL FILE PLACE ")
#allow the user to see the posts that are to be chccked up on
fileCounter = 1
for everyfile in os.listdir():
listOfFiles.append(everyfile)
print(f'{fileCounter}) {everyfile}')
fileCounter = fileCounter + 1
#get the post to check from the user's choice by number
pickedOption = input("Which post would you like to check up on? ")
pickedOption = int(pickedOption) - 1
pickedOption = listOfFiles[pickedOption]
print(pickedOption)
#break up the file name ot reextract the subreddit, author name and post title
breakFileName = pickedOption.split("Author:")
titleOfPost = breakFileName[0]
titleOfPost = titleOfPost.strip()
#get the origional author's name
origionalAuthor = breakFileName [1]
origionalAuthor = origionalAuthor.split("Subreddit:")
origionalAuthor = origionalAuthor[0]
origionalAuthor = origionalAuthor.strip()
#get the subreddit name
breakFileName = pickedOption.split("Subreddit:")
desiredSubreddit = breakFileName [1]
noExtention = desiredSubreddit.split(".txt")
desiredSubreddit = noExtention[0]
desiredSubreddit = desiredSubreddit.strip()
# confirm the post to check, and the subreddit
print(f"Your post title was: {titleOfPost}")
print(f"This was posted by: {origionalAuthor}")
print(f"On the subreddit: {desiredSubreddit}")
for submission in reddit.redditor(origionalAuthor).stream.submissions():
print(submission.title)
if submission.title == titleOfPost:
submission.comments.replace_more(limit=100) # flatten tree
comments = submission.comments.list() # all comments
checkFileName = "Check: " + titleOfPost + ".txt"
os.chdir(" COMPARE FILE PLACE")
with open (checkFileName, "w") as f:
print("Made the file")
f.write(f'{titleOfPost} written by {origionalAuthor} \n \n')
f.write(f"What they said origionally: {submission.selftext}")
for comment in comments:
if comment.author != "AutoModerator":
f.write(f' \n\n\n What people commented: {counter}) \n {comment.body}')
counter = counter + 1
break
compareFiles(pickedOption, checkFileName)
def compareFiles(pickedOption, checkFileName):
print("Comparing files")
f1 = " INITAL FILE PLACE " + pickedOption
f2 = " COMAPRE FILE PLACE " + checkFileName
f1 = open(f1, "r")
f2 = open(f2, "r")
i = 0
for line1 in f1:
i += 1
for line2 in f2:
# matching line1 from both files
if line1 != line2:
print("Line ", i, ":")
# else print that line from both files
print("\tFile 1:", line1, end='')
print("\tFile 2:", line2, end='')
else:
print(f"This line hasn't changed: {i}")
break
# closing files
f1.close()
f2.close()
def closePost():
listOfFiles = []
firstDir = " INITAL FILE PLACE"
os.chdir(firstDir)
#allow the user to see the posts that are to be chccked up on
fileCounter = 1
for everyfile in os.listdir(firstDir):
listOfFiles.append(everyfile)
# print(f'{fileCounter}) {everyfile}')
fileCounter = fileCounter + 1
#how to get both files with different names and remove both/delete
completedFiles = []
secondDir = " COMPARE PLACE "
os.chdir(secondDir)
completedFileCounter = 1
for eachFile in os.listdir(secondDir):
completedFiles.append(eachFile)
# print(f"{completedFileCounter}) {eachFile}")
completedFileCounter = completedFileCounter + 1
# print (type(listOfFiles))
# print (type(completedFiles))
# print(f"{listOfFiles} \n")
# print(f"{completedFiles} \n")
postToLookForSubmissionTitles = []
postCompleteFileList = []
i = 0
watchedPostSubmissionNames = []
while i < len(listOfFiles):
splitTheNameUp = listOfFiles[i]
splitTheNameUp = splitTheNameUp.split("Author: ")
splitTheNameUp = splitTheNameUp[0]
watchedPostSubmissionNames.append(splitTheNameUp)
i = i + 1
c = 0
comparedFilesPostNames = []
while c < len(completedFiles):
splitCheckOff = completedFiles[c]
splitCheckOff = splitCheckOff.split("Check: ")
splitCheckOff = splitCheckOff[1]
comparedFilesPostNames.append(splitCheckOff)
c = c + 1
# print(f"List of files in Posts to look: {watchedPostSubmissionNames}\n")
# print(f"List of files in to compare: {comparedFilesPostNames}\n")
a = 0
while a < len(watchedPostSubmissionNames):
removeExtention = watchedPostSubmissionNames[a]
removeExtention = removeExtention.split(".txt")
removeExtention = removeExtention[0]
# print(removeExtention)
postToLookForSubmissionTitles.append(removeExtention)
a = a + 1
b = 0
# print(f"{comparedFilesPostNames[b]}\n")
while b < len(comparedFilesPostNames):
removeExtentions = comparedFilesPostNames[b]
removeExtentions = removeExtentions.split(".txt")
removeExtentions = removeExtentions[0]
postCompleteFileList.append(removeExtentions)
b = b + 1
# print(f"Proper names in watch submission: {postToLookForSubmissionTitles}\n")
# print(f"{len(comparedFilesPostNames)}\n")
# print(f"Proper names in files to compare: {postCompleteFileList}\n")
p = 0
q = 0
while p < len(watchedPostSubmissionNames):
while q < len(comparedFilesPostNames):
if watchedPostSubmissionNames[p] == comparedFilesPostNames[q]:
print(f"{watchedPostSubmissionNames[p]}\n")
print(f"{comparedFilesPostNames[q]}\n")
p = p + 1
q = q + 1
break
break
useroption = input("What would you like to do? (1) Store a new post? (2) Check if there are any updates? (3) Stop following a post? ")
# useroption = "3"
if useroption == "1":
newLookUp()
if useroption == "2":
reLookUp()
if useroption == "3":
closePost()
``` |
{
"source": "jonathondilworth/ttds_cw_two_code",
"score": 2
} |
#### File: ttds_cw_two_code/code/evallib.py
```python
from .mods import Document
from .mods import DocumentSet
from .mods import Result
from .mods import ResultSet
def precision(rel_docs, ret_docs):
'''fraction of relevent documents that have been retrieved | this query'''
numerator = len(rel_docs.intersection(ret_docs))
denominator = len(ret_docs)
return float(numerator) / float(denominator)
def recall(rel_docs, ret_docs):
'''fraction of relevent documents retrived | set of all relevent documents '''
numerator = len(rel_docs.intersection(ret_docs))
denominator = len(rel_docs)
return float(numerator) / float(denominator)
# TODO: Write Tests
def accuracy(true_positives, true_negatives, total_examples):
'''not particularly used in IR, -> 99.99% in lots of instances'''
numerator = true_positives + true_negatives
return float(numerator) / float(total_examples)
def f1_score(rel_docs, ret_docs):
'''f1 = (2 * p * r) / (p + r)'''
numerator = 2 * precision(rel_docs, ret_docs) * recall(rel_docs, ret_docs)
denominator = precision(rel_docs, ret_docs) + recall(rel_docs, ret_docs)
return float(numerator) / float(denominator)
def f_measure(rel_docs, ret_docs, hyper_beta=1):
'''fb = ((b^2 + 1) * p) * r / ((b^2) * p) + r'''
numerator_scalar = (hyper_beta ** 2) + 1
denominator_scalar = hyper_beta ** 2
numerator = numerator_scalar * precision(rel_docs, ret_docs) * recall(rel_docs, ret_docs)
denominator = (denominator_scalar * precision(rel_docs, ret_docs)) + recall(rel_docs, ret_docs)
return float(numerator) / float(denominator)
def precision_at_k(rel_docs, ret_docs, k=5):
'''ret_docs list is assumed to be ordered (ranked, most => least)'''
'''its possible that k > len(ret_docs) => always divide by k (precision)'''
# I don't think we should be truncating the relevent documents.. - TODO: check this
# in this SPECIAL INSTANCE, casting will be included in the function
rel_docs = set(rel_docs)
# truncated_rel_docs = set(rel_docs[0:k])
truncated_rel_docs = rel_docs
truncated_ret_docs = set(ret_docs[0:k])
precision_at_k_numerator = len(truncated_rel_docs.intersection(truncated_ret_docs))
precision_at_k_denominator = k
return float(precision_at_k_numerator) / float(precision_at_k_denominator)
# TODO: implement a unit test for this
def recall_at_k(rel_docs, ret_docs, k=50):
'''recall @ some value will simply calculate the recall at some point k in'''
'''the ranked list of retrieved documents'''
rel_docs = set(rel_docs)
truncated_ret_docs = set(ret_docs[0:k])
return recall(rel_docs, truncated_ret_docs)
def r_precision(rel_docs, ret_docs, r=None):
'''assumes that the length of the set of relevent documents is known: r'''
'''taking the precision at this length is an accurate measure of real precision'''
'''problem: how is the system / annotator suppose to know what r is on every query?'''
return precision_at_k(rel_docs, ret_docs, len(rel_docs) if r is None else r)
def average_precision(rel_docs, ret_docs, unknown_rel_docs=None):
total_rel_docs = len(rel_docs)
if unknown_rel_docs is not None:
total_rel_docs = unknown_rel_docs
total_found_rel_docs = 0
total_precision_scores = []
for idx, document in enumerate(ret_docs):
current_document = idx + 1
if document in rel_docs:
total_found_rel_docs += 1
total_precision_scores.append(float(total_found_rel_docs) / float(current_document))
total_precision = sum(total_precision_scores)
average_precision = float(total_precision) / float(total_rel_docs)
return average_precision
def mean_average_precision(queries):
average_precision_values = []
for q in queries:
computed_value = average_precision(q.rel_docs, q.ret_docs, q.total_rel_docs)
average_precision_values.append(computed_value)
total_avg_precision = sum(average_precision_values)
mean_average_precision = float(total_avg_precision) / float(len(average_precision_values))
return mean_average_precision
# TODO: implement
# Note: due to time constraints, tests could not be implemented for these functions
# Note2: due to time constraints, could not finish implementing dg / ndcg / indcg
def discounted_gain(rel_docs, ret_docs, k=2):
'''rel_docs '''
rel_docs_ordered_by_score = rel_docs.all_docs_ordered_by_score()
rel_1 = rel_docs_ordered_by_score[0].score
total_score = float(rel_1)
# ret_docs.ordered_doc_attrs_for_query_by_rank()
for doc_idx in range(1, (k + 1)):
total_score += (float((''' DOC SCORE FOR CURRENT DOCUMENT ''')) / float(log(doc_idx, 2)))
return total_score
# TODO: implement
# Note: due to time constraints, tests could not be implemented for these functions
def norm_discounted_cumulative_gain():
pass
# Note: due to time constraints, tests could not be implemented for these functions
def ndcg_at_k(rel_docs, ret_docs, k):
'''rel_docs should be a DocumentSet'''
'''ret_docs should be a ResultSet'''
pass
```
#### File: ttds_cw_two_code/code/utillib.py
```python
import re
def flatten_white_space(input_string, expression=" +", replacement=" "):
return re.sub(expression, replacement, input_string)
def parse_rel_result_str(string, delimiter=' ', empty_start=True):
''' string : input string'''
''' delimiter : seperates results'''
'''empty_start : is first result empty?'''
'''return : list of tuples STRINGS'''
results = string.split(delimiter)
return results[1:] if empty_start else results
def parse_tuple(string):
doc_num, score = string.strip('()').split(',')
return (int(doc_num), int(score))
```
#### File: ttds_cw_two_code/tests/test_average_precision.py
```python
import unittest
from code.evallib import average_precision
# TODO: abstract out QuerySet & Query
class QuerySet():
def __init__(self, queries):
self.queries = queries
class Query():
def __init__(self, rel_docs, ret_docs, total_rel_docs=None):
self.rel_docs = rel_docs
self.ret_docs = ret_docs
if total_rel_docs is None:
self.total_rel_docs = len(self.rel_docs)
else:
self.total_rel_docs = total_rel_docs
# something cool to think about - query generator - TODO
class TestAVGPrecision(unittest.TestCase):
''' AP_1 = 3.04 / 4 = 0.76, AP_2 = 0.62 / 3, AP_3 = 1.275 / 7 = 0.182 '''
def test_exp_avg_pre_one(self):
query_one = Query(rel_docs = [1, 2, 5, 9],
ret_docs = [1, 2, 3, 4, 5, 6, 7, 8, 9])
test_score = average_precision(query_one.rel_docs, query_one.ret_docs)
expected_score = (float(1) + float(1) + (float(3) / float(5)) + (float(4) / float(9))) / float(4)
self.assertEqual(test_score, expected_score)
def test_exp_avg_pre_two(self):
query_two = Query(rel_docs = [3, 7],
ret_docs = [1, 2, 3, 4, 5, 6, 7, 8],
total_rel_docs = 3)
test_score = average_precision(query_two.rel_docs, query_two.ret_docs, query_two.total_rel_docs)
expected_score = ((float(1) / float(3)) + (float(2) / float(7))) / float(3)
self.assertEqual(test_score, expected_score)
def test_exp_avg_pre_three(self):
query_three = Query(rel_docs = [2, 5, 8],
ret_docs = [1, 2, 3, 4, 5, 6, 7, 8, 9],
total_rel_docs = 7)
test_score = average_precision(query_three.rel_docs, query_three.ret_docs, query_three.total_rel_docs)
expected_score = ((float(1) / float(2)) + (float(2) / float(5)) + (float(3) / float(8))) / float(7)
self.assertEqual(test_score, expected_score)
if __name__ == '__main__':
unittest.main()
```
#### File: ttds_cw_two_code/tests/test_precision.py
```python
import unittest
from code.evallib import precision
# TODO: consider using metaclasses to ensure that all tests are written in the form: test_<x>
class TestPrecision(unittest.TestCase):
'''
Precision tests
precision accepts two document sets (relevent and retrieved)
It returns the value of: |(relevent INTERSECTION retrieved)| / |retrieved|
'''
def test_expected_precision(self):
relevent_documents = {1, 2}
retrieved_documents = {2, 3, 4}
self.assertEqual(precision(relevent_documents, retrieved_documents), (float(1)/float(3)))
def test_return_type(self):
relevent_documents = {1, 2}
retrieved_documents = {2, 3, 4}
self.assertIsInstance(precision(relevent_documents, retrieved_documents), float)
def test_divide_by_zero(self):
relevent_documents = {1, 2}
retrieved_documents = {}
with self.assertRaises(ZeroDivisionError):
precision(relevent_documents, retrieved_documents)
def test_floating_point(self):
"""precision itself doesn't care about the labels that documents use"""
relevent_documents = {0.11, 0.23}
retrieved_documents = {1, 3, 0.23, 4}
self.assertEqual(precision(relevent_documents, retrieved_documents), (float(1)/float(4)))
def test_invalid_arguments(self):
"""using lists instead of sets should cause problems"""
relevent_documents = [1, 2]
retrieved_documents = [2, 3, 4]
with self.assertRaises(AttributeError):
precision(relevent_documents, retrieved_documents)
def test_no_arguments(self):
with self.assertRaises(TypeError):
precision()
def test_expects_zero(self):
"""casting a dict to a set is a thing, so we can expect zero"""
relevent_documents = {1, 2}
retrieved_documents = {'a':'b','c':'d'}
self.assertEqual(precision(relevent_documents, retrieved_documents), float(0))
if __name__ == '__main__':
unittest.main()
```
#### File: ttds_cw_two_code/tests/test_recall.py
```python
import unittest
from code.evallib import recall
class TestRecall(unittest.TestCase):
'''
Recall tests
recall excepts two parameters: two document sets (relevent and retrieved)
It returns the value of: |(relevent INTERSECTION retrieved)| / |relevent|
'''
def test_expected(self):
relevent_documents = {3306, 3022, 1892, 100}
retrieved_documents = {100, 3022, 3307, 3308, 3309, 1001, 202}
self.assertEqual(recall(relevent_documents, retrieved_documents), (float(2)/float(4)))
def test_return_type(self):
relevent_documents = {3306, 3022, 1892, 100}
retrieved_documents = {100, 3022, 3307, 3308, 3309, 1001, 202}
self.assertIsInstance(recall(relevent_documents, retrieved_documents), float)
def test_expecting_zero(self):
relevent_documents = {22}
retrieved_documents = {100, 3022, 3307, 3308, 3309, 1001, 202}
self.assertEqual(recall(relevent_documents, retrieved_documents), float(0))
def test_empty_relevent_set(self):
relevent_documents = {}
retrieved_documents = {100, 3022, 3307, 3308, 3309, 1001, 202}
with self.assertRaises(AttributeError):
recall(relevent_documents, retrieved_documents)
def test_invalid_arguments(self):
relevent_documents = [1, 2]
retrieved_documents = [2, 3, 4]
with self.assertRaises(AttributeError):
recall(relevent_documents, retrieved_documents)
def test_no_arguments(self):
with self.assertRaises(TypeError):
recall()
def test_expects_zero(self):
"""casting a dict to a set is a thing, so we can expect zero"""
relevent_documents = {1, 2}
retrieved_documents = {'a':'b','c':'d'}
self.assertEqual(recall(relevent_documents, retrieved_documents), float(0))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonathonfletcher/adventofcode",
"score": 3
} |
#### File: 2020/day25/day25.py
```python
def xform(subject_number, loop_size):
in_value = 1
for _ in range(loop_size):
in_value = ( in_value * subject_number ) % 20201227
return in_value
def find_loop_size(pk, subject_number=7):
loop_size = 0
in_value = 1
while True:
loop_size += 1
in_value = ( in_value * subject_number ) % 20201227
if in_value == pk:
break
return loop_size
public_keys = [5764801, 5764801]
loop_sizes = [find_loop_size(pk) for pk in public_keys]
loop_sizes = list(reversed(loop_sizes))
for i in range(len((public_keys))):
k = xform(public_keys[i], loop_sizes[i])
print("{}".format(k))
```
#### File: 2021/day16/main.py
```python
from functools import reduce
import os
import inspect
class P1(object):
def __init__(self) -> None:
self.version_sum = 0
self.operators = {
'000': self.operand_sum,
'001': self.operand_prod,
'010': self.operand_min,
'011': self.operand_max,
'101': self.operand_gt,
'110': self.operand_lt,
'111': self.operand_eq
}
def operand_sum(self, stack):
r = reduce(lambda x, y: x+y, stack)
# print(f'sum({stack}) -> {r}')
return r
def operand_prod(self, stack):
r = reduce(lambda x, y: x*y, stack)
# print(f'prod({stack}) -> {r}')
return r
def operand_min(self, stack):
r = reduce(lambda x, y: min(x, y), stack)
# print(f'min({stack}) -> {r}')
return r
def operand_max(self, stack):
r = reduce(lambda x, y: max(x, y), stack)
# print(f'max({stack}) -> {r}')
return r
def operand_gt(self, stack):
r = 0
if stack[0] > stack[1]:
r = 1
# print(f'gt({stack}) -> {r}')
return r
def operand_lt(self, stack):
r = 0
if stack[0] < stack[1]:
r = 1
# print(f'lt({stack}) -> {r}')
return r
def operand_eq(self, stack):
r = 0
if stack[0] == stack[1]:
r = 1
# print(f'eq({stack}) -> {r}')
return r
def process_XXX(self, packet:str, stack:list) -> str:
# print(f'{len(packet)} {packet} {inspect.currentframe().f_code.co_name}')
pkt_op_length, packet = packet[:1], packet[1:]
if int(pkt_op_length, 2) == 0:
sub_pkt_length, packet = packet[:15], packet[15:]
# print(f'{pkt_op_length} {sub_pkt_length}')
sub_length = int(sub_pkt_length, 2)
sub_pkt, packet = packet[:sub_length], packet[sub_length:]
while len(sub_pkt):
sub_pkt = self.process(sub_pkt, stack)
return packet
else:
sub_pkt_count, packet = packet[:11], packet[11:]
# print(f'{pkt_op_length} {sub_pkt_count}')
for i in range(int(sub_pkt_count, 2)):
packet = self.process(packet, stack)
return packet
def process_100(self, packet:str, stack:list) -> str:
# print(f'{len(packet)} {packet} {inspect.currentframe().f_code.co_name}')
group_val = ""
while True:
pkt_group, pkt_group_val, packet = packet[0], packet[1:5], packet[5:]
group_val += pkt_group_val
if int(pkt_group, 2) != 1:
break
stack.append(int(group_val, 2))
return packet
def process(self, packet:str, stack:list) -> str:
if not ( len(packet) and packet.count('1') > 0 ):
return ''
pkt_version, pkt_type, packet = packet[:3], packet[3:6], packet[6:]
# print(f'{pkt_version} {pkt_type} {packet}')
self.version_sum += int(pkt_version, 2)
if pkt_type == '100':
packet = self.process_100(packet, stack)
else:
sub_stack = []
packet = self.process_XXX(packet, sub_stack)
operator = self.operators.get(pkt_type)
if operator is not None:
op_result = operator(sub_stack)
stack.append(op_result)
return packet
part_one_tests = [
["8A004A801A8002F478", 16],
["620080001611562C8802118E34", 12],
["C0015000016115A2E0802F182340", 23],
["A0016C880162017C3686B18A3D4780", 31]
]
part_two_tests = [
["C200B40A82", 3],
["04005AC33890", 54],
["CE00C43D881120", 9],
["D8005AC2A8F0", 1],
["F600BC2D8F", 0],
["9C005AC2F8F0", 0],
["9C0141080250320F1802104A08", 1]
]
if False:
pass_tests = True
for input, output in part_two_tests:
packet = ''.join([format(i, f'04b') for i in
[int(x, 16) for x in input]])
print(f'{input} - {packet}')
p = P1()
s = []
p.process(packet, s)
# print(p.result)
# print(p.results)
# print(s)
# r = Runner()
# result = r.run(s[0])
if len(s) != 1:
pass_tests = False
if s[0] != output:
pass_tests = False
if not pass_tests:
break
if pass_tests:
print("PASS")
else:
print("FAIL")
if True:
with open('part1.txt') as ifp:
input = ifp.readline().strip()
packet = ''.join([format(i, f'04b') for i in
[int(x, 16) for x in input]])
# print(f'{input} - {packet}')
p = P1()
s = []
p.process(packet, s)
print(p.version_sum)
print(s[0])
```
#### File: 2021/day18/main.py
```python
def do_explode(l):
d = -1
for i in range(len(l)):
e = l[i]
# print(f' {i:4} {e} {d:4}')
if e == '[':
d += 1
elif e == ']':
d -= 1
elif e != ',' and d == 4:
for li in range(i-1, -1, -1):
if l[li] not in ['[', ']', ',']:
break
if li > 0:
l[li] += l[i]
for ri in range(i+4, len(l)):
if l[ri] not in ['[', ']', ',']:
break
if ri < len(l)-1:
l[ri] += l[i+2]
return l[:i-1] + [0] + l[i+4:]
return None
def do_split(l):
for i in range(len(l)):
e = l[i]
if e not in ['[', ']', ',']:
if e > 9:
le = e // 2
re = e - le
return l[:i] + ['[', le, ',', re, ']'] + l[i+1:]
return None
def do_reduce(l):
while True:
# print(''.join(map(lambda x: str(x), l)))
el = do_explode(l)
if not el:
el = do_split(l)
if not el:
return l
l = el
def do_add(lhs, rhs):
return ['['] + lhs + [','] + rhs + [']']
def do_mag(l):
if len(l) > 1:
for i in range(len(l)):
if l[i] not in [ '[', ']',',']:
lhs = l[i]
rhs = l[i+2]
if rhs not in ['[', ']', ',']:
ne = 3 * lhs + 2 * rhs
ll = l[:i-1]
rl = l[i+4:]
return do_mag(ll + [ ne ] + rl)
return l.pop()
def string_to_list(s):
l = list()
for e in s:
if e in ['[', ']', ',']:
l.append(e)
elif e >= '0' and e <= '9':
l.append(int(e))
return l
parts = list()
with open('part1.txt') as ifp:
pl = None
for ins in [l.strip() for l in ifp.readlines()]:
parts.append(ins)
inl = string_to_list(ins)
if pl is not None:
inl = do_add(pl, inl)
pl = do_reduce(inl)
print(''.join(map(lambda x: str(x), pl)))
print(do_mag(pl))
max_l = None
max_r = None
max_m = 0
for li in range(len(parts)):
for ri in range(len(parts)):
if li == ri:
continue
ls = string_to_list(parts[li])
rs = string_to_list(parts[ri])
m = do_mag(do_reduce(do_add(ls, rs)))
if m > max_m:
max_l = parts[li]
max_r = parts[ri]
max_m = m
m = do_mag(do_reduce(do_add(rs, ls)))
if m > max_m:
max_l = parts[ri]
max_r = parts[li]
max_m = m
print(f'{max_l}')
print(f'{max_r}')
print(f'{max_m}')
```
#### File: 2021/day19/input.py
```python
from typing import Final, List, Set, Dict, Tuple, Any
from collections import defaultdict
from functools import total_ordering
@total_ordering
class Point(object):
def __init__(self, x: int, y: int, z: int) -> None:
self.x: Final = x
self.y: Final = y
self.z: Final = z
def __repr__(self) -> str:
p = str(','.join(map(lambda x: str(x), [self.x, self.y, self.z])))
return f'{self.__class__.__name__}({p})'
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return (self.__class__ == other.__class__
and self.x == other.x
and self.y == other.y
and self.z == other.z)
def __lt__(self, other):
return self.x < other.x or self.y < other.y or self.z < other.z
class Rotation(Point):
c: Final = 0
s: Final = 1
rx: Final = [[1, 0, 0], [0, c, -s], [0, s, c]]
ry: Final = [[c, 0, s], [0, 1, 0], [-s, 0, c]]
rz: Final = [[c, -s, 0], [s, c, 0], [0, 0, 1]]
@staticmethod
def dot(ls, rs):
return [[sum(ls * rs
for ls, rs in zip(lsr, rsc))
for rsc in zip(*rs)]
for lsr in ls]
@staticmethod
def all():
rotations = list()
for xr, zr in [[0, 0], [1, 0], [2, 0], [3, 0], [0, 1], [0, 3]]:
for yr in range(3):
rotations.append(Rotation(xr, yr, zr))
return rotations
# def donr(n, p, r):
# for i in range(n):
# p = Rotation.dot(p, r)
# return p
# rotations = set()
# seen_p = set()
# for nx in range(4):
# for ny in range(4):
# for nz in range(4):
# p = [[1, 2, 3]]
# p = donr(nx, p, Rotation.rx)
# p = donr(ny, p, Rotation.ry)
# p = donr(nz, p, Rotation.rz)
# if str(p) not in seen_p:
# # print(f'nx:{nx}, ny:{ny}, nz:{nz}')
# seen_p.add(str(p))
# rotations.add(Rotation(nx, ny, nz))
# # print(len(seen_p))
# return sorted(rotations)
class Beacon(Point):
def __sub__(self, other):
return self.sub(other)
def sub(self, other):
return Point(self.x-other.x, self.y-other.y, self.z-other.z)
def __add__(self, other: Point):
return self.add(other)
def in_range(self, other: Point):
d = self - other
return ( abs(d.x) <= 2000 and abs(d.y) <= 2000 and abs(d.z) <= 2000 )
def add(self, other: Point):
return Beacon(self.x+other.x, self.y+other.y, self.z+other.z)
def rotate(self, r: Rotation):
def donr(n, p, r):
p = [p]
for i in range(n):
p = Rotation.dot(p, r)
return p[0]
xyz = [self.x, self.y, self.z]
xyz = donr(r.x, xyz, Rotation.rx)
xyz = donr(r.y, xyz, Rotation.ry)
xyz = donr(r.z, xyz, Rotation.rx)
return Beacon(*xyz)
@total_ordering
class Scanner(object):
def __init__(self, name):
self.name: Final = name
self.beacons: Final = list()
def __repr__(self) -> str:
return f'{self.__class__.__name__}("{self.name}")'
def __hash__(self):
return hash(str(self))
def __eq__(self, __o: object) -> bool:
return self.name == __o.name
def __lt__(self, __o: object) -> bool:
return self.name < __o.name
def rotated_beacons(self, r: Rotation):
rb = list()
for b in self.beacons:
rb.append(b.rotate(r))
return rb
def add_beacon(self, b: Beacon) -> None:
self.beacons.append(b)
def rotate(self, r: Rotation):
ns = Scanner(self.name)
for b in self.rotated_beacons(r):
ns.add_beacon(b)
return ns
def position(self, p: Point):
ns = Scanner(self.name)
for b in self.beacons:
ns.add_beacon(Beacon(b.x+p.x, b.y+p.y, b.z+p.z))
return ns
if True:
found_scanners: Final = set()
lost_scanners: Final = set()
scanner_translations: Final = dict()
with open('eg1.txt') as ifp:
s = None
for line in [l.strip() for l in ifp.readlines()]:
if len(line) > 0:
if line[0] == '-' and line[-1] == '-':
s_name = line.split()[-2]
s = Scanner(s_name)
if s_name == "0":
found_scanners.add(s)
scanner_translations[s] = (
Rotation(0, 0, 0), Point(0, 0, 0))
else:
lost_scanners.add(s)
else:
x, y, z = list(map(lambda x: int(x), line.split(',')))
s.add_beacon(Beacon(x, y, z))
rotations: Final = Rotation.all()
while len(lost_scanners) > 0:
print()
print(f'found: {found_scanners}')
print(f'lost: {lost_scanners}')
print(f'xforms: {scanner_translations}')
print(found_scanners)
for fs in found_scanners:
fs_r, fs_p = scanner_translations[fs]
# tfs = fs.position(fp).rotate(fr)
rfs = fs.rotate(fs_r)
# in_range_remaining_scanners = set()
# for rs in remaining_scanners:
# for r in rotations:
# rrs = rs.rotate(r)
# for fsb in rs.beacons:
# for rsb in rs.beacons:
# if fsb.in_range(rsb):
rfs = rfs.position(fs_p)
scanner_found = False
for ls in lost_scanners:
print(f'{rfs} -> {ls}')
for r in rotations:
diff_map = defaultdict(int)
for fb in rfs.beacons:
for lb in ls.rotated_beacons(r):
if fb.in_range(lb):
bd = fb-lb
# print(f'{b0} - {b1} == {b0-b1}')
diff_map[bd] += 1
# print(diff_map)
for k, v in diff_map.items():
if v == 12:
print(f'{k}: {v}')
for k, v in diff_map.items():
if v == 12:
print(f'{rfs} matched {ls} with {r} and {k}')
scanner_found = True
scanner_translations[ls] = [r, k]
found_scanners.add(ls)
lost_scanners.remove(ls)
if scanner_found:
break
if scanner_found:
break
if scanner_found:
break
print(found_scanners)
print(scanner_translations)
print(lost_scanners)
```
#### File: 2021/day20/main.py
```python
class P(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return ( self.__class__ == other.__class__
and self.x == other.x
and self.y == other.y )
def __repr__(self):
return f'{self.__class__.__name__}({self.x},{self.y})'
def __hash__(self):
return hash(str(self))
def print_image(image):
output_mapping = { 0: '.', 1: '#'}
img_x = list(set(map(lambda x: x.x, image.keys())))
img_y = list(set(map(lambda x: x.y, image.keys())))
for y in range(min(img_y), max(img_y)+1):
print_line = ""
for x in range(min(img_x), max(img_x)+1):
print_line += output_mapping.get((image.get(P(x, y), 0)))
print(print_line)
algo = None
image = {}
input_mapping = { '#': 1, '.': 0 }
with open('part1.txt') as ifp:
y=0
for line in [l.strip() for l in ifp.readlines()]:
if len(line) > 0:
if algo is None:
algo = list(map(lambda x: input_mapping[x], line))
else:
x = 0
for v in line:
image[P(x,y)] = input_mapping[v]
x += 1
y += 1
for iteration in range(50):
current_x = list(set(map(lambda x: x.x, image.keys())))
current_y = list(set(map(lambda x: x.y, image.keys())))
current_image_default = 0
new_image = dict()
new_image_lit = 0
if algo[0] > 0 and algo[-1] < 1:
current_image_default = algo[0]
if not iteration % 2:
current_image_default = algo[-1]
# print(f'{1+iteration} {current_image_default}')
for y in range(min(current_y)-2, max(current_y)+3):
for x in range(min(current_x)-2, max(current_x)+3):
algo_offset = 0
for dy in [ -1, 0, 1 ]:
for dx in [ -1, 0, 1 ]:
ov = image.get(P(x+dx, y+dy), current_image_default)
algo_offset <<= 1
algo_offset += ov
nv = algo[algo_offset]
new_image[P(x,y)] = nv
new_image_lit += nv
# print_image(new_image)
if iteration in [ 1, 49 ]:
print(f'{1+iteration} {new_image_lit}')
image = new_image
```
#### File: 2021/day21/main.py
```python
from typing import List
import functools
class Player:
def __init__(self, name, startpos):
self.name = name
self.startpos = startpos - 1
self.score = 0
def __repr__(self):
return f'{self.name}({self.startpos},{self.score})'
def move(self, roll):
self.startpos = (self.startpos + roll) % 10
self.score += self.startpos + 1
class Game1:
class Die:
def __init__(self):
self.value = 0
self.count = 0
def roll(self):
self.value = (self.value + 1) % 100
self.count += 1
return self.value
def __init__(self, players: List[Player], winscore: int = 1000):
self.loser = None
self.winscore = winscore
self.d = Game1.Die()
while self.loser is None:
for i in [0, 1]:
if self.turn(players[i], self.d):
self.loser = players[1-i]
return
def turn(self, p: Player, d: Die) -> bool:
p.move(d.roll() + d.roll() + d.roll())
return p.score >= self.winscore
def result(self) -> int:
return self.d.count * self.loser.score
class Game2:
def __init__(self, players: List[Player], winscore: int = 21):
self.players = players
self.winscore = winscore
@staticmethod
@functools.cache
def run_game(p0p, p0s, p1p, p1s, winscore):
w0, w1 = 0, 0
for roll in [(x, y, z) for x in [1, 2, 3] for y in [1, 2, 3] for z in [1, 2, 3]]:
tp0p = (p0p + sum(roll)) % 10
tp0s = (p0s+tp0p+1)
if tp0s < 21:
nw1, nw0 = Game2.run_game(p1p, p1s, tp0p, tp0s, winscore)
w0, w1 = w0+nw0, w1+nw1
else:
w0 += 1
return w0, w1
def result(self):
return max(Game2.run_game(self.players[0].startpos, 0, self.players[1].startpos, 0, self.winscore))
# players = [Player("1", 4), Player("2", 8)]
players = [Player("1", 10), Player("2", 6)]
g1 = Game1(players)
print(g1.result())
players = [Player("1", 10), Player("2", 6)]
g2 = Game2(players)
print(g2.result())
```
#### File: 2021/day25/main.py
```python
from typing import List
def read_input(ifp):
grid = list()
for line in [x.strip() for x in ifp.readlines()]:
grid.append(list(line.strip()))
return grid
def shiftright(grid):
newgrid = [list(x) for x in grid]
maxy = len(grid)
maxx = len(grid[0])
delta = 0
y = 0
while y < maxy:
x = 0
while x < maxx:
nx = (x+1)%maxx
if grid[y][x] == '>' and grid[y][nx] == '.':
newgrid[y][nx] = grid[y][x]
newgrid[y][x] = '.'
x += 2
delta += 1
continue
else:
x += 1
continue
y += 1
return newgrid
def shiftdown(grid):
newgrid = [list(x) for x in grid]
maxy = len(grid)
maxx = len(grid[0])
delta = 0
x = 0
while x < maxx:
y = 0
while y < maxy:
ny = (y+1)%maxy
if grid[y][x] == 'v' and grid[ny][x] == '.':
newgrid[ny][x] = grid[y][x]
newgrid[y][x] = '.'
y += 2
delta += 1
continue
else:
y += 1
continue
x += 1
return newgrid
def day25(grid:List[List[str]]):
print(f'{len(grid)} / {len(grid[0])}')
newgrid = grid
grid = list()
counter = 0
while newgrid != grid:
grid = newgrid
newgrid = shiftdown(shiftright(grid))
# print(f' {grid}')
# print(f':{newgrid}')
counter+= 1
print(counter)
return
if __name__ == '__main__':
with open("part1.txt") as ifp:
grid = read_input(ifp)
day25(grid)
``` |
{
"source": "jonathonfletcher/LazyBlacksmith",
"score": 2
} |
#### File: LazyBlacksmith/lazyblacksmith/app.py
```python
from flask import Flask
from flask import flash
from flask import g
from flask import render_template
from flask import request
from flask_wtf.csrf import CSRFProtect
import flask_login
# helpers
from lazyblacksmith.utils.template_filter import templatefilter
from lazyblacksmith.utils.time import utcnow
from lazyblacksmith.utils.context_processor import inject_user
from lazyblacksmith.utils.context_processor import inject_enums
from lazyblacksmith.utils.request import is_xhr
# db
from lazyblacksmith.models import TokenScope
from lazyblacksmith.models import User
from lazyblacksmith.models import db
# extensions
from lazyblacksmith.extension.cache import CACHE
from lazyblacksmith.extension.login_manager import login_manager
def create_app(config_object):
# app
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_before_requests(app)
register_errorhandlers(app)
register_context_processors(app)
register_teardown_appcontext(app)
# return app
return app
def register_blueprints(app):
""" register blueprints & helper blueprints """
# blueprint import, only here because of exts
from lazyblacksmith.views import ajax_eve_api
from lazyblacksmith.views import ajax_eve_sde
from lazyblacksmith.views import ajax_account
from lazyblacksmith.views import blueprint
from lazyblacksmith.views import home
from lazyblacksmith.views import price
from lazyblacksmith.views import sso
from lazyblacksmith.views import template
from lazyblacksmith.views import account
app.register_blueprint(ajax_eve_api, url_prefix='/ajax/eveapi')
app.register_blueprint(ajax_eve_sde, url_prefix='/ajax/evesde')
app.register_blueprint(ajax_account, url_prefix='/ajax/account')
app.register_blueprint(blueprint, url_prefix='/blueprint')
app.register_blueprint(template, url_prefix='/template')
app.register_blueprint(sso, url_prefix='/sso')
app.register_blueprint(price, url_prefix='/price')
app.register_blueprint(account, url_prefix='/account')
app.register_blueprint(home)
app.register_blueprint(templatefilter)
def register_extensions(app):
"""Register Flask extensions."""
db.app = app
db.init_app(app)
csrf = CSRFProtect()
csrf.init_app(app)
CACHE.init_app(app)
login_manager.init_app(app)
def register_errorhandlers(app):
"""Add errorhandlers to the app."""
def render_error(error):
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template("{0}.html".format(error_code)), error_code
for errcode in [401, 403, 404, 500]:
app.errorhandler(errcode)(render_error)
def register_before_requests(app):
"""Register before_request functions."""
def global_user():
g.user = flask_login.current_user
def check_and_update_user():
""" check for invalid token and print message and update last seen """
if flask_login.current_user.is_authenticated and not is_xhr(request):
char_id = flask_login.current_user.character_id
current_user = flask_login.current_user
count_error = TokenScope.query.filter_by(
valid=False
).join(User).filter(
((User.main_character_id.is_(None)) &
(User.character_id == char_id)) |
(User.main_character_id == char_id)
).filter(
((TokenScope.last_update.is_(None)) &
(TokenScope.updated_at >= current_user.current_login_at)) |
(TokenScope.last_update >= current_user.current_login_at)
).count()
if count_error > 0:
flash('You have at least one scope that have been invalidate.'
' Please take a moment to check and update it, '
' or remove it.', 'danger')
flask_login.current_user.current_login_at = utcnow()
db.session.commit()
app.before_request(global_user)
app.before_request(check_and_update_user)
def register_context_processors(app):
"""Register context_processor functions."""
app.context_processor(inject_user)
app.context_processor(inject_enums)
def register_teardown_appcontext(app):
"""Register teardown_appcontext functions."""
def commit_on_success(error=None):
if error is None:
db.session.commit()
app.teardown_appcontext(commit_on_success)
```
#### File: models/sde/item.py
```python
from . import db
from .activity import Activity
from flask import url_for
class Item(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=False)
name = db.Column(db.String(100), nullable=True)
max_production_limit = db.Column(db.Integer, nullable=True)
market_group_id = db.Column(db.Integer)
group_id = db.Column(db.Integer)
category_id = db.Column(db.Integer)
volume = db.Column(db.Numeric(precision=16, scale=4, decimal_return_scale=4, asdecimal=False), nullable=True)
# calculated field on import
is_from_manufacturing = db.Column(db.Boolean(), default=True)
is_from_reaction = db.Column(db.Boolean(), default=True)
base_cost = db.Column(
db.Numeric(
precision=17,
scale=2,
decimal_return_scale=2,
asdecimal=False
),
nullable=True,
)
# foreign keys
activities = db.relationship(
'Activity',
backref='blueprint',
lazy='dynamic'
)
activity_products = db.relationship(
'ActivityProduct',
backref='blueprint',
lazy='dynamic',
foreign_keys='ActivityProduct.item_id'
)
activity_skills = db.relationship(
'ActivitySkill',
backref='blueprint',
lazy='dynamic',
foreign_keys='ActivitySkill.item_id'
)
activity_materials = db.relationship(
'ActivityMaterial',
backref='blueprint',
lazy='dynamic',
foreign_keys='ActivityMaterial.item_id'
)
product_for_activities = db.relationship(
'ActivityProduct',
backref='product',
lazy='dynamic',
foreign_keys='ActivityProduct.product_id'
)
skill_for_activities = db.relationship(
'ActivitySkill',
backref='skill',
lazy='dynamic',
foreign_keys='ActivitySkill.skill_id'
)
material_for_activities = db.relationship(
'ActivityMaterial',
backref='material',
lazy='dynamic',
foreign_keys='ActivityMaterial.material_id'
)
# relationship only defined for performance issues
# ------------------------------------------------
activity_products__eager = db.relationship(
'ActivityProduct',
lazy='joined',
foreign_keys='ActivityProduct.item_id'
)
def icon_32(self):
static_url = "ccp/Types/%d_32.png" % self.id
return url_for('static', filename=static_url)
def icon_64(self):
static_url = "ccp/Types/%d_64.png" % self.id
return url_for('static', filename=static_url)
def is_moon_goo(self):
return self.market_group_id == 499
def is_pi(self):
return self.category_id == 43
def is_mineral_salvage(self):
return self.market_group_id in [1857, 1033, 1863]
def is_ancient_relic(self):
return self.category_id == 34
def is_cap_part(self):
""" Return if the item is a cap part / blueprint of cap part.
914 / 915 are Blueprints
913 / 873 are their respective items """
return self.group_id in [914, 915, 913, 873]
```
#### File: views/ajax/eve_api.py
```python
from flask import Blueprint
from flask import jsonify
from flask import request
from sqlalchemy import func
from lazyblacksmith.models import IndustryIndex
from lazyblacksmith.models import ItemAdjustedPrice
from lazyblacksmith.models import ItemPrice
from lazyblacksmith.models import SolarSystem
from lazyblacksmith.utils.json import json_response
from lazyblacksmith.utils.request import is_xhr
import humanize
ajax_eve_api = Blueprint('ajax_eve_api', __name__)
@ajax_eve_api.route('/get_price/<string:item_list>', methods=['GET'])
def get_price(item_list):
"""
Get prices for all items we need !
"""
if is_xhr(request):
item_list = item_list.split(',')
# get all items price
item_prices = ItemPrice.query.filter(
ItemPrice.item_id.in_(item_list)
)
item_price_list = {}
for price in item_prices:
if price.region_id not in item_price_list:
item_price_list[price.region_id] = {}
item_price_list[price.region_id][price.item_id] = {
'sell': price.sell_price,
'buy': price.buy_price,
'updated_at': humanize.naturaltime(price.get_delta_update()),
}
# get all items adjusted price
item_adjusted = ItemAdjustedPrice.query.filter(
ItemAdjustedPrice.item_id.in_(item_list)
)
item_adjusted_list = {}
for item in item_adjusted:
item_adjusted_list[item.item_id] = item.price
return jsonify(
{'prices': item_price_list, 'adjusted': item_adjusted_list}
)
else:
return 'Cannot call this page directly', 403
@ajax_eve_api.route(
'/get_index/<string:solar_system_names>',
methods=['GET']
)
def get_index_activity(solar_system_names):
""" Return all indexes for a given solarsystem name """
ss_name_list = solar_system_names.split(',')
# get the solar systems
solar_systems = SolarSystem.query.filter(
func.lower(SolarSystem.name).in_(ss_name_list)
).all()
if not solar_systems:
return json_response(
'warning',
'Solar systems (%s) does not exist' % solar_system_names,
404
)
# put the solar system in a dict
solar_systems_list = {}
for system in solar_systems:
solar_systems_list[system.id] = system.name
# get the index from the list of solar system
industry_index = IndustryIndex.query.filter(
IndustryIndex.solarsystem_id.in_(solar_systems_list.keys()),
).all()
if not industry_index:
return json_response(
'warning',
('There is no index for Solar System (%s).' % solar_system_names),
404
)
# and then put that index list into a dict[solar_system_name] = cost_index
index_list = {}
for index in industry_index:
ss = solar_systems_list[index.solarsystem_id].lower()
if ss not in index_list:
index_list[ss] = {}
index_list[ss][index.activity] = index.cost_index
return jsonify(index=index_list)
```
#### File: views/ajax/__init__.py
```python
from flask import request
from lazyblacksmith.utils.request import is_xhr
import logging
logger = logging.getLogger('lb.ajax')
def is_not_ajax():
"""
Return True if request is not ajax
This function is used in @cache annotation
to not cache direct call (http 403)
"""
return not is_xhr(request)
``` |
{
"source": "jonathonjb/Cars_PyGame",
"score": 3
} |
#### File: jonathonjb/Cars_PyGame/main.py
```python
import pygame as pg
from mapGenerator import MapGenerator
from vehicle import Vehicle
FRAMES_PER_SECOND = 30
MIN_TILE_SIZE = 13
MAX_TILE_SIZE = 30
SCREEN_WIDTH_APPROX = 800
SCREEN_HEIGHT_APPROX = 600
def main():
pg.init()
pg.font.init()
scoreFont = pg.font.SysFont(None, 100)
clock = pg.time.Clock()
currTileSize = MAX_TILE_SIZE + 1
score = -1
lives = 5
gameIsRunning = True
while(gameIsRunning):
score += 1
if(currTileSize != MIN_TILE_SIZE):
currTileSize -=1
numOfXTiles, numOfYTiles, screen = initializeScreen(currTileSize)
vehicleStartX = currTileSize
vehicleStartY = numOfYTiles / 2 * currTileSize
mapGenerator = MapGenerator(currTileSize, numOfXTiles, numOfYTiles)
roadTiles, landTiles, flag = mapGenerator.generateMap()
vehicle = Vehicle(vehicleStartX, vehicleStartY)
mapSprites = pg.sprite.RenderPlain((roadTiles, landTiles))
vehicleSprite = pg.sprite.RenderPlain((vehicle))
flagSprite = pg.sprite.RenderPlain((flag))
scoreText = scoreFont.render(str(score), False, (0, 0, 255))
roundIsRunning = True
while(gameIsRunning and roundIsRunning):
clock.tick(FRAMES_PER_SECOND)
gameIsRunning = eventListener(gameIsRunning, vehicle)
vehicleSprite.update()
mapSprites.draw(screen)
flagSprite.draw(screen)
vehicleSprite.draw(screen)
screen.blit(scoreText, (10, 10))
livesText = scoreFont.render(str(lives), False, (0, 0, 255))
screen.blit(livesText, (730, 10))
if (pg.sprite.spritecollide(vehicle, flagSprite, True)):
roundIsRunning = False
for sprite in mapSprites:
sprite.kill()
for sprite in flagSprite:
sprite.kill()
if(pg.sprite.spritecollide(vehicle, landTiles, False) or
vehicle.rect.left < 0 or vehicle.rect.right > SCREEN_WIDTH_APPROX or
vehicle.rect.top < 0 or vehicle.rect.bottom > SCREEN_HEIGHT_APPROX):
lives -= 1
if(lives <= 0):
gameIsRunning = False
roundIsRunning = False
else:
vehicle.restart()
pg.display.flip()
def eventListener(gameIsRunning, vehicle):
for event in pg.event.get():
if (event.type == pg.QUIT):
gameIsRunning = False
elif (event.type == pg.KEYDOWN):
if (event.key == pg.K_w):
vehicle.accelerationStatus = 'accelerate'
elif (event.key == pg.K_s):
vehicle.accelerationStatus = 'reverse'
elif (event.key == pg.K_d):
vehicle.turningStatus = 'right'
elif (event.key == pg.K_a):
vehicle.turningStatus = 'left'
elif (event.type == pg.KEYUP):
if (event.key == pg.K_w):
vehicle.accelerationStatus = 'decelerate'
elif (event.key == pg.K_s):
vehicle.accelerationStatus = 'decelerate'
elif (event.key == pg.K_d):
vehicle.turningStatus = 'straight'
elif (event.key == pg.K_a):
vehicle.turningStatus = 'straight'
return gameIsRunning
def initializeScreen(tileSize):
numOfXTiles = int(SCREEN_WIDTH_APPROX / tileSize)
numOfYTiles = int(SCREEN_HEIGHT_APPROX / tileSize)
screenWidth = tileSize * numOfXTiles
screenHeight = tileSize * numOfYTiles
screen = pg.display.set_mode((screenWidth, screenHeight))
return numOfXTiles, numOfYTiles, screen
if __name__ == '__main__':
main()
``` |
{
"source": "jonathonmcmurray/tvsorter",
"score": 3
} |
#### File: jonathonmcmurray/tvsorter/framematch.py
```python
from PIL import Image
import os
import imagehash
import cv2
import glob
from datetime import datetime
import util
def log(s):
"""
Simple basic logging output with timestamp
"""
now = datetime.now()
print(now.strftime("%Y-%m-%d %H:%M:%S") + " " + s)
return
def hashframe(i):
"""
Get imagehash for a frame passed as a numpy array
"""
return imagehash.average_hash(Image.fromarray(i))
def getclosest(mins):
"""
Given a dict of thumbnail matches for a video, return overall closest match(es)
"""
return [k for k,v in mins.items() if v == min(mins.values())]
def matchvideo(video,pattern):
"""
Match a single video to a glob of thumbnails, return early if exact match is found
"""
log(f"Beginning scan for {video}")
# generate set of hashes to compare to
imgs = []
hashes = []
for f in sorted(glob.glob(pattern)):
imgs.append(f)
hashes.append(imagehash.average_hash(Image.open(f)))
# iterate through frames of videos until finding a match
v = cv2.VideoCapture(video)
s,i = v.read()
mins = {}
while s:
hash = hashframe(i)
cmp = [h - hash for h in hashes]
mins = util.mineach(mins,dict(zip(imgs,cmp)))
if 0 == min(cmp):
log(f"Found an exact match with {imgs[cmp.index(0)]}")
break
s,i = v.read()
log("Finished scanning video")
return mins
def getmatches(final,matches):
# iterate over all the unmatched videos remaining
for k in list(matches):
# k is video name
# get closest thumbnail for this video, excluding any already there
m = list(filter(lambda x:not x in final.values(),matches[k]))
ms = dict([(k,v) for k,v in matches[k].items() if k in m])
c = getclosest(ms)
# if one single closest, take it & remove from the potentials
if 1 == len(c):
final[k] = c[0]
matches.pop(k)
# while still some potentials, recurse
if 0<len(matches):
final = getmatches(final,matches)
return final
def allvideos(videopattern,thumbpattern):
"""
Iterate through a glob of videos & match each one to closest thumbnail
"""
renames = {}
matches = {}
# potential = {}
for f in sorted(glob.glob(videopattern)):
matches[f] = matchvideo(f,thumbpattern)
# c = getclosest(matches[f])
# if 1 == len(c):
# #renames[f] = os.path.splitext(os.path.basename(c[0]))[0] + ".mkv"
# renames[f] = c[0]
# else:
# potential[f] = c
# # remove any items that have already matched perfectly
# for f,p in potential.items():
# potential[f] = [x for x in p if not x in renames.values()]
renames = getmatches({},matches)
print('"Good" matches:')
util.prettydict(renames)
# print('\n"Potential matches')
# util.prettydict(potential)
# TODO add logic here to break any ties & create rename dict
return [renames,matches]
#return matches
```
#### File: jonathonmcmurray/tvsorter/tvdb.py
```python
import requests
import json
import os
# credentials for TVDB API
apikey = ""
userkey = ""
username = ""
# put credentials in a dictionary for ease of use
apiauth = {"apikey": apikey, "userkey": userkey, "username": username}
# global string to hold JWT once authenticated
jwt = ""
def authenticate(credentials):
"""
Authenticate with TVDB API & set global JWT for use in other requests
"""
global jwt
r = requests.post("https://api.thetvdb.com/login",json=credentials)
jwt = r.json()["token"]
return
def tvdbget(path,params={}):
"""
Send a simple HTTP GET request to TVDB using global JWT
"""
header = {"Accept":"application/json","Authorization":"Bearer "+jwt}
r = requests.get("https://api.thetvdb.com"+path,headers=header,params=params)
return r.json()
def pagedget(path):
"""
Perform a paged request on TVDB API i.e. retrieve full data set via multiple requests if required
"""
r = tvdbget(path)
data = r['data']
while r['links']['next'] != None:
r = tvdbget(path,params={'page':r['links']['next']})
data += r['data']
return data
def getthumb(episode,target="."):
"""
Passed an episode dictionary, retrieves the episode thumbnail & saves with appropriate filename
"""
fn = f"s{episode['airedSeason']:02}e{episode['airedEpisodeNumber']:02}.{episode['episodeName']}.jpg"
fn = os.path.join(target,fn.replace(" ",""))
url = "https://artworks.thetvdb.com/banners/"+episode['filename']
print(f"Saving '{url}' as '{fn}'")
with open(fn,'wb') as f:
i = requests.get(url)
f.write(i.content)
return
def getallthumbs(episodes,target="."):
"""
Passed a list of episode dictionaries, iterate to download all thumbnails
"""
for episode in episodes:
getthumb(episode,target)
return
``` |
{
"source": "jonathonmellor/mimesis-stats",
"score": 2
} |
#### File: mimesis-stats/tests/test_stats_schema.py
```python
import pytest
from mimesis_stats.stats_schema import StatsSchema
@pytest.mark.parametrize(
"input, exclude, expected_result",
[
({"level0": "example1"}, [], {"level0": "example1"}),
(
{"level0.0": "example1", "level0.1": {"level1.0": 1, "level1.1": 2}},
[],
{"level0.0": "example1", "level1.0": 1, "level1.1": 2},
),
(
{"level0.0": "example1", "level0.1": {"level1.0": 1, "level1.1": 2}},
["level0.1"],
{"level0.0": "example1", "level0.1": {"level1.0": 1, "level1.1": 2}},
),
],
)
def test_unnest(input, exclude, expected_result):
s_schema = StatsSchema()
assert s_schema._unnest(input, exclude=exclude) == expected_result
@pytest.mark.parametrize(
"inputs, iterations, expected_result",
[
({"v1": {"name": "dummy_number", "provider_method": "dummy.one"}}, 1, [{"dummy_number": 1}]),
(
{"v1": {"name": "dummy_number", "provider_method": "dummy.one"}},
2,
[{"dummy_number": 1}, {"dummy_number": 1}],
),
({"v1": {"name": "dummy_dict", "provider_method": "dummy.dictionary"}}, 1, [{"collins": "defines"}]),
(
{
"v1": {"name": "dummy_number", "provider_method": "dummy.one"},
"v2": {"name": "dummy_string", "provider_method": "dummy.characters"},
},
1,
[{"dummy_number": 1, "dummy_string": "ABC"}],
),
],
)
def test_stats_schema_create(dummy_field, inputs, iterations, expected_result):
schema = lambda: { # noqa: E731
variable["name"]: dummy_field(variable["provider_method"]) for variable in inputs.values()
}
s_schema = StatsSchema(schema=schema)
result = s_schema.create(iterations=iterations)
assert result == expected_result
def test_nested_generation(dummy_field):
schema = lambda: {"nest": dummy_field("choice", items=["hard", dummy_field("dummy.one")])} # noqa: E731
s_schema = StatsSchema(schema=schema)
# not technically deterministic
n = 10000
# p FN = (0.5)^n, n~10,000, p~0, beyond floating point recording discrepency
result = s_schema.create(iterations=n)
values = [variable["nest"] for variable in result]
assert set(values) == set([1, "hard"])
def test_nested_generation_deterministic(dummy_field):
schema = lambda: { # noqa: E731
"nest": dummy_field("choice", items=["hard", dummy_field("choice", items=["A", "B"])])
}
s_schema = StatsSchema(schema=schema)
# not technically deterministic
n = 10000
# p FN = (0.5)^n, n~10,000, p~0, beyond floating point recording discrepency
result = s_schema.create(iterations=n)
values = [variable["nest"] for variable in result]
assert set(values) == set(["A", "B", "hard"])
``` |
{
"source": "JonathonMSmith/cdflib",
"score": 2
} |
#### File: cdflib/cdflib/__init__.py
```python
from . import cdfread
from . import cdfwrite
from .epochs import CDFepoch as cdfepoch # noqa: F401
try:
from .epochs_astropy import CDFAstropy as cdfastropy
except Exception:
pass
from pathlib import Path
# This function determines if we are reading or writing a file
def CDF(path, cdf_spec=None, delete=False, validate=None):
path = Path(path).expanduser()
if path.is_file():
if delete:
path.unlink()
return
else:
return cdfread.CDF(path, validate=validate)
else:
return cdfwrite.CDF(path, cdf_spec=cdf_spec, delete=delete)
``` |
{
"source": "JonathonMSmith/pysat",
"score": 3
} |
#### File: pysat/pysat/_files.py
```python
from __future__ import print_function
from __future__ import absolute_import
import string
import os
import weakref
import re
import glob
import numpy as np
import pandas as pds
from pysat import data_dir as data_dir
class Files(object):
"""Maintains collection of files for instrument object.
Uses the list_files functions for each specific instrument
to create an ordered collection of files in time. Used by
instrument object to load the correct files. Files also
contains helper methods for determining the presence of
new files and creating an ordered list of files.
Attributes
----------
base_path : string
path to .pysat directory in user home
start_date : datetime
date of first file, used as default start bound for instrument
object
stop_date : datetime
date of last file, used as default stop bound for instrument
object
data_path : string
path to the directory containing instrument files,
top_dir/platform/name/tag/
manual_org : bool
if True, then Files will look directly in pysat data directory
for data files and will not use /platform/name/tag
update_files : bool
updates files on instantiation if True
Note
----
User should generally use the interface provided by a pysat.Instrument
instance. Exceptions are the classmethod from_os, provided to assist
in generating the appropriate output for an instrument routine.
Examples
--------
::
# convenient file access
inst = pysat.Instrument(platform=platform, name=name, tag=tag,
sat_id=sat_id)
# first file
inst.files[0]
# files from start up to stop (exclusive on stop)
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,3)
print(vefi.files[start:stop])
# files for date
print(vefi.files[start])
# files by slicing
print(vefi.files[0:4])
# get a list of new files
# new files are those that weren't present the last time
# a given instrument's file list was stored
new_files = vefi.files.get_new()
# search pysat appropriate directory for instrument files and
# update Files instance.
vefi.files.refresh()
"""
def __init__(self, sat, manual_org=False, directory_format=None,
update_files=False, file_format=None, write_to_disk=True):
""" Initialization for Files class object
Parameters
-----------
sat : pysat._instrument.Instrument
Instrument object
manual_org : boolian
If True, then pysat will look directly in pysat data directory
for data files and will not use default /platform/name/tag
(default=False)
directory_format : string or NoneType
directory naming structure in string format. Variables such as
platform, name, and tag will be filled in as needed using python
string formatting. The default directory structure would be
expressed as '{platform}/{name}/{tag}' (default=None)
update_files : boolean
If True, immediately query filesystem for instrument files and store
(default=False)
file_format : str or NoneType
File naming structure in string format. Variables such as year,
month, and sat_id will be filled in as needed using python string
formatting. The default file format structure is supplied in the
instrument list_files routine. (default=None)
write_to_disk : boolean
If true, the list of Instrument files will be written to disk.
Prevents a rare condition when running multiple pysat processes.
"""
# pysat.Instrument object
self._sat = weakref.proxy(sat)
# location of .pysat file
self.home_path = os.path.join(os.path.expanduser('~'), '.pysat')
self.start_date = None
self.stop_date = None
self.files = pds.Series(None)
# location of stored files
self.stored_file_name = ''.join((self._sat.platform,'_', self._sat.name,
'_',self._sat.tag, '_',
self._sat.sat_id,
'_stored_file_info.txt'))
# flag for setting simple organization of files, only
# look under pysat_data_dir
self.manual_org = manual_org
# path for sub-directories under pysat data path
if directory_format is None:
directory_format = os.path.join('{platform}','{name}','{tag}')
self.directory_format = directory_format
# user-specified file format
self.file_format = file_format
if manual_org:
self.sub_dir_path = ''
else:
# construct subdirectory path
self.sub_dir_path = \
self.directory_format.format(name=self._sat.name,
platform=self._sat.platform,
tag=self._sat.tag,
sat_id=self._sat.sat_id)
# make sure path always ends with directory seperator
self.data_path = os.path.join(data_dir, self.sub_dir_path)
if self.data_path[-2] == os.path.sep:
self.data_path = self.data_path[:-1]
elif self.data_path[-1] != os.path.sep:
self.data_path = os.path.join(self.data_path, '')
self.write_to_disk = write_to_disk
if write_to_disk is False:
self._previous_file_list = pds.Series([], dtype='a')
self._current_file_list = pds.Series([], dtype='a')
if self._sat.platform != '':
# load stored file info
info = self._load()
if not info.empty:
self._attach_files(info)
if update_files:
self.refresh()
else:
# couldn't find stored info, load file list and then store
self.refresh()
def _attach_files(self, files_info):
"""Attach results of instrument list_files routine to Instrument object
Parameters
-----------
file_info :
Stored file information
Returns
---------
updates the file list (files), start_date, and stop_date attributes
of the Files class object.
"""
if not files_info.empty:
if(not self._sat.multi_file_day and
len(files_info.index.unique()) != len(files_info)):
estr = 'WARNING! Duplicate datetimes in provided file '
estr = '{:s}information.\nKeeping one of each '.format(estr)
estr = '{:s}of the duplicates, dropping the rest.'.format(estr)
print(estr)
print(files_info.index.get_duplicates())
idx = np.unique(files_info.index, return_index=True)
files_info = files_info.ix[idx[1]]
#raise ValueError('List of files must have unique datetimes.')
self.files = files_info.sort_index()
date = files_info.index[0]
self.start_date = pds.datetime(date.year, date.month, date.day)
date = files_info.index[-1]
self.stop_date = pds.datetime(date.year, date.month, date.day)
else:
self.start_date = None
self.stop_date = None
# convert to object type
# necessary if Series is empty, enables == checks with strings
self.files = files_info.astype(np.dtype('O'))
def _store(self):
"""Store currently loaded filelist for instrument onto filesystem"""
name = self.stored_file_name
# check if current file data is different than stored file list
# if so, move file list to previous file list, store current to file
# if not, do nothing
stored_files = self._load()
if len(stored_files) != len(self.files):
# # of items is different, things are new
new_flag = True
elif len(stored_files) == len(self.files):
# # of items equal, check specifically for equality
if stored_files.eq(self.files).all():
new_flag = False
else:
# not equal, there are new files
new_flag = True
if new_flag:
if self.write_to_disk:
stored_files.to_csv(os.path.join(self.home_path,
'previous_'+name),
date_format='%Y-%m-%d %H:%M:%S.%f')
self.files.to_csv(os.path.join(self.home_path, name),
date_format='%Y-%m-%d %H:%M:%S.%f')
else:
self._previous_file_list = stored_files
self._current_file_list = self.files.copy()
return
def _load(self, prev_version=False):
"""Load stored filelist and return as Pandas Series
Parameters
----------
prev_version : boolean
if True, will load previous version of file list
Returns
-------
pandas.Series
Full path file names are indexed by datetime
Series is empty if there is no file list to load
"""
fname = self.stored_file_name
if prev_version:
fname = os.path.join(self.home_path, 'previous_'+fname)
else:
fname = os.path.join(self.home_path, fname)
if os.path.isfile(fname) and (os.path.getsize(fname) > 0):
if self.write_to_disk:
return pds.read_csv(fname, index_col=0, parse_dates=True,
squeeze=True, header=None)
else:
# grab files from memory
if prev_version:
return self._previous_file_list
else:
return self._current_file_list
else:
return pds.Series([], dtype='a')
def refresh(self):
"""Update list of files, if there are changes.
Calls underlying list_rtn for the particular science instrument.
Typically, these routines search in the pysat provided path,
pysat_data_dir/platform/name/tag/,
where pysat_data_dir is set by pysat.utils.set_data_dir(path=path).
"""
output_str = '{platform} {name} {tag} {sat_id}'
output_str = output_str.format(platform=self._sat.platform,
name=self._sat.name, tag=self._sat.tag,
sat_id=self._sat.sat_id)
output_str = " ".join(("pysat is searching for", output_str, "files."))
output_str = " ".join(output_str.split())
print (output_str)
info = self._sat._list_rtn(tag=self._sat.tag, sat_id=self._sat.sat_id,
data_path=self.data_path,
format_str=self.file_format)
if not info.empty:
print('Found {ll:d} of them.'.format(ll=len(info)))
else:
estr = "Unable to find any files that match the supplied template. "
estr += "If you have the necessary files please check pysat "
estr += "settings and file locations (e.g. pysat.pysat_dir)."
print(estr)
info = self._remove_data_dir_path(info)
self._attach_files(info)
self._store()
def get_new(self):
"""List new files since last recorded file state.
pysat stores filenames in the user_home/.pysat directory. Returns
a list of all new fileanmes since the last known change to files.
Filenames are stored if there is a change and either update_files
is True at instrument object level or files.refresh() is called.
Returns
-------
pandas.Series
files are indexed by datetime
"""
# refresh files
self.refresh()
# current files
new_info = self._load()
# previous set of files
old_info = self._load(prev_version=True)
new_files = new_info[-new_info.isin(old_info)]
return new_files
# def mark_as_new(self, files):
# """Set list of files as new.
#
# """
# pass
# stored_info = self._load()
# if not stored_info.empty: # is not False:
# new_info = self._sat._list_rtn(tag = self._sat.tag,
# data_path=self.data_path,
# format_str=self.file_format)
# new_info = self._remove_data_dir_path(new_info)
# new_files = new_info[~new_info.isin(stored_info) ]
# return new_files
# else:
# print('No previously stored files that we may compare to.')
# return pds.Series([], dtype='a') #False
def get_index(self, fname):
"""Return index for a given filename.
Parameters
----------
fname : string
filename
Note
----
If fname not found in the file information already attached
to the instrument.files instance, then a files.refresh() call
is made.
"""
idx, = np.where(fname == self.files)
if len(idx) == 0:
# filename not in index, try reloading files from disk
self.refresh()
#print("DEBUG get_index:", fname, self.files)
idx, = np.where(fname == np.array(self.files))
if len(idx) == 0:
raise ValueError('Could not find "' + fname +
'" in available file list. Valid Example: ' +
self.files.iloc[0])
# return a scalar rather than array - otherwise introduces array to
# index warnings.
return idx[0]
# convert this to a normal get so files[in:in2] gives the same as requested
# here support slicing via date and index filename is inclusive slicing,
# date and index are normal non-inclusive end point
def __getitem__(self, key):
if isinstance(key, slice):
try:
out = self.files.ix[key]
except IndexError:
raise IndexError('Date requested outside file bounds.')
if isinstance(key.start, pds.datetime):
# enforce exclusive slicing on datetime
if len(out) > 1:
if out.index[-1] >= key.stop:
return out[:-1]
else:
return out
elif len(out) == 1:
if out.index[0] >= key.stop:
return pds.Series([], dtype='a')
else:
return out
else:
return out
else:
# not a datetime
return out
else:
return self.files.ix[key]
#raise ValueError('Not implemented yet.')
#if isinstance(key, tuple):
# if len(key) == 2:
# start = key[0]
# end = key[1]
# else:
# raise ValueError('Must input 2 and only 2 items/iterables')
def get_file_array(self, start, end):
"""Return a list of filenames between and including start and end.
Parameters
----------
start: array_like or single string
filenames for start of returned filelist
stop: array_like or single string
filenames inclusive end of list
Returns
-------
list of filenames between and including start and end over all
intervals.
"""
if hasattr(start, '__iter__') & hasattr(end, '__iter__'):
files = []
for (sta,stp) in zip(start, end):
id1 = self.get_index(sta)
id2 = self.get_index(stp)
files.extend(self.files.iloc[id1 : id2+1])
elif hasattr(start, '__iter__') | hasattr(end, '__iter__'):
estr = 'Either both or none of the inputs need to be iterable'
raise ValueError(estr)
else:
id1 = self.get_index(start)
id2 = self.get_index(end)
files = self.files[id1:id2+1].to_list()
return files
def _remove_data_dir_path(self, inp=None):
# import string
"""Remove the data directory path from filenames"""
# need to add a check in here to make sure data_dir path is actually in
# the filename
if inp is not None:
split_str = os.path.join(self.data_path, '')
return inp.apply(lambda x: x.split(split_str)[-1])
#elif inp is not None:
#
# return inp.split(split_str)[-1]
# match = os.path.join(self.data_path,'')
# num = len(match)
# return inp.apply(lambda x: x[num:])
@classmethod
def from_os(cls, data_path=None, format_str=None,
two_digit_year_break=None):
"""
Produces a list of files and and formats it for Files class.
Requires fixed_width filename
Parameters
----------
data_path : string
Top level directory to search files for. This directory
is provided by pysat to the instrument_module.list_files
functions as data_path.
format_str : string with python format codes
Provides the naming pattern of the instrument files and the
locations of date information so an ordered list may be produced.
Supports 'year', 'month', 'day', 'hour', 'min', 'sec', 'version',
and 'revision'
Ex: 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
two_digit_year_break : int
If filenames only store two digits for the year, then
'1900' will be added for years >= two_digit_year_break
and '2000' will be added for years < two_digit_year_break.
Note
----
Does not produce a Files instance, but the proper output
from instrument_module.list_files method.
The '?' may be used to indicate a set number of spaces for a variable
part of the name that need not be extracted.
'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v??.cdf'
"""
import collections
from pysat.utils import create_datetime_index
if format_str is None:
raise ValueError("Must supply a filename template (format_str).")
if data_path is None:
raise ValueError("Must supply instrument directory path (dir_path)")
# parse format string to figure out the search string to use
# to identify files in the filesystem
search_str = ''
form = string.Formatter()
# stores the keywords extracted from format_string
keys = []
#, and length of string
snips = []
length = []
stored = collections.OrderedDict()
stored['year'] = []; stored['month'] = []; stored['day'] = [];
stored['hour'] = []; stored['min'] = []; stored['sec'] = [];
stored['version'] = []; stored['revision'] = [];
for snip in form.parse(format_str):
# collect all of the format keywords
# replace them in the string with the '*' wildcard
# then try and get width from format keywords so we know
# later on where to parse information out from
search_str += snip[0]
snips.append(snip[0])
if snip[1] is not None:
keys.append(snip[1])
search_str += '*'
# try and determine formatting width
temp = re.findall(r'\d+', snip[2])
if temp:
# there are items, try and grab width
for i in temp:
if i != 0:
length.append(int(i))
break
else:
raise ValueError("Couldn't determine formatting width")
abs_search_str = os.path.join(data_path, search_str)
files = glob.glob(abs_search_str)
# we have a list of files, now we need to extract the date information
# code below works, but only if the size of file string
# remains unchanged
# determine the locations the date information in a filename is stored
# use these indices to slice out date from loaded filenames
# test_str = format_str.format(**periods)
if len(files) > 0:
idx = 0
begin_key = []
end_key = []
for i,snip in enumerate(snips):
idx += len(snip)
if i < (len(length)):
begin_key.append(idx)
idx += length[i]
end_key.append(idx)
max_len = idx
# setting up negative indexing to pick out filenames
key_str_idx = [np.array(begin_key, dtype=int) - max_len,
np.array(end_key, dtype=int) - max_len]
# need to parse out dates for datetime index
for i,temp in enumerate(files):
for j,key in enumerate(keys):
val = temp[key_str_idx[0][j]:key_str_idx[1][j]]
stored[key].append(val)
# convert to numpy arrays
for key in stored.keys():
stored[key] = np.array(stored[key]).astype(int)
if len(stored[key]) == 0:
stored[key]=None
# deal with the possibility of two digit years
# years above or equal to break are considered to be 1900+
# years below break are considered to be 2000+
if two_digit_year_break is not None:
idx, = np.where(np.array(stored['year']) >=
two_digit_year_break)
stored['year'][idx] = stored['year'][idx] + 1900
idx, = np.where(np.array(stored['year']) < two_digit_year_break)
stored['year'][idx] = stored['year'][idx] + 2000
# need to sort the information for things to work
rec_arr = [stored[key] for key in keys]
rec_arr.append(files)
# sort all arrays
val_keys = keys + ['files']
rec_arr = np.rec.fromarrays(rec_arr, names=val_keys)
rec_arr.sort(order=val_keys, axis=0)
# pull out sorted info
for key in keys:
stored[key] = rec_arr[key]
files = rec_arr['files']
# add hour and minute information to 'sec'
if stored['sec'] is None:
stored['sec'] = np.zeros(len(files))
if stored['hour'] is not None:
stored['sec'] += 3600 * stored['hour']
if stored['min'] is not None:
stored['sec'] += 60 * stored['min']
# if stored['version'] is None:
# stored['version'] = np.zeros(len(files))
if stored['revision'] is None:
stored['revision'] = np.zeros(len(files))
index = create_datetime_index(year=stored['year'],
month=stored['month'],
day=stored['day'], uts=stored['sec'])
# if version and revision are supplied
# use these parameters to weed out files that have been replaced
# with updated versions
# first, check for duplicate index times
dups = index[index.duplicated()].unique()
if (len(dups) > 0) and (stored['version'] is not None):
# we have duplicates
# keep the highest version/revision combo
version = pds.Series(stored['version'], index=index)
revision = pds.Series(stored['revision'], index=index)
revive = version*100000. + revision
frame = pds.DataFrame({'files':files, 'revive':revive,
'time':index}, index=index)
frame = frame.sort_values(by=['time', 'revive'],
ascending=[True, False])
frame = frame.drop_duplicates(subset='time', keep='first')
return frame['files']
else:
return pds.Series(files, index=index)
else:
return pds.Series(None)
```
#### File: pysat/instruments/jro_isr.py
```python
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import pandas as pds
import functools
import pysat
from . import madrigal_methods as mad_meth
from . import nasa_cdaweb_methods as cdw
platform = 'jro'
name = 'isr'
tags = {'drifts':'Drifts and wind', 'drifts_ave':'Averaged drifts',
'oblique_stan':'Standard Faraday rotation double-pulse',
'oblique_rand':'Randomized Faraday rotation double-pulse',
'oblique_long':'Long pulse Faraday rotation'}
sat_ids = {'':list(tags.keys())}
test_dates = {'':{'drifts':pysat.datetime(2010,1,19),
'drifts_ave':pysat.datetime(2010,1,19),
'oblique_stan':pysat.datetime(2010,4,19),
'oblique_rand':pysat.datetime(2000,11,9),
'oblique_long':pysat.datetime(2010,4,12)}}
# support list files routine
# use the default CDAWeb method
jro_fname1 = 'jro{year:4d}{month:02d}{day:02d}'
jro_fname2 = '.{version:03d}.hdf5'
supported_tags = {ss:{'drifts':jro_fname1 + "drifts" + jro_fname2,
'drifts_ave':jro_fname1 + "drifts_avg" + jro_fname2,
'oblique_stan':jro_fname1 + jro_fname2,
'oblique_rand':jro_fname1 + "?" + jro_fname2,
'oblique_long':jro_fname1 + "?" + jro_fname2}
for ss in sat_ids.keys()}
list_files = functools.partial(cdw.list_files,
supported_tags=supported_tags)
# madrigal tags
madrigal_inst_code = 10
madrigal_tag = {'':{'drifts':1910, 'drifts_ave':1911, 'oblique_stan':1800,
'oblique_rand':1801, 'oblique_long':1802},}
# let pysat know that data is spread across more than one file
# multi_file_day=True
# Set to False to specify using xarray (not using pandas)
# Set to True if data will be returned via a pandas DataFrame
pandas_format = False
# support load routine
load = functools.partial(mad_meth.load, xarray_coords=['gdalt'])
# Madrigal will sometimes include multiple days within a file
# labeled with a single date.
# Filter out this extra data using the pysat nanokernel processing queue.
# To ensure this function is always applied first, we set the filter
# function as the default function for (JRO).
# Default function is run first by the nanokernel on every load call.
default = pysat.instruments.madrigal_methods.filter_data_single_date
def init(self):
"""Initializes the Instrument object with values specific to JRO ISR
Runs once upon instantiation.
Parameters
----------
self : pysat.Instrument
This object
Returns
--------
Void : (NoneType)
Object modified in place.
"""
print ("The Jicamarca Radio Observatory is operated by the Instituto " +
"Geofisico del Peru, Ministry of Education, with support from the" +
" National Science Foundation as contracted through Cornell" +
" University. " + mad_meth.cedar_rules())
return
def download(date_array, tag='', sat_id='', data_path=None, user=None,
password=None):
"""Downloads data from Madrigal.
Parameters
----------
date_array : array-like
list of datetimes to download data for. The sequence of dates need not
be contiguous.
tag : string ('')
Tag identifier used for particular dataset. This input is provided by
pysat.
sat_id : string ('')
Satellite ID string identifier used for particular dataset. This input
is provided by pysat.
data_path : string (None)
Path to directory to download data to.
user : string (None)
User string input used for download. Provided by user and passed via
pysat. If an account
is required for dowloads this routine here must error if user not
supplied.
password : string (None)
Password for data download.
Returns
--------
Void : (NoneType)
Downloads data to disk.
Notes
-----
The user's names should be provided in field user. <NAME>-Scott should
be entered as Ruby+Payne-Scott
The password field should be the user's email address. These parameters
are passed to Madrigal when downloading.
The affiliation field is set to pysat to enable tracking of pysat downloads.
"""
mad_meth.download(date_array, inst_code=str(madrigal_inst_code),
kindat=str(madrigal_tag[sat_id][tag]),
data_path=data_path, user=user, password=password)
def clean(self):
"""Routine to return JRO ISR data cleaned to the specified level
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
Supports 'clean', 'dusty', 'dirty'
'Clean' is unknown for oblique modes, over 200 km for drifts
'Dusty' is unknown for oblique modes, over 200 km for drifts
'Dirty' is unknown for oblique modes, over 200 km for drifts
'None' None
Routine is called by pysat, and not by the end user directly.
"""
import numpy as np
idx = list()
if self.tag.find('oblique') == 0:
print('The double pulse, coded pulse, and long pulse modes ' +
'implemented at Jicamarca have different limitations arising ' +
'from different degrees of precision and accuracy. Users ' +
'should consult with the staff to determine which mode is ' +
'right for their application.')
if self.clean_level in ['clean', 'dusty', 'dirty']:
print('WARNING: this level 2 data has no quality flags')
else:
if self.clean_level in ['clean', 'dusty', 'dirty']:
if self.clean_level in ['clean', 'dusty']:
print('WARNING: this level 2 data has no quality flags')
idx, = np.where((self['gdalt'] > 200.0))
else:
print("WARNING: interpretation of drifts below 200 km should " +
"always be done in partnership with the contact people")
# downselect data based upon cleaning conditions above
self.data = self[idx]
return
def calc_measurement_loc(self):
""" Calculate the instrument measurement location in geographic coordinates
Returns
-------
Void : adds 'gdlat#', 'gdlon#' to the instrument, for all directions that
have azimuth and elevation keys that match the format 'eldir#' and 'azdir#'
"""
import numpy as np
import pandas as pds
from pysat import utils
az_keys = [kk[5:] for kk in list(self.data.keys()) if kk.find('azdir') == 0]
el_keys = [kk[5:] for kk in list(self.data.keys()) if kk.find('eldir') == 0]
good_dir = list()
for i,kk in enumerate(az_keys):
if kk in el_keys:
try:
good_dir.append(int(kk))
except:
print("WARNING: unknown direction number [{:}]".format(kk))
# Calculate the geodetic latitude and longitude for each direction
if len(good_dir) == 0:
raise ValueError("No matching azimuth and elevation data included")
for dd in good_dir:
# Format the direction location keys
az_key = 'azdir{:d}'.format(dd)
el_key = 'eldir{:d}'.format(dd)
lat_key = 'gdlat{:d}'.format(dd)
lon_key = 'gdlon{:d}'.format(dd)
# JRO is located 520 m above sea level (jro.igp.gob.pe./english/)
# Also, altitude has already been calculated
gdaltr = np.ones(shape=self['gdlonr'].shape) * 0.52
gdlat, gdlon, _ = utils.local_horizontal_to_global_geo(self[az_key],
self[el_key],
self['range'],
self['gdlatr'],
self['gdlonr'],
gdaltr,
geodetic=True)
# Assigning as data, to ensure that the number of coordinates match
# the number of data dimensions
self.data = self.data.assign(lat_key=gdlat, lon_key=gdlon)
self.data.rename({"lat_key":lat_key, "lon_key":lon_key}, inplace=True)
return
```
#### File: pysat/instruments/template_cdaweb_instrument.py
```python
from __future__ import print_function
from __future__ import absolute_import
import pandas as pds
import numpy as np
import pysat
import sys
import functools
# CDAWeb methods prewritten for pysat
from . import nasa_cdaweb_methods as cdw
# the platform and name strings associated with this instrument
# need to be defined at the top level
# these attributes will be copied over to the Instrument object by pysat
# the strings used here should also be used to name this file
# platform_name.py
platform = ''
name = ''
# dictionary of data 'tags' and corresponding description
tags = {'':'description 1', # this is the default
'tag_string': 'description 2'}
# Let pysat know if there are multiple satellite platforms supported
# by these routines
# define a dictionary keyed by satellite ID, each with a list of
# corresponding tags
# sat_ids = {'a':['L1', 'L0'], 'b':['L1', 'L2'], 'c':['L1', 'L3']}
sat_ids = {'':['']}
# Define good days to download data for when pysat undergoes testing.
# format is outer dictionary has sat_id as the key
# each sat_id has a dictionary of test dates keyed by tag string
# test_dates = {'a':{'L0':pysat.datetime(2019,1,1),
# 'L1':pysat.datetime(2019,1,2)},
# 'b':{'L1':pysat.datetime(2019,3,1),
# 'L2':pysat.datetime(2019,11,23),}}
test_dates = {'':{'':pysat.datetime(2019,1,1)}}
# Additional information needs to be defined
# to support the CDAWeb list files routine
# We need to define a filename format string for every
# supported combination of sat_id and tag string
# fname1 = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
# fname2 = 'cnofs_vefi_acfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
# supported_tags = {'sat1':{'tag1':fname1},
# 'sat2':{'tag2':fname2}}
# you can use format keywords year, month, day, hour, min, sec,
# version and revision
# see code docstring for latest
fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
supported_tags = {'':{'':fname}}
# use the CDAWeb methods list files routine
# the command below presets some of the methods inputs, leaving
# those provided by pysat available when invoked
list_files = functools.partial(cdw.list_files,
supported_tags=supported_tags)
#
# support load routine
#
# use the default CDAWeb method
# no other information needs to be supplied here
# pysatCDF is used to load data
load = cdw.load
#
# support download routine
#
# to use the default CDAWeb method
# we need to provide additional information
# directory location on CDAWeb ftp server
# formatting template for filenames on CDAWeb
# formatting template for files saved to the local disk
# a dictionary needs to be created for each sat_id and tag
# combination along with the file format template
# outer dict keyed by sat_id, inner dict keyed by tag
basic_tag = {'dir':'/pub/data/cnofs/vefi/bfield_1sec',
'remote_fname':'{year:4d}/'+fname,
'local_fname':fname}
supported_tags = {'':{'':basic_tag}}
download = functools.partial(cdw.download, supported_tags)
# code should be defined below as needed
def default(self):
"""Default customization function.
This routine is automatically applied to the Instrument object
on every load by the pysat nanokernel (first in queue).
Parameters
----------
self : pysat.Instrument
This object
Returns
--------
Void : (NoneType)
Object modified in place.
"""
return
# code should be defined below as needed
def clean(inst):
"""Routine to return PLATFORM/NAME data cleaned to the specified level
Cleaning level is specified in inst.clean_level and pysat
will accept user input for several strings. The clean_level is
specified at instantiation of the Instrument object.
'clean' All parameters should be good, suitable for statistical and
case studies
'dusty' All paramers should generally be good though same may
not be great
'dirty' There are data areas that have issues, data should be used
with caution
'none' No cleaning applied, routine not called in this case.
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
-----
"""
return
``` |
{
"source": "JonathonMSmith/pysatSpaceWeather",
"score": 3
} |
#### File: pysatSpaceWeather/instruments/ace_sis.py
```python
import datetime as dt
import functools
import numpy as np
from pysat import logger
from pysatSpaceWeather.instruments.methods import ace as mm_ace
from pysatSpaceWeather.instruments.methods import general
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'ace'
name = 'sis'
tags = {'realtime': 'Real-time data from the SWPC',
'historic': ' Historic data from the SWPC'}
inst_ids = {inst_id: [tag for tag in tags.keys()] for inst_id in ['']}
# Define today's date
now = dt.datetime.utcnow()
# ----------------------------------------------------------------------------
# Instrument test attributes
# Set test dates (first level: inst_id, second level: tag)
_test_dates = {inst_id: {'realtime': dt.datetime(now.year, now.month, now.day),
'historic': dt.datetime(2009, 1, 1)}
for inst_id in inst_ids.keys()}
# ----------------------------------------------------------------------------
# Instrument methods
preprocess = general.preprocess
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
# Set the appropraite acknowledgements and references
self.acknowledgements = mm_ace.acknowledgements()
self.references = mm_ace.references(self.name)
logger.info(self.acknowledgements)
return
def clean(self):
"""Routine to clean real-time ACE data using the status flag
Note
----
Supports 'clean' and 'dirty'. Replaces all fill values with NaN.
Clean - status flag of zero (nominal data)
Dirty - status flag < 9 (accepts bad data record, removes no data record)
"""
# Perform the standard ACE cleaning
max_status = mm_ace.clean(self)
# Evaluate the different proton fluxes. Replace bad values with NaN and
# times with no valid data
self.data['int_pflux_10MeV'][self.data['status_10'] > max_status] = np.nan
self.data['int_pflux_30MeV'][self.data['status_30'] > max_status] = np.nan
eval_cols = ['int_pflux_10MeV', 'int_pflux_30MeV']
# Remove lines without any good data
good_cols = (np.isfinite(self.data.loc[:, eval_cols])).sum(axis=1)
bad_index = good_cols[good_cols == 0].index
self.data = self.data.drop(index=bad_index)
return
# ----------------------------------------------------------------------------
# Instrument functions
download = functools.partial(mm_ace.download, name=name, now=now)
list_files = functools.partial(mm_ace.list_files, name=name)
def load(fnames, tag=None, inst_id=None):
"""Load the ACE space weather prediction data
Parameters
----------
fnames : array-like
Series, list, or array of filenames
tag : str or NoneType
tag or None (default=None)
inst_id : str or NoneType
ACE instrument or None (default=None)
Returns
-------
data : pandas.DataFrame
Object containing instrument data
meta : pysat.Meta
Object containing metadata such as column names and units
Raises
------
ValueError
When unknown inst_id is supplied.
Note
----
Called by pysat. Not intended for direct use by user.
"""
# Save each file to the output DataFrame
data = mm_ace.load_csv_data(fnames, read_csv_kwargs={'index_col': 0,
'parse_dates': True})
# Assign the meta data
meta, status_desc = mm_ace.common_metadata()
flux_name = 'Integral Proton Flux'
meta['status_10'] = {meta.labels.units: '',
meta.labels.name: ''.join([flux_name,
' > 10 MeV Status']),
meta.labels.notes: '',
meta.labels.desc: status_desc,
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['status_30'] = {meta.labels.units: '',
meta.labels.name: ''.join([flux_name,
' > 30 MeV Status']),
meta.labels.notes: '',
meta.labels.desc: status_desc,
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['int_pflux_10MeV'] = {meta.labels.units: 'p/cs2-sec-ster',
meta.labels.name: ''.join([flux_name,
' > 10 MeV']),
meta.labels.notes: '',
meta.labels.desc: ''.join(['5-min averaged ',
flux_name,
' > 10 MeV']),
meta.labels.fill_val: -1.0e5,
meta.labels.min_val: -np.inf,
meta.labels.max_val: np.inf}
meta['int_pflux_30MeV'] = {meta.labels.units: 'p/cs2-sec-ster',
meta.labels.name: ''.join([flux_name,
' > 30 MeV']),
meta.labels.notes: '',
meta.labels.desc: ''.join(['5-min averaged ',
flux_name,
' > 30 MeV']),
meta.labels.fill_val: -1.0e5,
meta.labels.min_val: -np.inf,
meta.labels.max_val: np.inf}
return data, meta
```
#### File: instruments/methods/dst.py
```python
def acknowledgements(name, tag):
"""Returns acknowledgements for space weather dataset
Parameters
----------
name : string
Name of space weather index, eg, dst, f107, kp
tag : string
Tag of the space weather index
"""
ackn = {'dst':
{'noaa': 'Dst is maintained at NCEI (formerly NGDC) at NOAA'}}
return ackn[name][tag]
def references(name, tag):
"""Returns references for space weather dataset
Parameters
----------
name : string
Name of space weather index, eg, dst, f107, kp
tag : string
Tag of the space weather index
"""
refs = {'dst': {'noaa': ''.join([
'See referenece list and publication at: <NAME> <NAME>, '
'http://wdc.kugi.kyoto-u.ac.jp/dstdir/dst2/onDstindex.html, ',
'last updated June 1991, accessed Dec 2020'])}}
return refs[name][tag]
```
#### File: pysatSpaceWeather/instruments/sw_f107.py
```python
import datetime as dt
import ftplib
import json
import numpy as np
import os
import requests
import sys
import warnings
import pandas as pds
import pysat
from pysatSpaceWeather.instruments.methods import f107 as mm_f107
from pysatSpaceWeather.instruments.methods.ace import load_csv_data
from pysatSpaceWeather.instruments.methods import general
logger = pysat.logger
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'sw'
name = 'f107'
tags = {'historic': 'Daily LASP value of F10.7',
'prelim': 'Preliminary SWPC daily solar indices',
'daily': 'Daily SWPC solar indices (contains last 30 days)',
'forecast': 'SWPC Forecast F107 data next (3 days)',
'45day': 'Air Force 45-day Forecast'}
# Dict keyed by inst_id that lists supported tags for each inst_id
inst_ids = {'': [tag for tag in tags.keys()]}
# Dict keyed by inst_id that lists supported tags and a good day of test data
# generate todays date to support loading forecast data
now = dt.datetime.utcnow()
today = dt.datetime(now.year, now.month, now.day)
tomorrow = today + pds.DateOffset(days=1)
# The LASP archive start day is also important
lasp_stime = dt.datetime(1947, 2, 14)
# ----------------------------------------------------------------------------
# Instrument test attributes
_test_dates = {'': {'historic': dt.datetime(2009, 1, 1),
'prelim': dt.datetime(2009, 1, 1),
'daily': tomorrow,
'forecast': tomorrow,
'45day': tomorrow}}
# Other tags assumed to be True
_test_download_travis = {'': {'prelim': False}}
# ----------------------------------------------------------------------------
# Instrument methods
preprocess = general.preprocess
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
self.acknowledgements = mm_f107.acknowledgements(self.name, self.tag)
self.references = mm_f107.references(self.name, self.tag)
logger.info(self.acknowledgements)
# Define the historic F10.7 starting time
if self.tag == 'historic':
self.lasp_stime = lasp_stime
return
def clean(self):
""" Cleaning function for Space Weather indices
Note
----
F10.7 doesn't require cleaning
"""
return
# ----------------------------------------------------------------------------
# Instrument functions
def load(fnames, tag=None, inst_id=None):
"""Load F10.7 index files
Parameters
----------
fnames : pandas.Series
Series of filenames
tag : str or NoneType
tag or None (default=None)
inst_id : str or NoneType
satellite id or None (default=None)
Returns
-------
data : pandas.DataFrame
Object containing satellite data
meta : pysat.Meta
Object containing metadata such as column names and units
Note
----
Called by pysat. Not intended for direct use by user.
"""
# Get the desired file dates and file names from the daily indexed list
file_dates = list()
if tag in ['historic', 'prelim']:
unique_files = list()
for fname in fnames:
file_dates.append(dt.datetime.strptime(fname[-10:], '%Y-%m-%d'))
if fname[0:-11] not in unique_files:
unique_files.append(fname[0:-11])
fnames = unique_files
# Load the CSV data files
data = load_csv_data(fnames, read_csv_kwargs={"index_col": 0,
"parse_dates": True})
# If there is a date range, downselect here
if len(file_dates) > 0:
idx, = np.where((data.index >= min(file_dates))
& (data.index < max(file_dates) + dt.timedelta(days=1)))
data = data.iloc[idx, :]
# Initialize the metadata
meta = pysat.Meta()
meta['f107'] = {meta.labels.units: 'SFU',
meta.labels.name: 'F10.7 cm solar index',
meta.labels.notes: '',
meta.labels.desc:
'F10.7 cm radio flux in Solar Flux Units (SFU)',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
if tag == '45day':
meta['ap'] = {meta.labels.units: '',
meta.labels.name: 'Daily Ap index',
meta.labels.notes: '',
meta.labels.desc: 'Daily average of 3-h ap indices',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: 400}
elif tag == 'daily' or tag == 'prelim':
meta['ssn'] = {meta.labels.units: '',
meta.labels.name: 'Sunspot Number',
meta.labels.notes: '',
meta.labels.desc: 'SESC Sunspot Number',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['ss_area'] = {meta.labels.units: '10$^-6$ Solar Hemisphere',
meta.labels.name: 'Sunspot Area',
meta.labels.notes: '',
meta.labels.desc:
''.join(['Sunspot Area in Millionths of the ',
'Visible Hemisphere']),
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: 1.0e6}
meta['new_reg'] = {meta.labels.units: '',
meta.labels.name: 'New Regions',
meta.labels.notes: '',
meta.labels.desc: 'New active solar regions',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['smf'] = {meta.labels.units: 'G',
meta.labels.name: 'Solar Mean Field',
meta.labels.notes: '',
meta.labels.desc: 'Standford Solar Mean Field',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['goes_bgd_flux'] = {meta.labels.units: 'W/m^2',
meta.labels.name: 'X-ray Background Flux',
meta.labels.notes: '',
meta.labels.desc:
'GOES15 X-ray Background Flux',
meta.labels.fill_val: '*',
meta.labels.min_val: -np.inf,
meta.labels.max_val: np.inf}
meta['c_flare'] = {meta.labels.units: '',
meta.labels.name: 'C X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'C-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['m_flare'] = {meta.labels.units: '',
meta.labels.name: 'M X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'M-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['x_flare'] = {meta.labels.units: '',
meta.labels.name: 'X X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'X-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o1_flare'] = {meta.labels.units: '',
meta.labels.name: '1 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '1-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o2_flare'] = {meta.labels.units: '',
meta.labels.name: '2 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '2-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o3_flare'] = {meta.labels.units: '',
meta.labels.name: '3 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '3-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
return data, meta
def list_files(tag=None, inst_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for F10.7 data
Parameters
----------
tag : string or NoneType
Denotes type of file to load.
(default=None)
inst_id : string or NoneType
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : string or NoneType
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : string or NoneType
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
-------
out_files : pysat._files.Files
A class containing the verified available files
Note
----
Called by pysat. Not intended for direct use by user.
"""
if data_path is not None:
if tag == 'historic':
# Files are by month, going to add date to monthly filename for
# each day of the month. The load routine will load a month of
# data and use the appended date to select out appropriate data.
if format_str is None:
format_str = 'f107_monthly_{year:04d}-{month:02d}.txt'
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
out_files.loc[out_files.index[-1] + pds.DateOffset(months=1)
- pds.DateOffset(days=1)] = out_files.iloc[-1]
out_files = out_files.asfreq('D', 'pad')
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag == 'prelim':
# Files are by year (and quarter)
if format_str is None:
format_str = ''.join(['f107_prelim_{year:04d}_{month:02d}',
'_v{version:01d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
# Set each file's valid length at a 1-day resolution
orig_files = out_files.sort_index().copy()
new_files = list()
for orig in orig_files.iteritems():
# Version determines each file's valid length
version = int(orig[1].split("_v")[1][0])
doff = pds.DateOffset(years=1) if version == 2 \
else pds.DateOffset(months=3)
istart = orig[0]
iend = istart + doff - pds.DateOffset(days=1)
# Ensure the end time does not extend past the number of
# possible days included based on the file's download time
fname = os.path.join(data_path, orig[1])
dend = dt.datetime.utcfromtimestamp(os.path.getctime(fname))
dend = dend - pds.DateOffset(days=1)
if dend < iend:
iend = dend
# Pad the original file index
out_files.loc[iend] = orig[1]
out_files = out_files.sort_index()
# Save the files at a daily cadence over the desired period
new_files.append(out_files.loc[istart:
iend].asfreq('D', 'pad'))
# Add the newly indexed files to the file output
out_files = pds.concat(new_files, sort=True)
out_files = out_files.dropna()
out_files = out_files.sort_index()
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag in ['daily', 'forecast', '45day']:
format_str = ''.join(['f107_', tag,
'_{year:04d}-{month:02d}-{day:02d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# Pad list of files data to include most recent file under tomorrow
if not out_files.empty:
pds_off = pds.DateOffset(days=1)
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
else:
raise ValueError(' '.join(('Unrecognized tag name for Space',
'Weather Index F107:', tag)))
else:
raise ValueError(' '.join(('A data_path must be passed to the loading',
'routine for F107')))
return out_files
def download(date_array, tag, inst_id, data_path, update_files=False):
"""Routine to download F107 index data
Parameters
-----------
date_array : list-like
Sequence of dates to download date for.
tag : string or NoneType
Denotes type of file to load.
inst_id : string or NoneType
Specifies the satellite ID for a constellation.
data_path : string or NoneType
Path to data directory.
update_files : bool
Re-download data for files that already exist if True (default=False)
Note
----
Called by pysat. Not intended for direct use by user.
Warnings
--------
Only able to download current forecast data, not archived forecasts.
"""
# download standard F107 data
if tag == 'historic':
# Test the date array, updating it if necessary
if date_array.freq != 'MS':
warnings.warn(''.join(['Historic F10.7 downloads should be invoked',
" with the `freq='MS'` option."]))
date_array = pysat.utils.time.create_date_range(
dt.datetime(date_array[0].year, date_array[0].month, 1),
date_array[-1], freq='MS')
# Download from LASP, by month
for dl_date in date_array:
# Create the name to which the local file will be saved
str_date = dl_date.strftime('%Y-%m')
data_file = os.path.join(data_path,
'f107_monthly_{:s}.txt'.format(str_date))
if update_files or not os.path.isfile(data_file):
# Set the download webpage
dstr = ''.join(['http://lasp.colorado.edu/lisird/latis/dap/',
'noaa_radio_flux.json?time%3E=',
dl_date.strftime('%Y-%m-%d'),
'T00:00:00.000Z&time%3C=',
(dl_date + pds.DateOffset(months=1)
- pds.DateOffset(days=1)).strftime('%Y-%m-%d'),
'T00:00:00.000Z'])
# The data is returned as a JSON file
req = requests.get(dstr)
# Process the JSON file
raw_dict = json.loads(req.text)['noaa_radio_flux']
data = pds.DataFrame.from_dict(raw_dict['samples'])
if data.empty:
warnings.warn("no data for {:}".format(dl_date),
UserWarning)
else:
# The file format changed over time
try:
# This is the new data format
times = [dt.datetime.strptime(time, '%Y%m%d')
for time in data.pop('time')]
except ValueError:
# Accepts old file formats
times = [dt.datetime.strptime(time, '%Y %m %d')
for time in data.pop('time')]
data.index = times
# Replace fill value with NaNs
idx, = np.where(data['f107'] == -99999.0)
data.iloc[idx, :] = np.nan
# Create a local CSV file
data.to_csv(data_file, header=True)
elif tag == 'prelim':
ftp = ftplib.FTP('ftp.swpc.noaa.gov') # connect to host, default port
ftp.login() # user anonymous, passwd <PASSWORD>@
ftp.cwd('/pub/indices/old_indices')
bad_fname = list()
# Get the local files, to ensure that the version 1 files are
# downloaded again if more data has been added
local_files = list_files(tag, inst_id, data_path)
# To avoid downloading multiple files, cycle dates based on file length
dl_date = date_array[0]
while dl_date <= date_array[-1]:
# The file name changes, depending on how recent the requested
# data is
qnum = (dl_date.month - 1) // 3 + 1 # Integer floor division
qmonth = (qnum - 1) * 3 + 1
quar = 'Q{:d}_'.format(qnum)
fnames = ['{:04d}{:s}DSD.txt'.format(dl_date.year, ss)
for ss in ['_', quar]]
versions = ["01_v2", "{:02d}_v1".format(qmonth)]
vend = [dt.datetime(dl_date.year, 12, 31),
dt.datetime(dl_date.year, qmonth, 1)
+ pds.DateOffset(months=3) - pds.DateOffset(days=1)]
downloaded = False
rewritten = False
# Attempt the download(s)
for iname, fname in enumerate(fnames):
# Test to see if we already tried this filename
if fname in bad_fname:
continue
local_fname = fname
saved_fname = os.path.join(data_path, local_fname)
ofile = '_'.join(['f107', 'prelim',
'{:04d}'.format(dl_date.year),
'{:s}.txt'.format(versions[iname])])
outfile = os.path.join(data_path, ofile)
if os.path.isfile(outfile):
downloaded = True
# Check the date to see if this should be rewritten
checkfile = os.path.split(outfile)[-1]
has_file = local_files == checkfile
if np.any(has_file):
if has_file[has_file].index[-1] < vend[iname]:
# This file will be updated again, but only attempt
# to do so if enough time has passed from the
# last time it was downloaded
yesterday = today - pds.DateOffset(days=1)
if has_file[has_file].index[-1] < yesterday:
rewritten = True
else:
# The file does not exist, if it can be downloaded, it
# should be 'rewritten'
rewritten = True
# Attempt to download if the file does not exist or if the
# file has been updated
if rewritten or not downloaded:
try:
sys.stdout.flush()
ftp.retrbinary('RETR ' + fname,
open(saved_fname, 'wb').write)
downloaded = True
logger.info(' '.join(('Downloaded file for ',
dl_date.strftime('%x'))))
except ftplib.error_perm as exception:
# Could not fetch, so cannot rewrite
rewritten = False
# Test for an error
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise RuntimeError(exception)
else:
# file isn't actually there, try the next name
os.remove(saved_fname)
# Save this so we don't try again
# Because there are two possible filenames for
# each time, it's ok if one isn't there. We just
# don't want to keep looking for it.
bad_fname.append(fname)
# If the first file worked, don't try again
if downloaded:
break
if not downloaded:
logger.info(' '.join(('File not available for',
dl_date.strftime('%x'))))
elif rewritten:
with open(saved_fname, 'r') as fprelim:
lines = fprelim.read()
mm_f107.rewrite_daily_file(dl_date.year, outfile, lines)
os.remove(saved_fname)
# Cycle to the next date
dl_date = vend[iname] + pds.DateOffset(days=1)
# Close connection after downloading all dates
ftp.close()
elif tag == 'daily':
logger.info('This routine can only download the latest 30 day file')
# Set the download webpage
furl = 'https://services.swpc.noaa.gov/text/daily-solar-indices.txt'
req = requests.get(furl)
# Save the output
data_file = 'f107_daily_{:s}.txt'.format(today.strftime('%Y-%m-%d'))
outfile = os.path.join(data_path, data_file)
mm_f107.rewrite_daily_file(today.year, outfile, req.text)
elif tag == 'forecast':
logger.info(' '.join(('This routine can only download the current',
'forecast, not archived forecasts')))
# Set the download webpage
furl = ''.join(('https://services.swpc.noaa.gov/text/',
'3-day-solar-geomag-predictions.txt'))
req = requests.get(furl)
# Parse text to get the date the prediction was generated
date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
# Get starting date of the forecasts
raw_data = req.text.split(':Prediction_dates:')[-1]
forecast_date = dt.datetime.strptime(raw_data[3:14], '%Y %b %d')
# Set the times for output data
times = pds.date_range(forecast_date, periods=3, freq='1D')
# String data is the forecast value for the next three days
raw_data = req.text.split('10cm_flux:')[-1]
raw_data = raw_data.split('\n')[1]
val1 = int(raw_data[24:27])
val2 = int(raw_data[38:41])
val3 = int(raw_data[52:])
# Put data into nicer DataFrame
data = pds.DataFrame([val1, val2, val3], index=times, columns=['f107'])
# Write out as a file
data_file = 'f107_forecast_{:s}.txt'.format(
dl_date.strftime('%Y-%m-%d'))
data.to_csv(os.path.join(data_path, data_file), header=True)
elif tag == '45day':
logger.info(' '.join(('This routine can only download the current',
'forecast, not archived forecasts')))
# Set the download webpage
furl = 'https://services.swpc.noaa.gov/text/45-day-ap-forecast.txt'
req = requests.get(furl)
# Parse text to get the date the prediction was generated
date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
# Get to the forecast data
raw_data = req.text.split('45-DAY AP FORECAST')[-1]
# Grab AP part
raw_ap = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[0]
raw_ap = raw_ap.split('\n')[1:-1]
# Get the F107
raw_f107 = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[-1]
raw_f107 = raw_f107.split('\n')[1:-4]
# Parse the AP data
ap_times, ap = mm_f107.parse_45day_block(raw_ap)
# Parse the F10.7 data
f107_times, f107 = mm_f107.parse_45day_block(raw_f107)
# Collect into DataFrame
data = pds.DataFrame(f107, index=f107_times, columns=['f107'])
data['ap'] = ap
# Write out as a file
data_file = 'f107_45day_{:s}.txt'.format(dl_date.strftime('%Y-%m-%d'))
data.to_csv(os.path.join(data_path, data_file), header=True)
return
``` |
{
"source": "jonathonpsmith/GeodePy",
"score": 3
} |
#### File: GeodePy/geodepy/angles.py
```python
from math import radians
class DECAngle(float):
"""
Class for working with angles in Decimal Degrees
Note: GeodePy also supports working with angles in Decimal Degrees as floats
"""
def __init__(self, dec_angle=0.0):
"""
:param dec_angle: float Decimal Degrees angle
"""
super().__init__()
self.dec_angle = float(dec_angle)
def __repr__(self):
if self.dec_angle >= 0:
return '{DECAngle: +' + str(self.dec_angle) + '}'
else: # negative
return '{DECAngle: ' + str(self.dec_angle) + '}'
def __add__(self, other):
try:
return DECAngle(self.dec() + other.dec())
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __radd__(self, other):
try:
return DECAngle(other.dec() + self.dec())
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __sub__(self, other):
try:
return DECAngle(self.dec() - other.dec())
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __rsub__(self, other):
try:
return DECAngle(other.dec() - self.dec())
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __mul__(self, other):
try:
return DECAngle(self.dec() * other)
except TypeError:
raise TypeError('Multiply only defined between DECAngle Object '
'and Int or Float')
def __rmul__(self, other):
try:
return DECAngle(other * self.dec())
except TypeError:
raise TypeError('Multiply only defined between DECAngle Object '
'and Int or Float')
def __truediv__(self, other):
try:
return DECAngle(self.dec() / other)
except TypeError:
raise TypeError('Division only defined between DECAngle Object '
'and Int or Float')
def __abs__(self):
return DECAngle(abs(self.dec_angle))
def __neg__(self):
return DECAngle(-self.dec())
def __eq__(self, other):
return self.dec() == other.dec()
def __ne__(self, other):
return self.dec() != other.dec()
def __lt__(self, other):
return self.dec() < other.dec()
def __gt__(self, other):
return self.dec() > other.dec()
def __int__(self):
return int(self.dec_angle)
def __float__(self):
return float(self.dec_angle)
def __str__(self):
return str(self.dec_angle)
def __round__(self, n=None):
return DECAngle(round(self.dec_angle, n))
def rad(self):
"""
Convert to radians
:return: radians
:rtype: float
"""
return radians(self.dec_angle)
def dec(self):
"""
Convert to Decimal Degrees (float)
:return: Decimal Degrees
:rtype: float
"""
return self.dec_angle
def hp(self):
"""
Convert to HP Notation
:return: HP Notation (DDD.MMSSSS)
:rtype: float
"""
return dec2hp(self.dec_angle)
def hpa(self):
"""
Convert to HP Notation (class)
:return: HP Notation (DDD.MMSSSS)
:rtype: HPAngle
"""
return HPAngle(self.hp())
def gon(self):
"""
Convert to Gradians (float)
:return: Gradians
:rtype: float
"""
return dec2gon(self.dec_angle)
def gona(self):
"""
Convert to Gradians (class)
:return: Gradians
:rtype: GONAngle
"""
return GONAngle(dec2gon(self.dec_angle))
def dms(self):
"""
Convert to Degrees, Minutes, Seconds Object
:return: Degrees, Minutes, Seconds Object
:rtype: DMSAngle
"""
return dec2dms(self.dec_angle)
def ddm(self):
"""
Convert to Degrees, Decimal Minutes Object
:return: Degrees, Decimal Minutes Object
:rtype: DDMAngle
"""
return dec2ddm(self.dec_angle)
class HPAngle(object):
"""
Class for working with angles in Hewlett-Packard (HP) format
Note: GeodePy also supports working with angles in HP format as floats
"""
def __init__(self, hp_angle=0.0):
"""
:param hp_angle: float HP angle
"""
self.hp_angle = float(hp_angle)
hp_dec_str = f'{self.hp_angle:.17f}'.split('.')[1]
if int(hp_dec_str[0]) > 5:
raise ValueError(f'Invalid HP Notation: 1st decimal place greater '
f'than 5: {self.hp_angle}')
if len(hp_dec_str) > 2:
if int(hp_dec_str[2]) > 5:
raise ValueError(
f'Invalid HP Notation: 3rd decimal place greater '
f'than 5: {self.hp_angle}')
def __repr__(self):
if self.hp_angle >= 0:
return '{HPAngle: +' + str(self.hp_angle) + '}'
else: # negative
return '{HPAngle: ' + str(self.hp_angle) + '}'
def __add__(self, other):
try:
return HPAngle(dec2hp(self.dec() + other.dec()))
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __radd__(self, other):
try:
return HPAngle(dec2hp(other.dec() + self.dec()))
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __sub__(self, other):
try:
return HPAngle(dec2hp(self.dec() - other.dec()))
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __rsub__(self, other):
try:
return HPAngle(dec2hp(other.dec() - self.dec()))
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __mul__(self, other):
try:
return HPAngle(dec2hp(self.dec() * other))
except TypeError:
raise TypeError('Multiply only defined between Angle objects and '
'Int or Float')
def __rmul__(self, other):
try:
return HPAngle(dec2hp(other * self.dec()))
except TypeError:
raise TypeError('Multiply only defined between Angle objects and '
'Int or Float')
def __truediv__(self, other):
try:
return HPAngle(dec2hp(self.dec() / other))
except TypeError:
raise TypeError('Division only defined between HPAngle objects '
'and Int or Float')
def __abs__(self):
return HPAngle(abs(self.hp_angle))
def __neg__(self):
return HPAngle(self.hp_angle.__neg__())
def __eq__(self, other):
return self.dec() == other.dec()
def __ne__(self, other):
return self.dec() != other.dec()
def __lt__(self, other):
return self.dec() < other.dec()
def __gt__(self, other):
return self.dec() > other.dec()
def __int__(self):
return int(self.hp_angle)
def __float__(self):
return float(self.hp_angle)
def __str__(self):
return str(self.hp_angle)
def __round__(self, n=None):
return HPAngle(round(self.hp_angle, n))
def rad(self):
"""
Convert to Radians
:return: Radians
:rtype: float
"""
return radians(hp2dec(self.hp_angle))
def dec(self):
"""
Convert to Decimal Degrees (float)
:return: Decimal Degrees
:rtype: float
"""
return hp2dec(self.hp_angle)
def deca(self):
"""
Convert to Decimal Degrees (class)
:return: Decimal Degrees
:rtype: DECAngle
"""
return DECAngle(self.dec())
def hp(self):
"""
Convert to HP Notation (float)
:return: HP Notation (DDD.MMSSSS)
:rtype: float
"""
return float(self.hp_angle)
def gon(self):
"""
Convert to Gradians (float)
:return: Gradians
:rtype: float
"""
return hp2gon(self.hp_angle)
def gona(self):
"""
Convert to Gradians (class)
:return: Gradians
:rtype: GONAngle
"""
return GONAngle(self.gon())
def dms(self):
"""
Convert to Degrees, Minutes, Seconds Object
:return: Degrees, Minutes, Seconds Object
:rtype: DMSAngle
"""
return hp2dms(self.hp_angle)
def ddm(self):
"""
Convert to Degrees, Decimal Minutes Object
:return: Degrees, Decimal Minutes Object
:rtype: DDMAngle
"""
return hp2ddm(self.hp_angle)
class GONAngle(object):
"""
Class for working with angles in Gradians (90 degrees == 100 Gradians)
Note: GeodePy also supports working with angles in Gradians as floats
"""
def __init__(self, gon_angle=0.0):
"""
:param gon_angle: float Gradian angle
"""
super().__init__()
self.gon_angle = float(gon_angle)
def __repr__(self):
if self.gon_angle >= 0:
return '{GONAngle: +' + str(self.gon_angle) + '}'
else: # negative
return '{GONAngle: ' + str(self.gon_angle) + '}'
def __add__(self, other):
try:
return GONAngle(dec2gon(self.dec() + other.dec()))
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __radd__(self, other):
try:
return GONAngle(dec2gon(other.dec() + self.dec()))
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __sub__(self, other):
try:
return GONAngle(dec2gon(self.dec() - other.dec()))
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __rsub__(self, other):
try:
return GONAngle(dec2gon(other.dec() - self.dec()))
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __mul__(self, other):
try:
return GONAngle(dec2gon(self.dec() * other))
except TypeError:
raise TypeError('Multiply only defined between Angle objects and '
'Int or Float')
def __rmul__(self, other):
try:
return GONAngle(dec2gon(other * self.dec()))
except TypeError:
raise TypeError('Multiply only defined between Angle objects and '
'Int or Float')
def __truediv__(self, other):
try:
return GONAngle(dec2gon(self.dec() / other))
except TypeError:
raise TypeError('Division only defined between HPAngle objects '
'and Int or Float')
def __abs__(self):
return GONAngle(abs(self.gon_angle))
def __neg__(self):
return GONAngle(self.gon_angle.__neg__())
def __eq__(self, other):
return self.dec() == other.dec()
def __ne__(self, other):
return self.dec() != other.dec()
def __lt__(self, other):
return self.dec() < other.dec()
def __gt__(self, other):
return self.dec() > other.dec()
def __int__(self):
return int(self.gon_angle)
def __float__(self):
return float(self.gon_angle)
def __str__(self):
return str(self.gon_angle)
def __round__(self, n=None):
return GONAngle(round(self.gon_angle, n))
def rad(self):
"""
Convert to Radians
:return: Radians
:rtype: float
"""
return radians(gon2dec(self.gon_angle))
def dec(self):
"""
Convert to Decimal Degrees (float)
:return: Decimal Degrees
:rtype: float
"""
return gon2dec(self.gon_angle)
def deca(self):
"""
Convert to Decimal Degrees (class)
:return: Decimal Degrees
:rtype: DECAngle
"""
return DECAngle(self.dec())
def hp(self):
"""
Convert to HP Notation (float)
:return: HP Notation (DDD.MMSSSS)
:rtype: float
"""
return gon2hp(self.gon_angle)
def hpa(self):
"""
Convert to HP Notation (class)
:return: HP Notation (DDD.MMSSSS)
:rtype: HPAngle
"""
return HPAngle(gon2hp(self.gon_angle))
def gon(self):
"""
Convert to Gradians (float)
:return: Gradians
:rtype: float
"""
return float(self.gon_angle)
def dms(self):
"""
Convert to Degrees, Minutes, Seconds Object
:return: Degrees, Minutes, Seconds Object
:rtype: DMSAngle
"""
return gon2dms(self.gon_angle)
def ddm(self):
"""
Convert to Degrees, Decimal Minutes Object
:return: Degrees, Decimal Minutes Object
:rtype: DDMAngle
"""
return gon2ddm(self.gon_angle)
class DMSAngle(object):
"""
Class for working with angles in Degrees, Minutes and Seconds format
"""
def __init__(self, degree, minute=0, second=0.0):
"""
:param degree: Angle: whole degrees component (floats truncated)
Alt: formatted string '±DDD MM SS.SSS'
:param minute: Angle: whole minutes component (floats truncated)
:param second: Angle: seconds component (floats preserved)
"""
# Convert formatted string 'DDD MM SS.SSS' to DMSAngle
if type(degree) == str:
str_pts = degree.split(' ')
degree = int(str_pts[0])
minute = int(str_pts[1])
second = float(str_pts[2])
# Set sign of object based on sign of any variable
if degree == 0:
if str(degree)[0] == '-':
self.positive = False
elif minute < 0:
self.positive = False
elif second < 0:
self.positive = False
else:
self.positive = True
elif degree > 0:
self.positive = True
else: # degree < 0
self.positive = False
self.degree = abs(int(degree))
self.minute = abs(int(minute))
self.second = abs(second)
def __repr__(self):
if self.positive:
signsymbol = '+'
else:
signsymbol = '-'
return '{DMSAngle: ' + signsymbol + str(self.degree) + 'd ' +\
str(self.minute) + 'm ' + str(self.second) + 's}'
def __add__(self, other):
try:
return dec2dms(self.dec() + other.dec())
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __radd__(self, other):
try:
return dec2dms(other.dec() + self.dec())
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __sub__(self, other):
try:
return dec2dms(self.dec() - other.dec())
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __rsub__(self, other):
try:
return dec2dms(other.dec() - self.dec())
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __mul__(self, other):
try:
return dec2dms(self.dec() * other)
except TypeError:
raise TypeError('Multiply only defined between DMSAngle Object '
'and Int or Float')
def __rmul__(self, other):
try:
return dec2dms(other * self.dec())
except TypeError:
raise TypeError('Multiply only defined between DMSAngle Object '
'and Int or Float')
def __truediv__(self, other):
try:
return dec2dms(self.dec() / other)
except TypeError:
raise TypeError('Division only defined between DMSAngle Object '
'and Int or Float')
def __abs__(self):
return DMSAngle(self.degree, self.minute, self.second)
def __neg__(self):
if self.positive:
return DMSAngle(-self.degree, -self.minute, -self.second)
else: # positive == False
return DMSAngle(self.degree, self.minute, self.second)
def __eq__(self, other):
return self.dec() == other.dec()
def __ne__(self, other):
return self.dec() != other.dec()
def __lt__(self, other):
return self.dec() < other.dec()
def __gt__(self, other):
return self.dec() > other.dec()
def __str__(self):
if self.positive:
return (str(self.degree) + ' ' + str(self.minute) + ' '
+ str(self.second))
else:
return ('-' + str(self.degree) + ' ' + str(self.minute) + ' '
+ str(self.second))
def __round__(self, n=None):
if self.positive:
return DMSAngle(self.degree, self.minute, round(self.second, n))
else:
return -DMSAngle(self.degree, self.minute, round(self.second, n))
def rad(self):
"""
Convert to Radians
:return: Radians
:rtype: float
"""
return radians(self.dec())
def dec(self):
"""
Convert to Decimal Degrees (float)
:return: Decimal Degrees
:rtype: float
"""
if self.positive:
return self.degree + (self.minute / 60) + (self.second / 3600)
else:
return -(self.degree + (self.minute / 60) + (self.second / 3600))
def deca(self):
"""
Convert to Decimal Degrees (class)
:return: Decimal Degrees
:rtype: DECAngle
"""
return DECAngle(self.dec())
def hp(self):
"""
Convert to HP Notation (float)
:return: HP Notation (DDD.MMSSSS)
:rtype: float
"""
if self.positive:
return self.degree + (self.minute / 100) + (self.second / 10000)
else:
return -(self.degree + (self.minute / 100) + (self.second / 10000))
def hpa(self):
"""
Convert to HP Notation (class)
:return: HP Notation (DDD.MMSSSS)
:rtype: HPAngle
"""
return HPAngle(self.hp())
def gon(self):
"""
Convert to Gradians (float)
:return: Gradians
:rtype: float
"""
return dec2gon(self.dec())
def gona(self):
"""
Convert to Gradians (class)
:return: Gradians
:rtype: GONAngle
"""
return GONAngle(self.gon())
def ddm(self):
"""
Convert to Degrees, Decimal Minutes Object
:return: Degrees, Decimal Minutes Object
:rtype: DDMAngle
"""
if self.positive:
return DDMAngle(self.degree, self.minute + (self.second/60))
else:
return -DDMAngle(self.degree, self.minute + (self.second/60))
class DDMAngle(object):
"""
Class for working with angles in Degrees, Decimal Minutes format
"""
def __init__(self, degree, minute=0.0):
"""
:param degree: Angle: whole degrees component (floats truncated)
:param minute: Angle:minutes component (floats preserved)
"""
# Convert formatted string 'DDD MM.MMMM' to DDMAngle
if type(degree) == str:
str_pts = degree.split(' ')
degree = int(str_pts[0])
minute = float(str_pts[1])
# Set sign of object based on sign of any variable
if degree == 0:
if str(degree)[0] == '-':
self.positive = False
elif minute < 0:
self.positive = False
else:
self.positive = True
elif degree > 0:
self.positive = True
else: # degree < 0
self.positive = False
self.degree = abs(int(degree))
self.minute = abs(minute)
def __repr__(self):
if self.positive:
signsymbol = '+'
else:
signsymbol = '-'
return '{DDMAngle: ' + signsymbol + str(self.degree) + 'd ' + \
str(self.minute) + 'm}'
def __add__(self, other):
try:
return dec2ddm(self.dec() + other.dec())
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __radd__(self, other):
try:
return dec2ddm(other.dec() + self.dec())
except AttributeError:
raise TypeError('Can only add Angle objects with .dec() method '
'together')
def __sub__(self, other):
try:
return dec2ddm(self.dec() - other.dec())
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __rsub__(self, other):
try:
return dec2ddm(other.dec() - self.dec())
except AttributeError:
raise TypeError('Can only subtract Angle objects with .dec() method'
' together')
def __mul__(self, other):
try:
return dec2ddm(self.dec() * other)
except TypeError:
raise TypeError('Multiply only defined between DMSAngle Object '
'and Int or Float')
def __rmul__(self, other):
try:
return dec2ddm(other * self.dec())
except TypeError:
raise TypeError('Multiply only defined between DMSAngle Object '
'and Int or Float')
def __truediv__(self, other):
try:
return dec2ddm(self.dec() / other)
except TypeError:
raise TypeError('Division only defined between DMSAngle Object '
'and Int or Float')
def __abs__(self):
return DDMAngle(self.degree, self.minute)
def __neg__(self):
if self.positive:
return DDMAngle(-self.degree, -self.minute)
else: # sign == -1
return DDMAngle(self.degree, self.minute)
def __eq__(self, other):
return self.dec() == other.dec()
def __ne__(self, other):
return self.dec() != other.dec()
def __lt__(self, other):
return self.dec() < other.dec()
def __gt__(self, other):
return self.dec() > other.dec()
def __str__(self):
if self.positive:
return str(self.degree) + ' ' + str(self.minute)
else:
return '-' + str(self.degree) + ' ' + str(self.minute)
def __round__(self, n=None):
if self.positive:
return DDMAngle(self.degree, round(self.minute, n))
else:
return DDMAngle(-self.degree, -round(self.minute, n))
def rad(self):
"""
Convert to Radians
:return: Radians
:rtype: float
"""
return radians(self.dec())
def dec(self):
"""
Convert to Decimal Degrees (float)
:return: Decimal Degrees
:rtype: float
"""
if self.positive:
return self.degree + (self.minute / 60)
else:
return -(self.degree + (self.minute / 60))
def deca(self):
"""
Convert to Decimal Degrees (class)
:return: Decimal Degrees
:rtype: DECAngle
"""
return DECAngle(self.dec())
def hp(self):
"""
Convert to HP Notation (float)
:return: HP Notation (DDD.MMSSSS)
:rtype: float
"""
minute_int, second = divmod(self.minute, 1)
if self.positive:
return self.degree + (minute_int / 100) + (second * 0.006)
else:
return -(self.degree + (minute_int / 100) + (second * 0.006))
def hpa(self):
"""
Convert to HP Notation (class)
:return: HP Notation (DDD.MMSSSS)
:rtype: HPAngle
"""
return HPAngle(self.hp())
def gon(self):
"""
Convert to Gradians (float)
:return: Gradians
:rtype: float
"""
return dec2gon(self.dec())
def gona(self):
"""
Convert to Gradians (class)
:return: Gradians
:rtype: GONAngle
"""
return GONAngle(self.gon())
def dms(self):
"""
Convert to Degrees, Minutes, Seconds Object
:return: Degrees, Minutes, Seconds Object
:rtype: DMSAngle
"""
minute_int, second = divmod(self.minute, 1)
if self.positive:
return DMSAngle(self.degree, int(minute_int), second * 60)
else:
return -DMSAngle(self.degree, int(minute_int), second * 60)
# Functions converting from Decimal Degrees (float) to other formats
def dec2hp(dec):
"""
Converts Decimal Degrees to HP Notation (float)
:param dec: Decimal Degrees
:type dec: float
:return: HP Notation (DDD.MMSSSS)
:rtype: float
"""
minute, second = divmod(abs(dec) * 3600, 60)
degree, minute = divmod(minute, 60)
hp = degree + (minute / 100) + (second / 10000)
hp = round(hp, 16)
return hp if dec >= 0 else -hp
def dec2hpa(dec):
"""
Converts Decimal Degrees to HP Angle Object
:param dec: Decimal Degrees
:type dec: float
:return: HP Angle Object (DDD.MMSSSS)
:rtype: HPAngle
"""
return HPAngle(dec2hp(dec))
def dec2gon(dec):
"""
Converts Decimal Degrees to Gradians
:param dec: Decimal Degrees
:type dec: float
:return: Gradians
:rtype: float
"""
return 10/9 * dec
def dec2gona(dec):
"""
Converts Decimal Degrees to Gradians (class)
:param dec: Decimal Degrees
:type dec: float
:return: Gradians
:rtype: GONAngle
"""
return GONAngle(dec2gon(dec))
def dec2dms(dec):
"""
Converts Decimal Degrees to Degrees, Minutes, Seconds Object
:param dec: Decimal Degrees
:type dec: float
:return: Degrees, Minutes, Seconds Object
:rtype: DMSAngle
"""
minute, second = divmod(abs(dec) * 3600, 60)
degree, minute = divmod(minute, 60)
return (DMSAngle(degree, minute, second) if dec >= 0
else DMSAngle(-degree, minute, second))
def dec2ddm(dec):
"""
Converts Decimal Degrees to Degrees, Decimal Minutes Object
:param dec: Decimal Degrees
:type dec: float
:return: Degrees, Decimal Minutes Object
:rtype: DDMAngle
"""
minute, second = divmod(abs(dec) * 3600, 60)
degree, minute = divmod(minute, 60)
minute = minute + (second / 60)
return DDMAngle(degree, minute) if dec >= 0 else DDMAngle(-degree, minute)
# Functions converting from Hewlett-Packard (HP) format to other formats
def hp2dec(hp):
"""
Converts HP Notation to Decimal Degrees
:param hp: HP Notation (DDD.MMSSSS)
:type hp: float
:return: Decimal Degrees
:rtype: float
"""
# Check if 1st and 3rd decimal place greater than 5 (invalid HP Notation)
hp = float(hp)
hp_dec_str = f'{hp:.17f}'.split('.')[1]
if int(hp_dec_str[0]) > 5:
raise ValueError(f'Invalid HP Notation: 1st decimal place greater '
f'than 5: {hp}')
if len(hp_dec_str) > 2:
if int(hp_dec_str[2]) > 5:
raise ValueError(f'Invalid HP Notation: 3rd decimal place greater '
f'than 5: {hp}')
degmin, second = divmod(abs(hp) * 1000, 10)
degree, minute = divmod(degmin, 100)
dec = degree + (minute / 60) + (second / 360)
dec = round(dec, 16)
return dec if hp >= 0 else -dec
def hp2deca(hp):
"""
Converts HP Notation to DECAngle Object
:param hp: HP Notation (DDD.MMSSSS)
:type hp: float
:return: Decimal Degrees Object
:rtype: DECAngle
"""
return DECAngle(hp2dec(hp))
def hp2rad(hp):
"""
Converts HP Notation to radians
:param hp: HP Notation (DDD.MMSSSS)
:type hp: float
:return: radians
:rtype: float
"""
return radians(hp2dec(hp))
def hp2gon(hp):
"""
Converts HP Notation to Gradians
:param hp: HP Notation (DDD.MMSSSS)
:type hp: float
:return: Gradians
:rtype: float
"""
return dec2gon(hp2dec(hp))
def hp2gona(hp):
"""
Converts HP Notation to Gradians (class)
:param hp: HP Notation (DDD.MMSSSS)
:type hp: float
:return: Gradians
:rtype: GONAngle
"""
return GONAngle(hp2gon(hp))
def hp2dms(hp):
"""
Converts HP Notation to Degrees, Minutes, Seconds Object
:param hp: HP Notation (DDD.MMSSSS)
:type hp: float
:return: Degrees, Minutes, Seconds Object
:rtype: DMSAngle
"""
degmin, second = divmod(abs(hp) * 1000, 10)
degree, minute = divmod(degmin, 100)
return (DMSAngle(degree, minute, second * 10) if hp >= 0
else DMSAngle(-degree, minute, second * 10))
def hp2ddm(hp):
"""
Converts HP Notation to Degrees, Decimal Minutes Object
:param hp: HP Notation (DDD.MMSSSS)
:type hp: float
:return: Degrees, Decimal Minutes Object
:rtype: DDMAngle
"""
degmin, second = divmod(abs(hp) * 1000, 10)
degree, minute = divmod(degmin, 100)
minute = minute + (second / 6)
return DDMAngle(degree, minute) if hp >= 0 else DDMAngle(-degree, minute)
# Functions converting from Gradians format to other formats
def gon2dec(gon):
"""
Converts Gradians to Decimal Degrees
:param gon: Gradians
:type gon: float
:return: Decimal Degrees
:rtype: float
"""
return 9/10 * gon
def gon2deca(gon):
"""
Converts Gradians to DECAngle Object
:param gon: Gradians
:type gon: float
:return: Decimal Degrees Object
:rtype: DECAngle
"""
return DECAngle(gon2dec(gon))
def gon2hp(gon):
"""
Converts Gradians to HP Notation (float)
:param gon: Gradians
:type gon: float
:return: HP Notation (DDD.MMSSSS)
:rtype: float
"""
return dec2hp(gon2dec(gon))
def gon2hpa(gon):
"""
Converts Gradians to HP Angle Object
:param gon: Gradians
:type gon: float
:return: HP Angle Object (DDD.MMSSSS)
:rtype: HPAngle
"""
return HPAngle(gon2hp(gon))
def gon2rad(gon):
"""
Converts Gradians to radians
:param gon: Gradians
:type gon: float
:return: Radians
:rtype: float
"""
return radians(gon2dec(gon))
def gon2dms(gon):
"""
Converts Gradians to Degrees, Minutes, Seconds Object
:param gon: Gradians
:type gon: float
:return: Degrees, Minutes, Seconds Object
:rtype: DMSAngle
"""
return dec2dms(gon2dec(gon))
def gon2ddm(gon):
"""
Converts Gradians to Degrees, Decimal Minutes Object
:param gon: Gradians
:type gon: float
:return: Degrees, Decimal Minutes Object
:rtype: DDMAngle
"""
return dec2ddm(gon2dec(gon))
# Miscellaneous other useful functions
def dd2sec(dd):
"""
Converts angle in decimal degrees to angle in seconds
:param dd: Decimal Degrees
:return: Seconds
"""
minute, second = divmod(abs(dd) * 3600, 60)
degree, minute = divmod(minute, 60)
sec = (degree * 3600) + (minute * 60) + second
return sec if dd >= 0 else -sec
def dec2hp_v(dec):
minute, second = divmod(abs(dec) * 3600, 60)
degree, minute = divmod(minute, 60)
hp = degree + (minute / 100) + (second / 10000)
hp[dec <= 0] = -hp[dec <= 0]
return hp
def hp2dec_v(hp):
degmin, second = divmod(abs(hp) * 1000, 10)
degree, minute = divmod(degmin, 100)
dec = degree + (minute / 60) + (second / 360)
dec[hp <= 0] = -dec[hp <= 0]
return dec
def angular_typecheck(angle):
# Converts Angle Objects to Decimal Degrees (float) for computations
supported_types = [DMSAngle, DDMAngle, DECAngle, HPAngle, GONAngle]
if type(angle) in supported_types:
return angle.dec()
else:
return float(angle)
```
#### File: GeodePy/geodepy/gnss.py
```python
import sys
from numpy import zeros
from geodepy.angles import DMSAngle
def read_sinex_estimate(file):
"""This function reads in the SOLUTION/ESTIMATE block of a SINEX file. It
returns estimate, a list of tuples:
estimate = [(code, soln, refEpoch, staX, staY, staZ, staX_sd, staY_sd,
staZ_sd[, velX, velY, velZ, velX_sd, velY_sd, velZ_sd])...]
where:
* code is the stations's 4-character ID
* soln is the segment of the stations's time series
* refEpoch is the epoch of the solution in the form YY:DOY:SSSSS (YY
is the two digit year, DOY is day of year, and SSSSS is the time of
day in seconds
* sta[XYZ] is the station coordinates in the Cartesian reference frame
* sta[XYZ]_sd is the standard deviation of the station coordinates in
the Cartesian reference frame
* vel[XYZ] is the station velocity in the Cartesian reference frame
* vel[XYZ]_sd is the standard deviation of the station velocity in the
Cartesian reference frame
Velocities are not included in all SINEX files and so are only returned if
present.
:param file: the input SINEX file
:return: estimate
"""
# Create data structures and set variables
lines = []
estimate = []
velocities = False
go = False
code = ''
soln = ''
epoch = ''
stax = ''
stay = ''
staz = ''
stax_sd = ''
stay_sd = ''
staz_sd = ''
velx = ''
vely = ''
velz = ''
velx_sd = ''
vely_sd = ''
velz_sd = ''
# Read the SOLUTION/ESTIMATE block into a list and determine if there is
# any velocity information
with open(file) as f:
for line in f:
if line[:18] == '-SOLUTION/ESTIMATE':
break
if go and line[:11] == '*INDEX TYPE':
pass
elif go:
if line[7:10] == 'VEL':
velocities = True
lines.append(line)
if line[:18] == '+SOLUTION/ESTIMATE':
go = True
for line in lines:
typ = line[7:11]
if typ == 'STAX':
code = line[14:18]
soln = line[23:26].lstrip()
epoch = line[27:39]
stax = float(line[47:68])
stax_sd = float(line[69:80])
elif typ == 'STAY':
stay = float(line[47:68])
stay_sd = float(line[69:80])
elif typ == 'STAZ':
staz = float(line[47:68])
staz_sd = float(line[69:80])
if not velocities:
info = (code, soln, epoch, stax, stay, staz, stax_sd, stay_sd,
staz_sd)
estimate.append(info)
elif typ == 'VELX':
velx = float(line[47:68])
velx_sd = float(line[69:80])
elif typ == 'VELY':
vely = float(line[47:68])
vely_sd = float(line[69:80])
elif typ == 'VELZ':
velz = float(line[47:68])
velz_sd = float(line[69:80])
info = (code, soln, epoch, stax, stay, staz, stax_sd, stay_sd,
staz_sd, velx, vely, velz, velx_sd, vely_sd, velz_sd)
estimate.append(info)
return estimate
def read_sinex_matrix(file):
"""This function reads in the SOLUTION/MATRIX_ESTIMATE block of a SINEX
file. It returns matrix, a list of tuples:
matrix = [(code, soln, var_x, covar_xy, covar_xz, var_y, covar_yz,
var_z[, var_v_x, covar_v_xy, covar_v_xz, var_v_y, covar_v_yz,
var_v_z])...]
where:
* code is the stations's 4-character ID
* soln is the segment of the stations's time series
* var_x is the variance in the X coordinate
* covar_xy is the covariance between the X and the Y coordinates
* covar_xz is the covariance between the X and the Z coordinates
* var_y is the variance in the Y coordinate
* covar_yz is the covariance between the Y and the Z coordinates
* var_z is the variance in the Z coordinate
* var_v_x is the variance in the X velocity
* covar_v_xy is the covariance between the X and the Y velocities
* covar_v_xz is the covariance between the X and the Z velocities
* var_v_y is the variance in the Y velocity
* covar_v_yz is the covariance between the Y and the Z velocities
* var_v_z is the variance in the Z velocity
Velocities are not included in all SINEX files and so their VCV information
is only returned if they are present.
:param file: the input SINEX file
:return: matrix
"""
# Read in the codes (station names) and solutions, and check for velocities
data = read_sinex_estimate(file)
code = []
soln = []
velocities = False
for station in data:
code.append(station[0])
soln.append(station[1])
if len(data[0]) == 15:
velocities = True
# Read the SOLUTION/MATRIX_ESTIMATE block into a list and determine if the
# matrix is upper or lower triangular
lines = []
lower_triangular = False
go = False
with open(file) as f:
for line in f:
if line[:25] == '-SOLUTION/MATRIX_ESTIMATE':
break
if go and line[:12] == '*PARA1 PARA2':
pass
elif go:
lines.append(line)
if line[:25] == '+SOLUTION/MATRIX_ESTIMATE':
if line[26] == 'L':
lower_triangular = True
go = True
# Create an array containing the matrix elements
if velocities:
n = 6 * int(len(code))
else:
n = 3 * int(len(code))
element = zeros((n, n))
matrix = []
for line in lines:
col = line.rstrip().split()
for i in range(2, len(col)):
element[int(col[0]) - 1][int(col[1]) + i - 3] = float(col[i])
if velocities:
if lower_triangular:
for i in range(len(code)):
info = (code[i], soln[i], element[6 * i][6 * i],
element[6 * i + 1][6 * i],
element[6 * i + 1][6 * i + 1],
element[6 * i + 2][6 * i],
element[6 * i + 2][6 * i + 1],
element[6 * i + 2][6 * i + 2],
element[6 * i + 3][6 * i + 3],
element[6 * i + 4][6 * i + 3],
element[6 * i + 4][6 * i + 4],
element[6 * i + 5][6 * i + 3],
element[6 * i + 5][6 * i + 4],
element[6 * i + 5][6 * i + 5])
matrix.append(info)
else:
for i in range(len(code)):
info = (code[i], soln[i], element[6 * i][6 * i],
element[6 * i][6 * i + 1], element[6 * i][6 * i + 2],
element[6 * i + 1][6 * i + 1],
element[6 * i + 1][6 * i + 2],
element[6 * i + 2][6 * i + 2],
element[6 * i + 3][6 * i + 3],
element[6 * i + 3][6 * i + 4],
element[6 * i + 3][6 * i + 5],
element[6 * i + 4][6 * i + 4],
element[6 * i + 4][6 * i + 5],
element[6 * i + 5][6 * i + 5])
matrix.append(info)
else:
if lower_triangular:
for i in range(len(code)):
info = (code[i], soln[i], element[3 * i][3 * i],
element[3 * i + 1][3 * i],
element[3 * i + 1][3 * i + 1],
element[3 * i + 2][3 * i],
element[3 * i + 2][3 * i + 1],
element[3 * i + 2][3 * i + 2])
matrix.append(info)
else:
for i in range(len(code)):
info = (code[i], soln[i], element[3 * i][3 * i],
element[3 * i][3 * i + 1], element[3 * i][3 * i + 2],
element[3 * i + 1][3 * i + 1],
element[3 * i + 1][3 * i + 2],
element[3 * i + 2][3 * i + 2])
matrix.append(info)
return matrix
def read_sinex_sites(file):
"""This function reads in the SITE/ID block of a SINEX file. It returns
sites, a list of tuples:
sites = [(site, point, domes, obs, station_description, lon, lat, h)]
where:
* site is the site code
* point is the site's point code
* domes is the site's dome number
* obs is the observation technique
* station_description is a free format desciption of the site
* lon is the approximate longitude of the site as a DMSAngle object
* lat is the approximate latitude of the site as a DMSAngle object
* h is the approximate height of the site
:param file: the input SINEX file
:return: sites
"""
# Read the SITE/ID block into a list
lines = []
go = False
with open(file) as f:
for line in f:
if line[:8] == '-SITE/ID':
break
if go and line[:8] == '*CODE PT':
pass
elif go:
lines.append(line)
if line[:8] == '+SITE/ID':
go = True
sites = []
for line in lines:
site = line[1:5]
point = line[6:8].lstrip()
domes = line[9:18]
obs = line[19:20]
station_description = line[21:43].lstrip()
lon = DMSAngle(line[44:55].lstrip())
lat = DMSAngle(line[56:67].lstrip())
h = float(line[67:73])
info = (site, point, domes, obs, station_description, lon, lat, h)
sites.append(info)
return sites
def read_disconts(file):
"""This function reads in the SOLUTION/DISCONTINUITY block of a
SINEX file. It returns disconts , a list of tuples:
sites = [(site, code1, point, code2, start, end, type)]
where:
* site is the site code
* code1 is unknown
* point is the site's point code
* code2 is unknown
* start is the start time for the point code in YY:DOY:SECOD
* end is the end time for the point code in YY:DOY:SECOD
* type is the type of discontinuity; P for position or V for
velocity
I could not find the format description for this block.
:param file: the input discontinuities file
:return: disconts
"""
# Read the SOLUTION/DISCONTINUITY block into a list
lines = []
go = False
with open(file) as f:
for line in f:
if line[:23] == '-SOLUTION/DISCONTINUITY':
break
elif go:
lines.append(line)
if line[:23] == '+SOLUTION/DISCONTINUITY':
go = True
disconts = []
for line in lines:
site = line[1:5]
code1 = line[5:8].lstrip()
point = line[8:13].lstrip()
code2 = line[14:15]
start = line[16:28]
end = line[29:41]
type = line[42:43]
info = (site, code1, point, code2, start, end, type)
disconts.append(info)
return disconts
def read_solution_epochs(file):
"""This function reads in the SOLUTION/EPOCHS block of a SINEX file.
It returns epochs, a list of tuples:
epochs = [(site, point, sol, obs, start, end, mean)]
where:
* site is the site code
* point is the site's point code
* sol is the solution number at a site/point
* obs is the observation technique
* start is the start time for the solution in YY:DOY:SECOD
* end is the end time for the solution in YY:DOY:SECOD
* mean is the mean time for the solution in YY:DOY:SECOD
:param file: the input SINEX file
:return: epochs
"""
# Read the SOLUTION/EPOCHS block into a list
lines = []
go = False
with open(file) as f:
for line in f:
if line[:16] == '-SOLUTION/EPOCHS':
break
if go and line[:8] == '*Code PT':
pass
elif go:
lines.append(line)
if line[:16] == '+SOLUTION/EPOCHS':
go = True
epochs = []
# Parse each line, create a tuple and add it to the list
for line in lines:
site = line[1:5]
point = line[6:8].lstrip()
sol = line[9:13].lstrip()
obs = line[14:15]
start = line[16:28]
end = line[29:41]
mean = line[42:55].rstrip()
info = (site, point, sol, obs, start, end, mean)
epochs.append(info)
return epochs
```
#### File: GeodePy/geodepy/height.py
```python
import geodepy.constants as cons
import geodepy.geodesy as gg
import gdal
import numpy as np
from scipy.interpolate import griddata
import math as m
#___________________________________________________________________________#
# Interpolation functions
def interp_file(Lat,Long,file):
# Import the DOVPM file
f = gdal.Open(file)
# load band (akin to a variable in dataset)
band = f.GetRasterBand(1)
# get the pixel width, height, etc.
transform = f.GetGeoTransform()
# Grid resolution (known)
res=transform[1]
# convert lat,lon to row,col
column = (Long - transform[0]) / transform[1]
row = (Lat - transform[3]) / transform[5]
# get pixel values surrounding data point
Surrounding_data=(band.ReadAsArray(np.floor(column-2), np.floor(row-2), 5, 5))
# convert row,col back to north,east
Long_c = transform[0] + np.floor(column) * res
Lat_c = transform[3] - np.floor(row) * res
# set up matrices for interpolation
count=-1
pos=np.zeros((25,2))
Surrounding_data_v=np.zeros((25,1))
for k in range(-2,3):
for j in range(-2,3):
count=count+1
pos[count]=(Long_c+j*res,Lat_c-k*res)
Surrounding_data_v[count]=Surrounding_data[k+2,j+2]
interp_val=griddata(pos,Surrounding_data_v,(Long,Lat),method='cubic')
return interp_val
#___________________________________________________________________________#
# Functions to handle the conversions from one height to another
def GPS_to_AVWS(Lat,Long,GPS_H):
zeta=interp_file(Lat, Long, cons.file_AVWS) # AVWS file
zeta_std=interp_file(Lat, Long, cons.file_AVWS_STD) # AVWS STD file
NORMAL_H=GPS_H-zeta
return [NORMAL_H,zeta_std]
def AVWS_to_GPS(Lat,Long,AVWS_H):
zeta=interp_file(Lat, Long, cons.file_AVWS) # AVWS file
zeta_std=interp_file(Lat, Long, cons.file_AVWS_STD) # AVWS STD file
GPS_H=AVWS_H+zeta
return [GPS_H,zeta_std]
def AHD_to_AVWS(Lat,Long,AHD_H):
# Convert to GPS
GPS_H=AHD_H+interp_file(Lat, Long, cons.file_AG2020) # AUSGEOID2020 file
# Convert to AVWS
Normal_H=GPS_H-interp_file(Lat, Long, cons.file_AVWS) # AVWS file
return [Normal_H]
def GPS_to_AHD(Lat,Long,GPS_H):
N=interp_file(Lat, Long, cons.file_AG2020) # AUSGEOID2020 file
N_std=interp_file(Lat, Long, cons.file_AG2020_STD) # AUSGEOID2020 STD file
AHD_H=GPS_H-N
return [AHD_H,N_std]
def AHD_to_GPS(Lat,Long,AHD_H):
N=interp_file(Lat, Long, cons.file_AG2020) # AUSGEOID2020 file
N_std=interp_file(Lat, Long, cons.file_AG2020_STD) # AUSGEOID2020 STD file
GPS_H=AHD_H+N
return [GPS_H,N_std]
def AVWS_to_AHD(Lat,Long,Normal_H):
# Convert to GPS
GPS_H=Normal_H+interp_file(Lat, Long, cons.file_AVWS) # AVWS file
# Convert to AHD
AHD_H=GPS_H-interp_file(Lat, Long, cons.file_AG2020) # AUSGEOID2020 file
return [AHD_H]
def DOV(Lat,Long):
# Convert to GPS
DOV_PM=interp_file(Lat, Long, cons.file_DOV_PM) # AVWS file
# Convert to AHD
DOV_PV=interp_file(Lat, Long, cons.file_DOV_PV) # AUSGEOID2020 file
return [DOV_PM,DOV_PV]
def GPS_to_AUSGeoid98(Lat,Long,GPS_H):
N=interp_file(Lat,Long,cons.file_AG98) # AUSGEOID98 file
AHD_H=GPS_H-N
return [AHD_H]
def AUSGeoid98_to_GPS(Lat,Long,AHD_H):
N=interp_file(Lat,Long,cons.file_AG98) # AUSGEOID98 file
GPS_H=AHD_H+N
return [GPS_H]
def GPS_to_AUSGeoid09(Lat,Long,GPS_H):
N=interp_file(Lat,Long,cons.file_AG09) # AUSGEOID09 file
AHD_H=GPS_H-N
return [AHD_H]
def AUSGeoid09_to_GPS(Lat,Long,AHD_H):
N=interp_file(Lat,Long,cons.file_AG09) # AUSGEOID09 file
GPS_H=AHD_H+N
return [GPS_H]
def DOV_09(Lat,Long):
# Interp PM
DOV_PM=interp_file(Lat,Long,cons.file_AG09_DOV_PM) # AGQG09 DOV file
# Interp PV
DOV_PV=interp_file(Lat,Long,cons.file_AG09_DOV_PV) # AGQG09 DOV file
return [DOV_PM,DOV_PV]
def DOV_98(Lat,Long):
# Interp PM
DOV_PM=interp_file(Lat,Long,cons.file_AG98_DOV_PM) # AGQG98 DOV file
# Interp PV
DOV_PV=interp_file(Lat,Long,cons.file_AG98_DOV_PV) # AGQG98 DOV file
return [DOV_PM,DOV_PV]
def mean_normal_grav(Lat,h):
# GRS 80 constants
a=6378137
b=6356752.3141
omega=7292115*(10**-11)
e2=0.00669438002290
GM=3986005*10**8
k=0.001931851353
# GRS80 normal gravity
EllGrav=(10**5)*9.7803267715*(1+k*(np.sin(Lat*np.pi/180)**2))/np.sqrt(1-e2*(np.sin(Lat*np.pi/180)**2))
FA=-((2*(EllGrav/a)*(1+(a-b)/a + omega**2*a**2*b/GM - 2*(a-b)/a*(np.sin(Lat*np.pi/180)**2))*(h**2)/2-3*(EllGrav/a**2)*(h**3)/3)/h)
mean_normal_g=(EllGrav+FA)*(10**-5)
return mean_normal_g
def normal_grav(Lat,h):
# GRS 80 constants
a=6378137
b=6356752.3141
omega=7292115*(10**-11)
e2=0.00669438002290
GM=3986005*10**8
k=0.001931851353
# GRS80 normal gravity
EllGrav=(10**5)*9.7803267715*(1+k*(np.sin(Lat*np.pi/180)**2))/np.sqrt(1-e2*(np.sin(Lat*np.pi/180)**2))
FA=-(2*EllGrav*h/a)*(1+(a-b)/a+omega**2*a**2*b/GM-2*(a-b)/a*(np.sin(Lat*np.pi/180)**2))+3*(EllGrav*h**2)/(a**2)
normal_g=(EllGrav+FA)*(10**-5)
return normal_g
def mean_surface_grav(Lat_A,Long_A,H_A,Lat_B,Long_B,H_B):
Surf_Grav_A=interp_grav(Lat_A,Long_A)*(10**-5)+normal_grav(Lat_A,H_A)+0.0419*2.67*H_A*(10**-5)
Surf_Grav_B=interp_grav(Lat_B,Long_B)*(10**-5)+normal_grav(Lat_B,H_B)+0.0419*2.67*H_B*(10**-5)
mean_g=(Surf_Grav_A+Surf_Grav_B)/2
return mean_g
def interp_grav(Lat,Long):
# Grid resolution (known)
res=1.0/60
# open geotiff file
f = gdal.Open(cons.file_GRAV_BA)
# load band (akin to a variable in dataset)
band = f.GetRasterBand(1)
# get the pixel width, height, etc.
transform = f.GetGeoTransform()
# convert lat,lon to row,col
column = (Long - transform[0]) / transform[1]
row = (Lat - transform[3]) / transform[5]
# get pixel values surrounding data point
Surrounding_data=(band.ReadAsArray(np.floor(column-2), np.floor(row-2), 5, 5))
# convert row,col back to north,east
Long_c = transform[0] + np.floor(column) * res
Lat_c = transform[3] - np.floor(row) * res
# set up matrices for interpolation
count=-1
pos=np.zeros((25,2))
Surrounding_data_v=np.zeros((25,1))
for k in range(-2,3):
for j in range(-2,3):
count=count+1
pos[count]=(Long_c+j*res,Lat_c-k*res)
Surrounding_data_v[count]=Surrounding_data[k+2,j+2]
interp_g=griddata(pos,Surrounding_data_v,(Long,Lat))
return interp_g
def normal_correction(Lat_A,Long_A,H_A,Lat_B,Long_B,H_B):
# ellipsoidal gravity at 45 deg. Lat
Gamma_0=9.8061992115
# Normal Gravity at the point
normal_g_A=mean_normal_grav(Lat_A,H_A)
# print normal_g_A
normal_g_B=mean_normal_grav(Lat_B,H_B)
# print normal_g_B
dn=H_B-H_A
g=mean_surface_grav(Lat_A,Long_A,H_A,Lat_B,Long_B,H_B)
# print g
NC=(dn*(g-Gamma_0)/Gamma_0)+H_A*(normal_g_A-Gamma_0)/Gamma_0-H_B*(normal_g_B-Gamma_0)/Gamma_0
return NC,g
def normal_orthometric_correction(lat1, lon1, H1, lat2, lon2, H2):
"""
Computes the normal-orthometric correction based on Heck (2003).
See Standard for New Zealand Vertical Datum 2016, Section 3.3
:param lat1: Latitude at Stn1
:param lon1: Longitude at Stn1
:param H1: Physical Height at Stn1
:param lat2: Latitude at Stn2
:param lon2: longitude at Stn2
:param H2: Physical Height at Stn2
:return: normal-orthometric correction
"""
f_ng = cons.grs80_ngf
m_rad = cons.grs80.meanradius
mid_height = (H1 + H2) / 2
mid_lat = m.radians((lat1 + lat2) / 2)
vinc_inv = gg.vincinv(lat1, lon1, lat2, lon2)
dist = vinc_inv[0]
az = vinc_inv[1]
noc = - f_ng / m_rad * mid_height * m.sin(2.0 * mid_lat) * m.cos(m.radians(az)) * dist
return noc
```
#### File: geodepy/tests/test_geodesy.py
```python
import unittest
import os.path
import numpy as np
import numpy.lib.recfunctions as rfn
from geodepy.convert import (hp2dec, dec2hp, rect2polar, polar2rect,
grid2geo, llh2xyz, DMSAngle)
from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu
class TestGeodesy(unittest.TestCase):
def test_enu2xyz(self):
MOBS_MGA2020 = (55, 321820.085, 5811181.510, 40.570)
MOBS_MGA1994 = (55, 321819.594, 5811180.038, 40.659)
# Convert UTM Projection Coordinates to Geographic Coordinates
MOBS_GDA2020 = grid2geo(MOBS_MGA2020[0], MOBS_MGA2020[1], MOBS_MGA2020[2])
MOBS_GDA1994 = grid2geo(MOBS_MGA1994[0], MOBS_MGA1994[1], MOBS_MGA1994[2])
# Convert Geographic Coordinates to Cartesian XYZ Coordinates
MOBS_GDA2020_XYZ = llh2xyz(MOBS_GDA2020[0], MOBS_GDA2020[1], MOBS_MGA2020[3])
MOBS_GDA1994_XYZ = llh2xyz(MOBS_GDA1994[0], MOBS_GDA1994[1], MOBS_MGA1994[3])
# Generate Vector Between UTM Projection Coordinates
mga_vector = [MOBS_MGA2020[1] - MOBS_MGA1994[1],
MOBS_MGA2020[2] - MOBS_MGA1994[2],
MOBS_MGA2020[3] - MOBS_MGA1994[3]]
# Generate Vector Between Cartesian XYZ Coordinates
xyz_vector = (MOBS_GDA2020_XYZ[0] - MOBS_GDA1994_XYZ[0],
MOBS_GDA2020_XYZ[1] - MOBS_GDA1994_XYZ[1],
MOBS_GDA2020_XYZ[2] - MOBS_GDA1994_XYZ[2])
# Rotate UTM Projection Vector by Grid Convergence
grid_dist, grid_brg = rect2polar(mga_vector[0], mga_vector[1])
local_east, local_north = polar2rect(grid_dist, grid_brg - MOBS_GDA2020[3])
local_vector = (local_east, local_north, mga_vector[2])
# Calculate XYZ Vector using Local Vector Components
x, y, z = enu2xyz(MOBS_GDA2020[0], MOBS_GDA2020[1], *local_vector)
self.assertAlmostEqual(x, xyz_vector[0], 4)
self.assertAlmostEqual(y, xyz_vector[1], 4)
self.assertAlmostEqual(z, xyz_vector[2], 4)
# Calculate Local Vector using XYZ Vector Components
e, n, u = xyz2enu(MOBS_GDA2020[0], MOBS_GDA2020[1], *xyz_vector)
self.assertAlmostEqual(e, local_vector[0], 4)
self.assertAlmostEqual(n, local_vector[1], 4)
self.assertAlmostEqual(u, local_vector[2], 4)
def test_vincinv(self):
# Flinders Peak
lat1 = hp2dec(-37.57037203)
lon1 = hp2dec(144.25295244)
lat1_DMS = DMSAngle(-37, 57, 3.7203)
lon1_DMS = DMSAngle(144, 25, 29.5244)
# Buninyong
lat2 = hp2dec(-37.39101561)
lon2 = hp2dec(143.55353839)
lat2_DMS = DMSAngle(-37, 39, 10.1561)
lon2_DMS = DMSAngle(143, 55, 35.3839)
# Test Decimal Degrees Input
ell_dist, azimuth1to2, azimuth2to1 = vincinv(lat1, lon1, lat2, lon2)
self.assertEqual(round(ell_dist, 3), 54972.271)
self.assertEqual(round(dec2hp(azimuth1to2), 6), 306.520537)
self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
# additional test case:
pl1 = (-29.85, 140.71666666666667)
pl2 = (-29.85, 140.76666666666667)
ell_dist, azimuth1to2, azimuth2to1 = vincinv(pl1[0], pl1[1], pl2[0], pl2[1])
self.assertEqual(round(ell_dist, 3), 4831.553)
self.assertEqual(round(dec2hp(azimuth1to2), 6), 90.004480)
self.assertEqual(round(dec2hp(azimuth2to1), 6), 269.591520)
test2 = vincinv(lat1, lon1, lat1, lon1)
self.assertEqual(test2, (0, 0, 0))
# Test DMSAngle Input
ell_dist, azimuth1to2, azimuth2to1 = vincinv(lat1_DMS, lon1_DMS,
lat2_DMS, lon2_DMS)
self.assertEqual(round(ell_dist, 3), 54972.271)
self.assertEqual(round(dec2hp(azimuth1to2), 6), 306.520537)
self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
test2 = vincinv(lat1_DMS, lon1_DMS, lat1_DMS, lon1_DMS)
self.assertEqual(test2, (0, 0, 0))
# Test DDMAngle Input
(ell_dist,
azimuth1to2,
azimuth2to1) = vincinv(lat1_DMS.ddm(), lon1_DMS.ddm(),
lat2_DMS.ddm(), lon2_DMS.ddm())
self.assertEqual(round(ell_dist, 3), 54972.271)
self.assertEqual(round(dec2hp(azimuth1to2), 6), 306.520537)
self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
test2 = vincinv(lat1_DMS.ddm(), lon1_DMS.ddm(),
lat1_DMS.ddm(), lon1_DMS.ddm())
self.assertEqual(test2, (0, 0, 0))
def test_vincdir(self):
# Flinders Peak
lat1 = hp2dec(-37.57037203)
lon1 = hp2dec(144.25295244)
lat1_DMS = DMSAngle(-37, 57, 3.7203)
lon1_DMS = DMSAngle(144, 25, 29.5244)
# To Buninyong
azimuth1to2 = hp2dec(306.520537)
azimuth1to2_DMS = DMSAngle(306, 52, 5.37)
ell_dist = 54972.271
# Test Decimal Degrees Input
lat2, lon2, azimuth2to1 = vincdir(lat1, lon1, azimuth1to2, ell_dist)
self.assertEqual(round(dec2hp(lat2), 8), -37.39101561)
self.assertEqual(round(dec2hp(lon2), 8), 143.55353839)
self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
# Test DMSAngle Input
lat2, long2, azimuth2to1 = vincdir(lat1_DMS, lon1_DMS,
azimuth1to2_DMS, ell_dist)
self.assertEqual(round(dec2hp(lat2), 8), -37.39101561)
self.assertEqual(round(dec2hp(long2), 8), 143.55353839)
self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
# Test DDMAngle Input
lat2, long2, azimuth2to1 = vincdir(lat1_DMS.ddm(), lon1_DMS.ddm(),
azimuth1to2_DMS.ddm(), ell_dist)
self.assertEqual(round(dec2hp(lat2), 8), -37.39101561)
self.assertEqual(round(dec2hp(long2), 8), 143.55353839)
self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
def test_vincinv_utm(self):
# Flinders Peak (UTM 55)
zone1 = 55
east1 = 273741.2966
north1 = 5796489.7769
# Buninyong (UTM 55)
zone2 = 55
east2 = 228854.0513
north2 = 5828259.0384
# Buninyong (UTM 54)
zone3 = 54
east3 = 758173.7973
north3 = 5828674.3402
# Test Coordinates in Zone 55 only
grid_dist, grid1to2, grid2to1, lsf = vincinv_utm(zone1, east1, north1,
zone2, east2, north2)
self.assertAlmostEqual(lsf, 1.00036397, 8)
self.assertAlmostEqual(grid_dist, 54992.279, 3)
self.assertAlmostEqual(dec2hp(grid1to2), 305.17017259, 7)
self.assertAlmostEqual(dec2hp(grid2to1), 125.17418518, 7)
# Test Coordinates in Different Zones (55 and 54)
# (Point 2 Grid Bearing Different (Zone 54 Grid Bearing))
grid_dist, grid1to2, grid2to1, lsf = vincinv_utm(zone1, east1, north1,
zone3, east3, north3)
self.assertAlmostEqual(lsf, 1.00036397, 8)
self.assertAlmostEqual(grid_dist, 54992.279, 3)
self.assertAlmostEqual(dec2hp(grid1to2), 305.17017259, 7)
self.assertAlmostEqual(dec2hp(grid2to1), 128.57444307, 7)
def test_vincdir_utm(self):
# Flinders Peak (UTM 55)
zone1 = 55
east1 = 273741.2966
north1 = 5796489.7769
# Grid Dimensions to Point 2 (Buninyong)
grid_dist = 54992.279
grid1to2 = hp2dec(305.17017259)
grid1to2_DMS = DMSAngle(305, 17, 1.7259)
# Test Decimal Degrees Input
(zone2, east2, north2,
grid2to1, lsf) = vincdir_utm(zone1, east1, north1,
grid1to2, grid_dist)
self.assertEqual(zone2, zone1)
self.assertAlmostEqual(east2, 228854.0513, 3)
self.assertAlmostEqual(north2, 5828259.0384, 3)
self.assertAlmostEqual(dec2hp(grid2to1), 125.17418518, 7)
self.assertAlmostEqual(lsf, 1.00036397, 8)
# Test DMSAngle Input
(zone2, east2, north2,
grid2to1, lsf) = vincdir_utm(zone1, east1, north1,
grid1to2_DMS, grid_dist)
self.assertEqual(zone2, zone1)
self.assertAlmostEqual(east2, 228854.0513, 3)
self.assertAlmostEqual(north2, 5828259.0384, 3)
self.assertAlmostEqual(dec2hp(grid2to1), 125.17418518, 7)
self.assertAlmostEqual(lsf, 1.00036397, 8)
def test_equality_vincentys(self):
# Test multiple point-to-point vincinv calculations
abs_path = os.path.abspath(os.path.dirname(__file__))
test_geo_coords =\
np.genfromtxt(os.path.join(abs_path,
'resources/Test_Conversion_Geo.csv'),
delimiter=',',
dtype='S4,f8,f8',
names=['site', 'lat1', 'long1'],
usecols=('lat1', 'long1'))
test_geo_coord2 = \
np.genfromtxt(os.path.join(abs_path,
'resources/Test_Conversion_Geo.csv'),
delimiter=',',
dtype='S4,f8,f8',
names=['site', 'lat2', 'long2'],
usecols=('lat2', 'long2'))
# Form array with point pairs from test file
test_pairs = rfn.merge_arrays([test_geo_coords, np.roll(test_geo_coord2, 1)], flatten=True)
# Calculate Vincenty's Inverse Result using Lat, Long Pairs
vincinv_result = np.array(list(vincinv(*x) for x in test_pairs[['lat1', 'long1', 'lat2', 'long2']]))
# Calculate Vincenty's Direct Result using Results from Inverse Function
vincdir_input = rfn.merge_arrays([test_geo_coords, vincinv_result[:, 1], vincinv_result[:, 0]], flatten=True)
vincdir_input.dtype.names = ['lat1', 'long1', 'az1to2', 'ell_dist']
vincdir_result = np.array(list(vincdir(*x) for x in vincdir_input[['lat1', 'long1', 'az1to2', 'ell_dist']]))
np.testing.assert_almost_equal(test_pairs['lat2'],
vincdir_result[:, 0], decimal=8)
np.testing.assert_almost_equal(test_pairs['long2'],
vincdir_result[:, 1], decimal=8)
np.testing.assert_almost_equal(vincinv_result[:, 2],
vincdir_result[:, 2])
def test_vincinv_edgecases(self):
lat1 = -32.153892
lon1 = -15.394827
lat2 = -31.587369
lon2 = -13.487739
gdist, az12, az21 = vincinv(lat1, lon1, lat2, lon2)
lon1 = lon1 + 14
lon2 = lon2 + 14
gdist_2, az12_2, az21_2 = vincinv(lat1, lon1, lat2, lon2)
self.assertEqual(gdist, gdist_2)
self.assertEqual(az12, az12_2)
self.assertEqual(az21, az21_2)
if __name__ == '__main__':
unittest.main()
```
#### File: GeodePy/Standalone/mga2gda.py
```python
from decimal import *
from math import sqrt, log, degrees, sin, cos, sinh, cosh, atan
import os
import argparse
import csv
# Universal Transverse Mercator Projection Parameters
proj = [6378137, Decimal('298.25722210088'), 500000,
10000000, Decimal('0.9996'), 6, -177]
# Ellipsoidal Constants
f = 1 / proj[1]
semi_maj = proj[0]
semi_min = float(semi_maj * (1 - f))
ecc1sq = float(f * (2 - f))
ecc2sq = float(ecc1sq/(1 - ecc1sq))
ecc1 = sqrt(ecc1sq)
n = f / (2 - f)
n = float(n)
n2 = n ** 2
# Rectifying Radius (Horner Form)
A = proj[0] / (1 + n) * ((n2 *
(n2 *
(n2 *
(25 * n2 + 64)
+ 256)
+ 4096)
+ 16384)
/ 16384.)
# Beta Coefficients (Horner Form)
b2 = ((n *
(n *
(n *
(n *
(n *
(n *
((37845269 - 31777436 * n) - 43097152)
+ 42865200)
+ 752640)
- 104428800)
+ 180633600)
- 135475200))
/ 270950400.)
b4 = ((n ** 2 *
(n *
(n *
(n *
(n *
((-24749483 * n - 14930208) * n + 100683990)
- 152616960)
+ 105719040)
- 23224320)
- 7257600))
/ 348364800.)
b6 = ((n ** 3 *
(n *
(n *
(n *
(n *
(232468668 * n - 101880889)
- 39205760)
+ 29795040)
+ 28131840)
- 22619520))
/ 638668800.)
b8 = ((n ** 4 *
(n *
(n *
((-324154477 * n - 1433121792) * n + 876745056)
+ 167270400)
- 208945440))
/ 7664025600.)
b10 = ((n ** 5 *
(n *
(n *
(312227409 - 457888660 * n)
+ 67920528)
- 70779852))
/ 2490808320.)
b12 = ((n ** 6 *
(n *
(19841813847 * n + 3665348512)
- 3758062126))
/ 116237721600.)
b14 = ((n ** 7 *
(1989295244 * n - 1979471673))
/ 49816166400.)
b16 = ((-191773887257 * n ** 8) / 3719607091200.)
def dd2dms(dd):
minutes, seconds = divmod(abs(dd) * 3600, 60)
degrees, minutes = divmod(minutes, 60)
dms = degrees + (minutes / 100) + (seconds / 10000)
return dms if dd >= 0 else -dms
def grid2geo(zone, easting, northing):
"""
input: Zone, Easting and Northing of a point in metres.
(Default projection is Universal Transverse Mercator.)
output: Latitude and Longitude in Decimal Degrees.
"""
# Transverse Mercator Co-ordinates
x = (easting - float(proj[2])) / float(proj[4])
y = (northing - float(proj[3])) / float(proj[4])
# Transverse Mercator Ratios
xi = y / A
eta = x / A
# Gauss-Schreiber Ratios
xi2 = b2 * sin(2 * xi) * cosh(2 * eta)
xi4 = b4 * sin(4 * xi) * cosh(4 * eta)
xi6 = b6 * sin(6 * xi) * cosh(6 * eta)
xi8 = b8 * sin(8 * xi) * cosh(8 * eta)
xi10 = b10 * sin(10 * xi) * cosh(10 * eta)
xi12 = b12 * sin(12 * xi) * cosh(12 * eta)
xi14 = b14 * sin(14 * xi) * cosh(14 * eta)
xi16 = b16 * sin(16 * xi) * cosh(16 * eta)
eta2 = b2 * cos(2 * xi) * sinh(2 * eta)
eta4 = b4 * cos(4 * xi) * sinh(4 * eta)
eta6 = b6 * cos(6 * xi) * sinh(6 * eta)
eta8 = b8 * cos(8 * xi) * sinh(8 * eta)
eta10 = b10 * cos(10 * xi) * sinh(10 * eta)
eta12 = b12 * cos(12 * xi) * sinh(12 * eta)
eta14 = b14 * cos(14 * xi) * sinh(14 * eta)
eta16 = b16 * cos(16 * xi) * sinh(16 * eta)
xi1 = xi + xi2 + xi4 + xi6 + xi8 + xi10 + xi12 + xi14 + xi16
eta1 = eta + eta2 + eta4 + eta6 + eta8 + eta10 + eta12 + eta14 + eta16
# Conformal Latitude
conf_lat = (sin(xi1)) / (sqrt((sinh(eta1)) ** 2 + (cos(xi1)) ** 2))
t1 = conf_lat
conf_lat = atan(conf_lat)
# Finding t using Newtons Method
def sigma(t):
sigma = sinh(
ecc1 * 0.5 * log((1 + ((ecc1 * t) / (sqrt(1 + t ** 2)))) / (1 - ((ecc1 * t) / (sqrt(1 + t ** 2))))))
return sigma
def ftn(t):
ftn = t * sqrt(1 + (sigma(t)) ** 2) - sigma(t) * sqrt(1 + t ** 2) - t1
return ftn
def f1tn(t):
f1tn = (sqrt(1 + (sigma(t)) ** 2) * sqrt(1 + t ** 2) - sigma(t) * t) * (
((1 - float(ecc1sq)) * sqrt(1 + t ** 2)) / (1 + (1 - float(ecc1sq)) * t ** 2))
return f1tn
t2 = t1 - (ftn(t1)) / (f1tn(t1))
t3 = t2 - (ftn(t2)) / (f1tn(t2))
t4 = t3 - (ftn(t3)) / (f1tn(t3))
# Test No of Iterations Required (this will impact script performance)
# t5 = t4 - (ftn(t4))/(f1tn(t4))
# Compute Latitude
lat = degrees(atan(t4))
# Compute Longitude
cm = float((zone * proj[5]) + proj[6] - proj[5])
long_diff = degrees(atan(sinh(eta1) / cos(xi1)))
long = cm + long_diff
return round(lat, 11), round(long, 11)
def grid2geoio(fn):
"""
No Input:
Prompts the user for the name of a file in csv format. Data in the file
must be in the form Point ID, UTM Zone, Easting (m), Northing (m) with
no header line.
No Output:
Uses the function grid2geo to convert each row in the csv file into a
latitude and longitude in Degrees, Minutes and Seconds. This data is
written to a new file with the name <inputfile>_out.csv
"""
# Open Filename
csvfile = open(fn)
csvreader = csv.reader(csvfile)
# Create Output File
fn_part = (os.path.splitext(fn))
fn_out = fn_part[0] + '_out' + fn_part[1]
outfile = open(fn_out, 'w', newline='')
# Write Output
outfilewriter = csv.writer(outfile)
# outfilewriter.writerow(['Pt', 'Latitude', 'Longitude'])
for row in csvreader:
pt_num = row[0]
zone = float(row[1])
east = float(row[2])
north = float(row[3])
# Calculate Conversion
lat, long = grid2geo(zone, east, north)
lat = dd2dms(lat)
long = dd2dms(long)
output = [pt_num, lat, long]
outfilewriter.writerow(output)
# Close Files
outfile.close()
csvfile.close()
return 'Output saved as ' + str(fn_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Batch Converter of Map Grid of Australia grid co-ordinates to '
'Geodetic Datum of Australia geographic co-ordinates. Files must '
'be .csv and of the format Pt ID, Zone, Easting, Northing with no '
'header line.')
parser.add_argument('f', metavar='--file', type=str, help='Input Filename')
args = parser.parse_args()
fn = args.f
grid2geoio(fn)
``` |
{
"source": "JonathonReinhart/netbox",
"score": 2
} |
#### File: extras/forms/customfields.py
```python
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from extras.choices import *
from extras.models import *
from utilities.forms import BootstrapMixin, BulkEditBaseForm, CSVModelForm
__all__ = (
'CustomFieldModelCSVForm',
'CustomFieldModelBulkEditForm',
'CustomFieldModelFilterForm',
'CustomFieldModelForm',
'CustomFieldsMixin',
)
class CustomFieldsMixin:
"""
Extend a Form to include custom field support.
"""
def __init__(self, *args, **kwargs):
self.custom_fields = []
super().__init__(*args, **kwargs)
self._append_customfield_fields()
def _get_content_type(self):
"""
Return the ContentType of the form's model.
"""
if not hasattr(self, 'model'):
raise NotImplementedError(f"{self.__class__.__name__} must specify a model class.")
return ContentType.objects.get_for_model(self.model)
def _get_custom_fields(self, content_type):
return CustomField.objects.filter(content_types=content_type)
def _get_form_field(self, customfield):
return customfield.to_form_field()
def _append_customfield_fields(self):
"""
Append form fields for all CustomFields assigned to this object type.
"""
for customfield in self._get_custom_fields(self._get_content_type()):
field_name = f'cf_{customfield.name}'
self.fields[field_name] = self._get_form_field(customfield)
# Annotate the field in the list of CustomField form fields
self.custom_fields.append(field_name)
class CustomFieldModelForm(BootstrapMixin, CustomFieldsMixin, forms.ModelForm):
"""
Extend ModelForm to include custom field support.
"""
def _get_content_type(self):
return ContentType.objects.get_for_model(self._meta.model)
def _get_form_field(self, customfield):
if self.instance.pk:
form_field = customfield.to_form_field(set_initial=False)
form_field.initial = self.instance.custom_field_data.get(customfield.name, None)
return form_field
return customfield.to_form_field()
def clean(self):
# Save custom field data on instance
for cf_name in self.custom_fields:
key = cf_name[3:] # Strip "cf_" from field name
value = self.cleaned_data.get(cf_name)
empty_values = self.fields[cf_name].empty_values
# Convert "empty" values to null
self.instance.custom_field_data[key] = value if value not in empty_values else None
return super().clean()
class CustomFieldModelCSVForm(CSVModelForm, CustomFieldModelForm):
def _get_form_field(self, customfield):
return customfield.to_form_field(for_csv_import=True)
class CustomFieldModelBulkEditForm(BootstrapMixin, CustomFieldsMixin, BulkEditBaseForm):
def _get_form_field(self, customfield):
return customfield.to_form_field(set_initial=False, enforce_required=False)
def _append_customfield_fields(self):
"""
Append form fields for all CustomFields assigned to this object type.
"""
for customfield in self._get_custom_fields(self._get_content_type()):
# Annotate non-required custom fields as nullable
if not customfield.required:
self.nullable_fields.append(customfield.name)
self.fields[customfield.name] = self._get_form_field(customfield)
# Annotate the field in the list of CustomField form fields
self.custom_fields.append(customfield.name)
class CustomFieldModelFilterForm(BootstrapMixin, CustomFieldsMixin, forms.Form):
q = forms.CharField(
required=False,
label='Search'
)
def _get_custom_fields(self, content_type):
return CustomField.objects.filter(content_types=content_type).exclude(
Q(filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED) |
Q(type=CustomFieldTypeChoices.TYPE_JSON)
)
def _get_form_field(self, customfield):
return customfield.to_form_field(set_initial=False, enforce_required=False)
```
#### File: extras/forms/models.py
```python
from django import forms
from django.contrib.contenttypes.models import ContentType
from dcim.models import DeviceRole, DeviceType, Platform, Region, Site, SiteGroup
from extras.choices import *
from extras.models import *
from extras.utils import FeatureQuery
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
add_blank_choice, BootstrapMixin, CommentField, ContentTypeChoiceField, ContentTypeMultipleChoiceField,
DynamicModelMultipleChoiceField, JSONField, SlugField, StaticSelect,
)
from virtualization.models import Cluster, ClusterGroup
__all__ = (
'AddRemoveTagsForm',
'ConfigContextForm',
'CustomFieldForm',
'CustomLinkForm',
'ExportTemplateForm',
'ImageAttachmentForm',
'JournalEntryForm',
'TagForm',
'WebhookForm',
)
class CustomFieldForm(BootstrapMixin, forms.ModelForm):
content_types = ContentTypeMultipleChoiceField(
queryset=ContentType.objects.all(),
limit_choices_to=FeatureQuery('custom_fields')
)
class Meta:
model = CustomField
fields = '__all__'
fieldsets = (
('Custom Field', ('name', 'label', 'type', 'weight', 'required', 'description')),
('Assigned Models', ('content_types',)),
('Behavior', ('filter_logic',)),
('Values', ('default', 'choices')),
('Validation', ('validation_minimum', 'validation_maximum', 'validation_regex')),
)
widgets = {
'type': StaticSelect(),
'filter_logic': StaticSelect(),
}
class CustomLinkForm(BootstrapMixin, forms.ModelForm):
content_type = ContentTypeChoiceField(
queryset=ContentType.objects.all(),
limit_choices_to=FeatureQuery('custom_links')
)
class Meta:
model = CustomLink
fields = '__all__'
fieldsets = (
('Custom Link', ('name', 'content_type', 'weight', 'group_name', 'button_class', 'new_window')),
('Templates', ('link_text', 'link_url')),
)
widgets = {
'button_class': StaticSelect(),
'link_text': forms.Textarea(attrs={'class': 'font-monospace'}),
'link_url': forms.Textarea(attrs={'class': 'font-monospace'}),
}
help_texts = {
'link_text': 'Jinja2 template code for the link text. Reference the object as <code>{{ obj }}</code>. '
'Links which render as empty text will not be displayed.',
'link_url': 'Jinja2 template code for the link URL. Reference the object as <code>{{ obj }}</code>.',
}
class ExportTemplateForm(BootstrapMixin, forms.ModelForm):
content_type = ContentTypeChoiceField(
queryset=ContentType.objects.all(),
limit_choices_to=FeatureQuery('export_templates')
)
class Meta:
model = ExportTemplate
fields = '__all__'
fieldsets = (
('Export Template', ('name', 'content_type', 'description')),
('Template', ('template_code',)),
('Rendering', ('mime_type', 'file_extension', 'as_attachment')),
)
widgets = {
'template_code': forms.Textarea(attrs={'class': 'font-monospace'}),
}
class WebhookForm(BootstrapMixin, forms.ModelForm):
content_types = ContentTypeMultipleChoiceField(
queryset=ContentType.objects.all(),
limit_choices_to=FeatureQuery('webhooks')
)
class Meta:
model = Webhook
fields = '__all__'
fieldsets = (
('Webhook', ('name', 'content_types', 'enabled')),
('Events', ('type_create', 'type_update', 'type_delete')),
('HTTP Request', (
'payload_url', 'http_method', 'http_content_type', 'additional_headers', 'body_template', 'secret',
)),
('Conditions', ('conditions',)),
('SSL', ('ssl_verification', 'ca_file_path')),
)
labels = {
'type_create': 'Creations',
'type_update': 'Updates',
'type_delete': 'Deletions',
}
widgets = {
'http_method': StaticSelect(),
'additional_headers': forms.Textarea(attrs={'class': 'font-monospace'}),
'body_template': forms.Textarea(attrs={'class': 'font-monospace'}),
}
class TagForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = Tag
fields = [
'name', 'slug', 'color', 'description'
]
fieldsets = (
('Tag', ('name', 'slug', 'color', 'description')),
)
class AddRemoveTagsForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Add add/remove tags fields
self.fields['add_tags'] = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
self.fields['remove_tags'] = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
class ConfigContextForm(BootstrapMixin, forms.ModelForm):
regions = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False
)
site_groups = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False
)
sites = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False
)
device_types = DynamicModelMultipleChoiceField(
queryset=DeviceType.objects.all(),
required=False
)
roles = DynamicModelMultipleChoiceField(
queryset=DeviceRole.objects.all(),
required=False
)
platforms = DynamicModelMultipleChoiceField(
queryset=Platform.objects.all(),
required=False
)
cluster_groups = DynamicModelMultipleChoiceField(
queryset=ClusterGroup.objects.all(),
required=False
)
clusters = DynamicModelMultipleChoiceField(
queryset=Cluster.objects.all(),
required=False
)
tenant_groups = DynamicModelMultipleChoiceField(
queryset=TenantGroup.objects.all(),
required=False
)
tenants = DynamicModelMultipleChoiceField(
queryset=Tenant.objects.all(),
required=False
)
tags = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
data = JSONField(
label=''
)
class Meta:
model = ConfigContext
fields = (
'name', 'weight', 'description', 'is_active', 'regions', 'site_groups', 'sites', 'roles', 'device_types',
'platforms', 'cluster_groups', 'clusters', 'tenant_groups', 'tenants', 'tags', 'data',
)
class ImageAttachmentForm(BootstrapMixin, forms.ModelForm):
class Meta:
model = ImageAttachment
fields = [
'name', 'image',
]
class JournalEntryForm(BootstrapMixin, forms.ModelForm):
comments = CommentField()
kind = forms.ChoiceField(
choices=add_blank_choice(JournalEntryKindChoices),
required=False,
widget=StaticSelect()
)
class Meta:
model = JournalEntry
fields = ['assigned_object_type', 'assigned_object_id', 'kind', 'comments']
widgets = {
'assigned_object_type': forms.HiddenInput,
'assigned_object_id': forms.HiddenInput,
}
```
#### File: extras/models/tags.py
```python
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from taggit.models import TagBase, GenericTaggedItemBase
from extras.utils import extras_features
from netbox.models import BigIDModel, ChangeLoggedModel
from utilities.choices import ColorChoices
from utilities.fields import ColorField
#
# Tags
#
@extras_features('webhooks', 'export_templates')
class Tag(ChangeLoggedModel, TagBase):
color = ColorField(
default=ColorChoices.COLOR_GREY
)
description = models.CharField(
max_length=200,
blank=True,
)
class Meta:
ordering = ['name']
def get_absolute_url(self):
return reverse('extras:tag', args=[self.pk])
def slugify(self, tag, i=None):
# Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)
slug = slugify(tag, allow_unicode=True)
if i is not None:
slug += "_%d" % i
return slug
class TaggedItem(BigIDModel, GenericTaggedItemBase):
tag = models.ForeignKey(
to=Tag,
related_name="%(app_label)s_%(class)s_items",
on_delete=models.CASCADE
)
class Meta:
index_together = (
("content_type", "object_id")
)
```
#### File: netbox/netbox/navigation_menu.py
```python
from dataclasses import dataclass
from typing import Sequence, Optional
from extras.registry import registry
from utilities.choices import ButtonColorChoices
#
# Nav menu data classes
#
@dataclass
class MenuItemButton:
link: str
title: str
icon_class: str
permissions: Optional[Sequence[str]] = ()
color: Optional[str] = None
@dataclass
class MenuItem:
link: str
link_text: str
permissions: Optional[Sequence[str]] = ()
buttons: Optional[Sequence[MenuItemButton]] = ()
@dataclass
class MenuGroup:
label: str
items: Sequence[MenuItem]
@dataclass
class Menu:
label: str
icon_class: str
groups: Sequence[MenuGroup]
#
# Utility functions
#
def get_model_item(app_label, model_name, label, actions=('add', 'import')):
return MenuItem(
link=f'{app_label}:{model_name}_list',
link_text=label,
permissions=[f'{app_label}.view_{model_name}'],
buttons=get_model_buttons(app_label, model_name, actions)
)
def get_model_buttons(app_label, model_name, actions=('add', 'import')):
buttons = []
if 'add' in actions:
buttons.append(
MenuItemButton(
link=f'{app_label}:{model_name}_add',
title='Add',
icon_class='mdi mdi-plus-thick',
permissions=[f'{app_label}.add_{model_name}'],
color=ButtonColorChoices.GREEN
)
)
if 'import' in actions:
buttons.append(
MenuItemButton(
link=f'{app_label}:{model_name}_import',
title='Import',
icon_class='mdi mdi-upload',
permissions=[f'{app_label}.add_{model_name}'],
color=ButtonColorChoices.CYAN
)
)
return buttons
#
# Nav menus
#
ORGANIZATION_MENU = Menu(
label='Organization',
icon_class='mdi mdi-domain',
groups=(
MenuGroup(
label='Sites',
items=(
get_model_item('dcim', 'site', 'Sites'),
get_model_item('dcim', 'region', 'Regions'),
get_model_item('dcim', 'sitegroup', 'Site Groups'),
get_model_item('dcim', 'location', 'Locations'),
),
),
MenuGroup(
label='Racks',
items=(
get_model_item('dcim', 'rack', 'Racks'),
get_model_item('dcim', 'rackrole', 'Rack Roles'),
get_model_item('dcim', 'rackreservation', 'Reservations'),
MenuItem(
link='dcim:rack_elevation_list',
link_text='Elevations',
permissions=['dcim.view_rack']
),
),
),
MenuGroup(
label='Tenancy',
items=(
get_model_item('tenancy', 'tenant', 'Tenants'),
get_model_item('tenancy', 'tenantgroup', 'Tenant Groups'),
),
),
MenuGroup(
label='Contacts',
items=(
get_model_item('tenancy', 'contact', 'Contacts'),
get_model_item('tenancy', 'contactgroup', 'Contact Groups'),
get_model_item('tenancy', 'contactrole', 'Contact Roles'),
),
),
),
)
DEVICES_MENU = Menu(
label='Devices',
icon_class='mdi mdi-server',
groups=(
MenuGroup(
label='Devices',
items=(
get_model_item('dcim', 'device', 'Devices'),
get_model_item('dcim', 'devicerole', 'Device Roles'),
get_model_item('dcim', 'platform', 'Platforms'),
get_model_item('dcim', 'virtualchassis', 'Virtual Chassis'),
),
),
MenuGroup(
label='Device Types',
items=(
get_model_item('dcim', 'devicetype', 'Device Types'),
get_model_item('dcim', 'manufacturer', 'Manufacturers'),
),
),
MenuGroup(
label='Device Components',
items=(
get_model_item('dcim', 'interface', 'Interfaces', actions=['import']),
get_model_item('dcim', 'frontport', 'Front Ports', actions=['import']),
get_model_item('dcim', 'rearport', 'Rear Ports', actions=['import']),
get_model_item('dcim', 'consoleport', 'Console Ports', actions=['import']),
get_model_item('dcim', 'consoleserverport', 'Console Server Ports', actions=['import']),
get_model_item('dcim', 'powerport', 'Power Ports', actions=['import']),
get_model_item('dcim', 'poweroutlet', 'Power Outlets', actions=['import']),
get_model_item('dcim', 'devicebay', 'Device Bays', actions=['import']),
get_model_item('dcim', 'inventoryitem', 'Inventory Items', actions=['import']),
),
),
),
)
CONNECTIONS_MENU = Menu(
label='Connections',
icon_class='mdi mdi-ethernet',
groups=(
MenuGroup(
label='Connections',
items=(
get_model_item('dcim', 'cable', 'Cables', actions=['import']),
get_model_item('wireless', 'wirelesslink', 'Wireless Links', actions=['import']),
MenuItem(
link='dcim:interface_connections_list',
link_text='Interface Connections',
permissions=['dcim.view_interface']
),
MenuItem(
link='dcim:console_connections_list',
link_text='Console Connections',
permissions=['dcim.view_consoleport']
),
MenuItem(
link='dcim:power_connections_list',
link_text='Power Connections',
permissions=['dcim.view_powerport']
),
),
),
),
)
WIRELESS_MENU = Menu(
label='Wireless',
icon_class='mdi mdi-wifi',
groups=(
MenuGroup(
label='Wireless',
items=(
get_model_item('wireless', 'wirelesslan', 'Wireless LANs'),
get_model_item('wireless', 'wirelesslangroup', 'Wireless LAN Groups'),
),
),
),
)
IPAM_MENU = Menu(
label='IPAM',
icon_class='mdi mdi-counter',
groups=(
MenuGroup(
label='IP Addresses',
items=(
get_model_item('ipam', 'ipaddress', 'IP Addresses'),
get_model_item('ipam', 'iprange', 'IP Ranges'),
),
),
MenuGroup(
label='Prefixes',
items=(
get_model_item('ipam', 'prefix', 'Prefixes'),
get_model_item('ipam', 'role', 'Prefix & VLAN Roles'),
),
),
MenuGroup(
label='ASNs',
items=(
get_model_item('ipam', 'asn', 'ASNs'),
),
),
MenuGroup(
label='Aggregates',
items=(
get_model_item('ipam', 'aggregate', 'Aggregates'),
get_model_item('ipam', 'rir', 'RIRs'),
),
),
MenuGroup(
label='VRFs',
items=(
get_model_item('ipam', 'vrf', 'VRFs'),
get_model_item('ipam', 'routetarget', 'Route Targets'),
),
),
MenuGroup(
label='VLANs',
items=(
get_model_item('ipam', 'vlan', 'VLANs'),
get_model_item('ipam', 'vlangroup', 'VLAN Groups'),
),
),
MenuGroup(
label='Other',
items=(
get_model_item('ipam', 'fhrpgroup', 'FHRP Groups'),
get_model_item('ipam', 'service', 'Services'),
),
),
),
)
VIRTUALIZATION_MENU = Menu(
label='Virtualization',
icon_class='mdi mdi-monitor',
groups=(
MenuGroup(
label='Virtual Machines',
items=(
get_model_item('virtualization', 'virtualmachine', 'Virtual Machines'),
get_model_item('virtualization', 'vminterface', 'Interfaces', actions=['import']),
),
),
MenuGroup(
label='Clusters',
items=(
get_model_item('virtualization', 'cluster', 'Clusters'),
get_model_item('virtualization', 'clustertype', 'Cluster Types'),
get_model_item('virtualization', 'clustergroup', 'Cluster Groups'),
),
),
),
)
CIRCUITS_MENU = Menu(
label='Circuits',
icon_class='mdi mdi-transit-connection-variant',
groups=(
MenuGroup(
label='Circuits',
items=(
get_model_item('circuits', 'circuit', 'Circuits'),
get_model_item('circuits', 'circuittype', 'Circuit Types'),
),
),
MenuGroup(
label='Providers',
items=(
get_model_item('circuits', 'provider', 'Providers'),
get_model_item('circuits', 'providernetwork', 'Provider Networks'),
),
),
),
)
POWER_MENU = Menu(
label='Power',
icon_class='mdi mdi-flash',
groups=(
MenuGroup(
label='Power',
items=(
get_model_item('dcim', 'powerfeed', 'Power Feeds'),
get_model_item('dcim', 'powerpanel', 'Power Panels'),
),
),
),
)
OTHER_MENU = Menu(
label='Other',
icon_class='mdi mdi-notification-clear-all',
groups=(
MenuGroup(
label='Logging',
items=(
get_model_item('extras', 'journalentry', 'Journal Entries', actions=[]),
get_model_item('extras', 'objectchange', 'Change Log', actions=[]),
),
),
MenuGroup(
label='Customization',
items=(
get_model_item('extras', 'customfield', 'Custom Fields'),
get_model_item('extras', 'customlink', 'Custom Links'),
get_model_item('extras', 'exporttemplate', 'Export Templates'),
),
),
MenuGroup(
label='Integrations',
items=(
get_model_item('extras', 'webhook', 'Webhooks'),
MenuItem(
link='extras:report_list',
link_text='Reports',
permissions=['extras.view_report']
),
MenuItem(
link='extras:script_list',
link_text='Scripts',
permissions=['extras.view_script']
),
),
),
MenuGroup(
label='Other',
items=(
get_model_item('extras', 'tag', 'Tags'),
get_model_item('extras', 'configcontext', 'Config Contexts', actions=['add']),
),
),
),
)
MENUS = [
ORGANIZATION_MENU,
DEVICES_MENU,
CONNECTIONS_MENU,
WIRELESS_MENU,
IPAM_MENU,
VIRTUALIZATION_MENU,
CIRCUITS_MENU,
POWER_MENU,
OTHER_MENU,
]
#
# Add plugin menus
#
if registry['plugin_menu_items']:
plugin_menu_groups = []
for plugin_name, items in registry['plugin_menu_items'].items():
plugin_menu_groups.append(
MenuGroup(
label=plugin_name,
items=items
)
)
PLUGIN_MENU = Menu(
label="Plugins",
icon_class="mdi mdi-puzzle",
groups=plugin_menu_groups
)
MENUS.append(PLUGIN_MENU)
```
#### File: tenancy/api/views.py
```python
from rest_framework.routers import APIRootView
from circuits.models import Circuit
from dcim.models import Device, Rack, Site, Cable
from extras.api.views import CustomFieldModelViewSet
from ipam.models import IPAddress, Prefix, VLAN, VRF
from tenancy import filtersets
from tenancy.models import *
from utilities.utils import count_related
from virtualization.models import VirtualMachine, Cluster
from . import serializers
class TenancyRootView(APIRootView):
"""
Tenancy API root view
"""
def get_view_name(self):
return 'Tenancy'
#
# Tenants
#
class TenantGroupViewSet(CustomFieldModelViewSet):
queryset = TenantGroup.objects.add_related_count(
TenantGroup.objects.all(),
Tenant,
'group',
'tenant_count',
cumulative=True
).prefetch_related('tags')
serializer_class = serializers.TenantGroupSerializer
filterset_class = filtersets.TenantGroupFilterSet
class TenantViewSet(CustomFieldModelViewSet):
queryset = Tenant.objects.prefetch_related(
'group', 'tags'
).annotate(
circuit_count=count_related(Circuit, 'tenant'),
device_count=count_related(Device, 'tenant'),
ipaddress_count=count_related(IPAddress, 'tenant'),
prefix_count=count_related(Prefix, 'tenant'),
rack_count=count_related(Rack, 'tenant'),
site_count=count_related(Site, 'tenant'),
virtualmachine_count=count_related(VirtualMachine, 'tenant'),
vlan_count=count_related(VLAN, 'tenant'),
vrf_count=count_related(VRF, 'tenant'),
cluster_count=count_related(Cluster, 'tenant')
)
serializer_class = serializers.TenantSerializer
filterset_class = filtersets.TenantFilterSet
#
# Contacts
#
class ContactGroupViewSet(CustomFieldModelViewSet):
queryset = ContactGroup.objects.add_related_count(
ContactGroup.objects.all(),
Contact,
'group',
'contact_count',
cumulative=True
).prefetch_related('tags')
serializer_class = serializers.ContactGroupSerializer
filterset_class = filtersets.ContactGroupFilterSet
class ContactRoleViewSet(CustomFieldModelViewSet):
queryset = ContactRole.objects.prefetch_related('tags')
serializer_class = serializers.ContactRoleSerializer
filterset_class = filtersets.ContactRoleFilterSet
class ContactViewSet(CustomFieldModelViewSet):
queryset = Contact.objects.prefetch_related('group', 'tags')
serializer_class = serializers.ContactSerializer
filterset_class = filtersets.ContactFilterSet
class ContactAssignmentViewSet(CustomFieldModelViewSet):
queryset = ContactAssignment.objects.prefetch_related('object', 'contact', 'role')
serializer_class = serializers.ContactAssignmentSerializer
filterset_class = filtersets.ContactAssignmentFilterSet
```
#### File: tenancy/tests/test_filtersets.py
```python
from django.test import TestCase
from tenancy.filtersets import *
from tenancy.models import *
from utilities.testing import ChangeLoggedFilterSetTests
class TenantGroupTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = TenantGroup.objects.all()
filterset = TenantGroupFilterSet
@classmethod
def setUpTestData(cls):
parent_tenant_groups = (
TenantGroup(name='Parent Tenant Group 1', slug='parent-tenant-group-1'),
TenantGroup(name='Parent Tenant Group 2', slug='parent-tenant-group-2'),
TenantGroup(name='Parent Tenant Group 3', slug='parent-tenant-group-3'),
)
for tenantgroup in parent_tenant_groups:
tenantgroup.save()
tenant_groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1', parent=parent_tenant_groups[0], description='A'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2', parent=parent_tenant_groups[1], description='B'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3', parent=parent_tenant_groups[2], description='C'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
def test_name(self):
params = {'name': ['Tenant Group 1', 'Tenant Group 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-group-1', 'tenant-group-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_parent(self):
parent_groups = TenantGroup.objects.filter(name__startswith='Parent')[:2]
params = {'parent_id': [parent_groups[0].pk, parent_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'parent': [parent_groups[0].slug, parent_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class TenantTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Tenant.objects.all()
filterset = TenantFilterSet
@classmethod
def setUpTestData(cls):
tenant_groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
def test_name(self):
params = {'name': ['Tenant 1', 'Tenant 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-1', 'tenant-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_group(self):
group = TenantGroup.objects.all()[:2]
params = {'group_id': [group[0].pk, group[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'group': [group[0].slug, group[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class ContactGroupTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = ContactGroup.objects.all()
filterset = ContactGroupFilterSet
@classmethod
def setUpTestData(cls):
parent_contact_groups = (
ContactGroup(name='Parent Contact Group 1', slug='parent-contact-group-1'),
ContactGroup(name='Parent Contact Group 2', slug='parent-contact-group-2'),
ContactGroup(name='Parent Contact Group 3', slug='parent-contact-group-3'),
)
for contactgroup in parent_contact_groups:
contactgroup.save()
contact_groups = (
ContactGroup(name='Contact Group 1', slug='contact-group-1', parent=parent_contact_groups[0], description='A'),
ContactGroup(name='Contact Group 2', slug='contact-group-2', parent=parent_contact_groups[1], description='B'),
ContactGroup(name='Contact Group 3', slug='contact-group-3', parent=parent_contact_groups[2], description='C'),
)
for contactgroup in contact_groups:
contactgroup.save()
def test_name(self):
params = {'name': ['Contact Group 1', 'Contact Group 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['contact-group-1', 'contact-group-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_parent(self):
parent_groups = ContactGroup.objects.filter(parent__isnull=True)[:2]
params = {'parent_id': [parent_groups[0].pk, parent_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'parent': [parent_groups[0].slug, parent_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class ContactRoleTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = ContactRole.objects.all()
filterset = ContactRoleFilterSet
@classmethod
def setUpTestData(cls):
contact_roles = (
ContactRole(name='Contact Role 1', slug='contact-role-1'),
ContactRole(name='Contact Role 2', slug='contact-role-2'),
ContactRole(name='Contact Role 3', slug='contact-role-3'),
)
ContactRole.objects.bulk_create(contact_roles)
def test_name(self):
params = {'name': ['Contact Role 1', 'Contact Role 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['contact-role-1', 'contact-role-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class ContactTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Contact.objects.all()
filterset = ContactFilterSet
@classmethod
def setUpTestData(cls):
contact_groups = (
ContactGroup(name='Contact Group 1', slug='contact-group-1'),
ContactGroup(name='Contact Group 2', slug='contact-group-2'),
ContactGroup(name='Contact Group 3', slug='contact-group-3'),
)
for contactgroup in contact_groups:
contactgroup.save()
contacts = (
Contact(name='Contact 1', group=contact_groups[0]),
Contact(name='Contact 2', group=contact_groups[1]),
Contact(name='Contact 3', group=contact_groups[2]),
)
Contact.objects.bulk_create(contacts)
def test_name(self):
params = {'name': ['Contact 1', 'Contact 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_group(self):
group = ContactGroup.objects.all()[:2]
params = {'group_id': [group[0].pk, group[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'group': [group[0].slug, group[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
```
#### File: wireless/api/views.py
```python
from rest_framework.routers import APIRootView
from extras.api.views import CustomFieldModelViewSet
from wireless import filtersets
from wireless.models import *
from . import serializers
class WirelessRootView(APIRootView):
"""
Wireless API root view
"""
def get_view_name(self):
return 'Wireless'
class WirelessLANGroupViewSet(CustomFieldModelViewSet):
queryset = WirelessLANGroup.objects.add_related_count(
WirelessLANGroup.objects.all(),
WirelessLAN,
'group',
'wirelesslan_count',
cumulative=True
)
serializer_class = serializers.WirelessLANGroupSerializer
filterset_class = filtersets.WirelessLANGroupFilterSet
class WirelessLANViewSet(CustomFieldModelViewSet):
queryset = WirelessLAN.objects.prefetch_related('vlan', 'tags')
serializer_class = serializers.WirelessLANSerializer
filterset_class = filtersets.WirelessLANFilterSet
class WirelessLinkViewSet(CustomFieldModelViewSet):
queryset = WirelessLink.objects.prefetch_related('interface_a', 'interface_b', 'tags')
serializer_class = serializers.WirelessLinkSerializer
filterset_class = filtersets.WirelessLinkFilterSet
```
#### File: netbox/wireless/filtersets.py
```python
import django_filters
from django.db.models import Q
from dcim.choices import LinkStatusChoices
from extras.filters import TagFilter
from ipam.models import VLAN
from netbox.filtersets import OrganizationalModelFilterSet, PrimaryModelFilterSet
from utilities.filters import MultiValueNumberFilter, TreeNodeMultipleChoiceFilter
from .choices import *
from .models import *
__all__ = (
'WirelessLANFilterSet',
'WirelessLANGroupFilterSet',
'WirelessLinkFilterSet',
)
class WirelessLANGroupFilterSet(OrganizationalModelFilterSet):
parent_id = django_filters.ModelMultipleChoiceFilter(
queryset=WirelessLANGroup.objects.all()
)
parent = django_filters.ModelMultipleChoiceFilter(
field_name='parent__slug',
queryset=WirelessLANGroup.objects.all(),
to_field_name='slug'
)
tag = TagFilter()
class Meta:
model = WirelessLANGroup
fields = ['id', 'name', 'slug', 'description']
class WirelessLANFilterSet(PrimaryModelFilterSet):
q = django_filters.CharFilter(
method='search',
label='Search',
)
group_id = TreeNodeMultipleChoiceFilter(
queryset=WirelessLANGroup.objects.all(),
field_name='group',
lookup_expr='in'
)
group = TreeNodeMultipleChoiceFilter(
queryset=WirelessLANGroup.objects.all(),
field_name='group',
lookup_expr='in',
to_field_name='slug'
)
vlan_id = django_filters.ModelMultipleChoiceFilter(
queryset=VLAN.objects.all()
)
auth_type = django_filters.MultipleChoiceFilter(
choices=WirelessAuthTypeChoices
)
auth_cipher = django_filters.MultipleChoiceFilter(
choices=WirelessAuthCipherChoices
)
tag = TagFilter()
class Meta:
model = WirelessLAN
fields = ['id', 'ssid', 'auth_psk']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = (
Q(ssid__icontains=value) |
Q(description__icontains=value)
)
return queryset.filter(qs_filter)
class WirelessLinkFilterSet(PrimaryModelFilterSet):
q = django_filters.CharFilter(
method='search',
label='Search',
)
interface_a_id = MultiValueNumberFilter()
interface_b_id = MultiValueNumberFilter()
status = django_filters.MultipleChoiceFilter(
choices=LinkStatusChoices
)
auth_type = django_filters.MultipleChoiceFilter(
choices=WirelessAuthTypeChoices
)
auth_cipher = django_filters.MultipleChoiceFilter(
choices=WirelessAuthCipherChoices
)
tag = TagFilter()
class Meta:
model = WirelessLink
fields = ['id', 'ssid', 'auth_psk']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = (
Q(ssid__icontains=value) |
Q(description__icontains=value)
)
return queryset.filter(qs_filter)
``` |
{
"source": "JonathonReinhart/psunlinked",
"score": 2
} |
#### File: JonathonReinhart/psunlinked/psunlinked.py
```python
from __future__ import print_function
import psutil
import re
from pprint import pprint
import argparse
class Data(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
return '<Data({0})>'.format(', '.join('{key}={val}'.format(key=key, val=repr(val))
for key,val in self.__dict__.iteritems()))
def parse_mapline(line):
# 7fbe721a4000-7fbe7235b000 r-xp 00000000 fd:01 1185241 /usr/lib64/libc-2.21.so (deleted)
# fs/proc/task_mmu.c show_map_vma()
# seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
# start,
# end,
# flags & VM_READ ? 'r' : '-',
# flags & VM_WRITE ? 'w' : '-',
# flags & VM_EXEC ? 'x' : '-',
# flags & VM_MAYSHARE ? 's' : 'p',
# pgoff,
# MAJOR(dev), MINOR(dev), ino);
parts = line.split(None, 5)
addr = parts[0].split('-')
flags = parts[1]
dev = parts[3].split(':')
try:
path = parts[5].rstrip()
except IndexError:
path = ''
deleted = False
if path.endswith(' (deleted)'):
path = path[:-10]
deleted = True
return Data(
start = int(addr[0], 16),
end = int(addr[1], 16),
readable = flags[0] == 'r',
writable = flags[1] == 'w',
executable = flags[2] == 'x',
mayshare = flags[3] == 's',
pgoff = int(parts[2], 16),
major = int(dev[0], 16),
minor = int(dev[1], 16),
inode = int(parts[4], 10),
path = path,
deleted = deleted,
)
def read_maps(pid):
try:
with open('/proc/{pid}/maps'.format(pid=pid), 'r') as f:
for line in f:
yield parse_mapline(line)
except IOError:
raise psutil.AccessDenied()
def handle_proc(proc, show_files=False):
printed_name = False
for m in read_maps(proc.pid):
if m.executable and m.deleted:
if not printed_name:
printed_name = True
print('[{0}] {1}'.format(proc.pid, proc.name()))
if show_files:
print(' ' + m.path)
def parse_args():
ap = argparse.ArgumentParser(description='Find processes executing deleted files')
ap.add_argument('--show-files', '-f', action='store_true',
help='Show deleted file paths')
return ap.parse_args()
def main():
args = parse_args()
print('Processes executing deleted files:')
for proc in psutil.process_iter():
try:
handle_proc(proc, show_files=args.show_files)
except psutil.AccessDenied:
continue
if __name__ == '__main__':
main()
``` |
{
"source": "JonathonReinhart/scuba",
"score": 3
} |
#### File: scuba/scuba/__main__.py
```python
import os
import sys
import shlex
import itertools
import argparse
try:
import argcomplete
except ImportError:
class argcomplete:
@staticmethod
def autocomplete(*_, **__):
pass
from . import dockerutil
from .config import find_config, ScubaConfig, ConfigError, ConfigNotFoundError
from .dockerutil import DockerError, DockerExecuteError
from .scuba import ScubaDive, ScubaError
from .utils import format_cmdline, parse_env_var
from .version import __version__
def appmsg(fmt, *args):
print('scuba: ' + fmt.format(*args), file=sys.stderr)
def parse_scuba_args(argv):
def _list_images_completer(**_):
return dockerutil.get_images()
def _list_aliases_completer(parsed_args, **_):
# We don't want to try to complete any aliases if one was already given
if parsed_args.command:
return []
try:
_, _, config = find_config()
return sorted(config.aliases)
except (ConfigNotFoundError, ConfigError):
argcomplete.warn('No or invalid config found. Cannot auto-complete aliases.')
return []
ap = argparse.ArgumentParser(description='Simple Container-Utilizing Build Apparatus')
ap.add_argument('-d', '--docker-arg', dest='docker_args', action='append',
type=lambda x: shlex.split(x), default=[],
help="Pass additional arguments to 'docker run'")
ap.add_argument('-e', '--env', dest='env_vars', action='append',
type=parse_env_var, default=[],
help='Environment variables to pass to docker')
ap.add_argument('--entrypoint',
help='Override the default ENTRYPOINT of the image')
ap.add_argument('--image', help='Override Docker image').completer = _list_images_completer
ap.add_argument('--shell', help='Override shell used in Docker container')
ap.add_argument('-n', '--dry-run', action='store_true',
help="Don't actually invoke docker; just print the docker cmdline")
ap.add_argument('-r', '--root', action='store_true',
help="Run container as root (don't create scubauser)")
ap.add_argument('-v', '--version', action='version', version='scuba ' + __version__)
ap.add_argument('-V', '--verbose', action='store_true',
help="Be verbose")
ap.add_argument('command', nargs=argparse.REMAINDER,
help="Command (and arguments) to run in the container").completer = _list_aliases_completer
argcomplete.autocomplete(ap, always_complete_options=False)
args = ap.parse_args(argv)
# Flatten docker arguments into single list
args.docker_args = list(itertools.chain.from_iterable(args.docker_args))
# Convert env var tuples into a dict, forbidding duplicates
env = dict()
for k,v in args.env_vars:
if k in env:
ap.error("Duplicate env var {}".format(k))
env[k] = v
args.env_vars = env
global g_verbose
g_verbose = args.verbose
return args
def run_scuba(scuba_args):
# Locate .scuba.yml
try:
# top_path is where .scuba.yml is found, and becomes the top of our bind mount.
# top_rel is the relative path from top_path to the current working directory,
# and is where we'll set the working directory in the container (relative to
# the bind mount point).
top_path, top_rel, config = find_config()
except ConfigNotFoundError:
# .scuba.yml is allowed to be missing if --image was given.
if not scuba_args.image:
raise
top_path, top_rel, config = os.getcwd(), '', ScubaConfig()
# Set up scuba Docker invocation
dive = ScubaDive(
user_command = scuba_args.command,
config = config,
top_path = top_path,
top_rel = top_rel,
docker_args = scuba_args.docker_args,
env = scuba_args.env_vars,
as_root = scuba_args.root,
verbose = scuba_args.verbose,
image_override = scuba_args.image,
entrypoint = scuba_args.entrypoint,
shell_override = scuba_args.shell,
keep_tempfiles = scuba_args.dry_run,
)
with dive:
run_args = dive.get_docker_cmdline()
if g_verbose or scuba_args.dry_run:
print(str(dive))
print()
appmsg('Docker command line:')
print('$ ' + format_cmdline(run_args))
if scuba_args.dry_run:
appmsg("Temp files not cleaned up")
return 0
# Explicitly pass sys.stdin/stdout/stderr so they apply to the
# child process if overridden (by tests).
return dockerutil.call(
args = run_args,
stdin = sys.stdin,
stdout = sys.stdout,
stderr = sys.stderr,
)
def main(argv=None):
scuba_args = parse_scuba_args(argv)
try:
rc = run_scuba(scuba_args) or 0
sys.exit(rc)
except ConfigError as e:
appmsg("Config error: " + str(e))
sys.exit(128)
except DockerExecuteError as e:
appmsg(str(e))
sys.exit(2)
except (ScubaError, DockerError) as e:
appmsg(str(e))
sys.exit(128)
if __name__ == '__main__':
main()
```
#### File: scuba/tests/test_config.py
```python
from .utils import *
import pytest
import logging
import os
from os.path import join
from shutil import rmtree
import scuba.config
class TestCommonScriptSchema:
def test_simple(self):
'''Simple form: value is a string'''
node = 'foo'
result = scuba.config._process_script_node(node, 'dontcare')
assert result == ['foo']
def test_script_key_string(self):
'''Value is a mapping: script is a string'''
node = dict(
script = 'foo',
otherkey = 'other',
)
result = scuba.config._process_script_node(node, 'dontcare')
assert result == ['foo']
def test_script_key_list(self):
'''Value is a mapping: script is a list'''
node = dict(
script = [
'foo',
'bar',
],
otherkey = 'other',
)
result = scuba.config._process_script_node(node, 'dontcare')
assert result == ['foo', 'bar']
def test_script_key_mapping_invalid(self):
'''Value is a mapping: script is a mapping (invalid)'''
node = dict(
script = dict(
whatisthis = 'idontknow',
),
)
with pytest.raises(scuba.config.ConfigError):
scuba.config._process_script_node(node, 'dontcare')
@pytest.mark.usefixtures("in_tmp_path")
class TestConfig:
######################################################################
# Find config
def test_find_config_cur_dir(self, in_tmp_path):
'''find_config can find the config in the current directory'''
with open('.scuba.yml', 'w') as f:
f.write('image: bosybux\n')
path, rel, _ = scuba.config.find_config()
assert_paths_equal(path, in_tmp_path)
assert_paths_equal(rel, '')
def test_find_config_parent_dir(self, in_tmp_path):
'''find_config cuba can find the config in the parent directory'''
with open('.scuba.yml', 'w') as f:
f.write('image: bosybux\n')
os.mkdir('subdir')
os.chdir('subdir')
# Verify our current working dir
assert_paths_equal(os.getcwd(), in_tmp_path.joinpath('subdir'))
path, rel, _ = scuba.config.find_config()
assert_paths_equal(path, in_tmp_path)
assert_paths_equal(rel, 'subdir')
def test_find_config_way_up(self, in_tmp_path):
'''find_config can find the config way up the directory hierarchy'''
with open('.scuba.yml', 'w') as f:
f.write('image: bosybux\n')
subdirs = ['foo', 'bar', 'snap', 'crackle', 'pop']
for sd in subdirs:
os.mkdir(sd)
os.chdir(sd)
# Verify our current working dir
assert_paths_equal(os.getcwd(), in_tmp_path.joinpath(*subdirs))
path, rel, _ = scuba.config.find_config()
assert_paths_equal(path, in_tmp_path)
assert_paths_equal(rel, join(*subdirs))
def test_find_config_nonexist(self):
'''find_config raises ConfigError if the config cannot be found'''
with pytest.raises(scuba.config.ConfigError):
scuba.config.find_config()
######################################################################
# Load config
def _invalid_config(self, match=None):
with pytest.raises(scuba.config.ConfigError, match=match) as e:
scuba.config.load_config('.scuba.yml')
def test_load_config_no_image(self):
'''load_config raises ConfigError if the config is empty and image is referenced'''
with open('.scuba.yml', 'w') as f:
pass
config = scuba.config.load_config('.scuba.yml')
with pytest.raises(scuba.config.ConfigError):
img = config.image
def test_load_unexpected_node(self):
'''load_config raises ConfigError on unexpected config node'''
with open('.scuba.yml', 'w') as f:
f.write('image: bosybux\n')
f.write('unexpected_node_123456: value\n')
self._invalid_config()
def test_load_config_minimal(self):
'''load_config loads a minimal config'''
with open('.scuba.yml', 'w') as f:
f.write('image: bosybux\n')
config = scuba.config.load_config('.scuba.yml')
assert config.image == 'bosybux'
def test_load_config_with_aliases(self):
'''load_config loads a config with aliases'''
with open('.scuba.yml', 'w') as f:
f.write('image: bosybux\n')
f.write('aliases:\n')
f.write(' foo: bar\n')
f.write(' snap: crackle pop\n')
config = scuba.config.load_config('.scuba.yml')
assert config.image == 'bosybux'
assert len(config.aliases) == 2
assert config.aliases['foo'].script == ['bar']
assert config.aliases['snap'].script == ['crackle pop']
def test_load_config__no_spaces_in_aliases(self):
'''load_config refuses spaces in aliases'''
with open('.scuba.yml', 'w') as f:
f.write('image: bosybux\n')
f.write('aliases:\n')
f.write(' this has spaces: whatever\n')
self._invalid_config()
def test_load_config_image_from_yaml(self):
'''load_config loads a config using !from_yaml'''
with open('.gitlab.yml', 'w') as f:
f.write('image: dummian:8.2\n')
with open('.scuba.yml', 'w') as f:
f.write('image: !from_yaml .gitlab.yml image\n')
config = scuba.config.load_config('.scuba.yml')
assert config.image == 'dummian:8.2'
def test_load_config_image_from_yaml_nested_keys(self):
'''load_config loads a config using !from_yaml with nested keys'''
with open('.gitlab.yml', 'w') as f:
f.write('somewhere:\n')
f.write(' down:\n')
f.write(' here: dummian:8.2\n')
with open('.scuba.yml', 'w') as f:
f.write('image: !from_yaml .gitlab.yml somewhere.down.here\n')
config = scuba.config.load_config('.scuba.yml')
assert config.image == 'dummian:8.2'
def test_load_config_image_from_yaml_nested_keys_with_escaped_characters(self):
'''load_config loads a config using !from_yaml with nested keys containing escaped '.' characters'''
with open('.gitlab.yml', 'w') as f:
f.write('.its:\n')
f.write(' somewhere.down:\n')
f.write(' here: dummian:8.2\n')
with open('.scuba.yml', 'w') as f:
f.write('image: !from_yaml .gitlab.yml "\\.its.somewhere\\.down.here"\n')
config = scuba.config.load_config('.scuba.yml')
assert config.image == 'dummian:8.2'
def test_load_config_from_yaml_cached_file(self):
'''load_config loads a config using !from_yaml from cached version'''
with open('.gitlab.yml', 'w') as f:
f.write('one: dummian:8.2\n')
f.write('two: dummian:9.3\n')
f.write('three: dummian:10.1\n')
with open('.scuba.yml', 'w') as f:
f.write('image: !from_yaml .gitlab.yml one\n')
f.write('aliases:\n')
f.write(' two:\n')
f.write(' image: !from_yaml .gitlab.yml two\n')
f.write(' script: ugh\n')
f.write(' three:\n')
f.write(' image: !from_yaml .gitlab.yml three\n')
f.write(' script: ugh\n')
with mock_open() as m:
config = scuba.config.load_config('.scuba.yml')
# Assert that .gitlab.yml was only opened once
assert m.mock_calls == [
mock.call('.scuba.yml', 'r'),
mock.call('.gitlab.yml', 'r'),
]
def test_load_config_image_from_yaml_nested_key_missing(self):
'''load_config raises ConfigError when !from_yaml references nonexistant key'''
with open('.gitlab.yml', 'w') as f:
f.write('somewhere:\n')
f.write(' down:\n')
with open('.scuba.yml', 'w') as f:
f.write('image: !from_yaml .gitlab.yml somewhere.NONEXISTANT\n')
self._invalid_config()
def test_load_config_image_from_yaml_missing_file(self):
'''load_config raises ConfigError when !from_yaml references nonexistant file'''
with open('.scuba.yml', 'w') as f:
f.write('image: !from_yaml .NONEXISTANT.yml image\n')
self._invalid_config()
def test_load_config_image_from_yaml_unicode_args(self):
'''load_config !from_yaml works with unicode args'''
with open('.gitlab.yml', 'w') as f:
f.write('𝕦𝕟𝕚𝕔𝕠𝕕𝕖: 𝕨𝕠𝕣𝕜𝕤:𝕠𝕜\n')
with open('.scuba.yml', 'w') as f:
f.write('image: !from_yaml .gitlab.yml 𝕦𝕟𝕚𝕔𝕠𝕕𝕖\n')
config = scuba.config.load_config('.scuba.yml')
assert config.image == '𝕨𝕠𝕣𝕜𝕤:𝕠𝕜'
def test_load_config_image_from_yaml_missing_arg(self):
'''load_config raises ConfigError when !from_yaml has missing args'''
with open('.gitlab.yml', 'w') as f:
f.write('image: dummian:8.2\n')
with open('.scuba.yml', 'w') as f:
f.write('image: !from_yaml .gitlab.yml\n')
self._invalid_config()
def __test_load_config_safe(self, bad_yaml_path):
with open(bad_yaml_path, 'w') as f:
f.write('danger:\n')
f.write(' - !!python/object/apply:print [Danger]\n')
f.write(' - !!python/object/apply:sys.exit [66]\n')
pat = "could not determine a constructor for the tag.*python/object/apply"
with pytest.raises(scuba.config.ConfigError, match=pat) as ctx:
scuba.config.load_config('.scuba.yml')
def test_load_config_safe(self):
'''load_config safely loads yaml'''
self.__test_load_config_safe('.scuba.yml')
def test_load_config_safe_external(self):
'''load_config safely loads yaml from external files'''
with open('.scuba.yml', 'w') as f:
f.write('image: !from_yaml .external.yml danger\n')
self.__test_load_config_safe('.external.yml')
############################################################################
# Hooks
def test_hooks_mixed(self):
'''hooks of mixed forms are valid'''
with open('.scuba.yml', 'w') as f:
f.write('''
image: na
hooks:
root:
script:
- echo "This runs before we switch users"
- id
user: id
''')
config = scuba.config.load_config('.scuba.yml')
assert config.hooks.get('root') == ['echo "This runs before we switch users"', 'id']
assert config.hooks.get('user') == ['id']
def test_hooks_invalid_list(self):
'''hooks with list not under "script" key are invalid'''
with open('.scuba.yml', 'w') as f:
f.write('''
image: na
hooks:
user:
- this list should be under
- a 'script'
''')
self._invalid_config()
def test_hooks_missing_script(self):
'''hooks with dict, but missing "script" are invalid'''
with open('.scuba.yml', 'w') as f:
f.write('''
image: na
hooks:
user:
not_script: missing "script" key
''')
self._invalid_config()
############################################################################
# Env
def test_env_invalid(self):
'''Environment must be dict or list of strings'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
environment: 666
''')
self._invalid_config('must be list or mapping')
def test_env_top_dict(self, monkeypatch):
'''Top-level environment can be loaded (dict)'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
environment:
FOO: This is foo
FOO_WITH_QUOTES: "\"Quoted foo\"" # Quotes included in value
BAR: "This is bar"
MAGIC: 42
SWITCH_1: true # YAML boolean
SWITCH_2: "true" # YAML string
EMPTY: ""
EXTERNAL: # Comes from os env
EXTERNAL_NOTSET: # Missing in os env
''')
monkeypatch.setenv('EXTERNAL', 'Outside world')
monkeypatch.delenv('EXTERNAL_NOTSET', raising=False)
config = scuba.config.load_config('.scuba.yml')
expect = dict(
FOO = "This is foo",
FOO_WITH_QUOTES = "\"Quoted foo\"",
BAR = "This is bar",
MAGIC = "42", # N.B. string
SWITCH_1 = "True", # Unfortunately this is due to str(bool(1))
SWITCH_2 = "true",
EMPTY = "",
EXTERNAL = "Outside world",
EXTERNAL_NOTSET = "",
)
assert expect == config.environment
def test_env_top_list(self, monkeypatch):
'''Top-level environment can be loaded (list)'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
environment:
- FOO=This is foo # No quotes
- FOO_WITH_QUOTES="Quoted foo" # Quotes included in value
- BAR=This is bar
- MAGIC=42
- SWITCH_2=true
- EMPTY=
- EXTERNAL # Comes from os env
- EXTERNAL_NOTSET # Missing in os env
''')
monkeypatch.setenv('EXTERNAL', 'Outside world')
monkeypatch.delenv('EXTERNAL_NOTSET', raising=False)
config = scuba.config.load_config('.scuba.yml')
expect = dict(
FOO = "This is foo",
FOO_WITH_QUOTES = "\"Quoted foo\"",
BAR = "This is bar",
MAGIC = "42", # N.B. string
SWITCH_2 = "true",
EMPTY = "",
EXTERNAL = "Outside world",
EXTERNAL_NOTSET = "",
)
assert expect == config.environment
def test_env_alias(self):
'''Alias can have environment'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
aliases:
al:
script: Don't care
environment:
FOO: Overridden
MORE: Hello world
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['al'].environment == dict(
FOO = "Overridden",
MORE = "Hello world",
)
############################################################################
# Entrypoint
def test_entrypoint_not_set(self):
'''Entrypoint can be missing'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
''')
config = scuba.config.load_config('.scuba.yml')
assert config.entrypoint is None
def test_entrypoint_null(self):
'''Entrypoint can be set to null'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
entrypoint:
''')
config = scuba.config.load_config('.scuba.yml')
assert config.entrypoint == '' # Null => empty string
def test_entrypoint_invalid(self):
'''Entrypoint of incorrect type raises ConfigError'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
entrypoint: 666
''')
self._invalid_config('must be a string')
def test_entrypoint_emptry_string(self):
'''Entrypoint can be set to an empty string'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
entrypoint: ""
''')
config = scuba.config.load_config('.scuba.yml')
assert config.entrypoint == ''
def test_entrypoint_set(self):
'''Entrypoint can be set'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
entrypoint: my_ep
''')
config = scuba.config.load_config('.scuba.yml')
assert config.entrypoint == 'my_ep'
def test_alias_entrypoint_null(self):
'''Entrypoint can be set to null via alias'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
entrypoint: na_ep
aliases:
testalias:
entrypoint:
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].entrypoint == '' # Null => empty string
def test_alias_entrypoint_empty_string(self):
'''Entrypoint can be set to an empty string via alias'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
entrypoint: na_ep
aliases:
testalias:
entrypoint: ""
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].entrypoint == ''
def test_alias_entrypoint(self):
'''Entrypoint can be set via alias'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
entrypoint: na_ep
aliases:
testalias:
entrypoint: use_this_ep
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].entrypoint == 'use_this_ep'
############################################################################
# docker_args
def test_docker_args_not_set(self):
'''docker_args can be missing'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
''')
config = scuba.config.load_config('.scuba.yml')
assert config.docker_args is None
def test_docker_args_invalid(self):
'''docker_args of incorrect type raises ConfigError'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: 666
''')
self._invalid_config('must be a string')
def test_docker_args_null(self):
'''docker_args can be set to null'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args:
''')
config = scuba.config.load_config('.scuba.yml')
assert config.docker_args == []
def test_docker_args_set_empty_string(self):
'''docker_args can be set to empty string'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: ''
''')
config = scuba.config.load_config('.scuba.yml')
assert config.docker_args == [] # '' -> [] after shlex.split()
def test_docker_args_set(self):
'''docker_args can be set'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: --privileged
''')
config = scuba.config.load_config('.scuba.yml')
assert config.docker_args == ['--privileged']
def test_docker_args_set_multi(self):
'''docker_args can be set to multiple args'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: --privileged -v /tmp/:/tmp/
''')
config = scuba.config.load_config('.scuba.yml')
assert config.docker_args == ['--privileged', '-v', '/tmp/:/tmp/']
def test_alias_docker_args_null(self):
'''docker_args can be set to null via alias'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: --privileged
aliases:
testalias:
docker_args:
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].docker_args == []
def test_alias_docker_args_empty_string(self):
'''docker_args can be set to empty string via alias'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: --privileged
aliases:
testalias:
docker_args: ''
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].docker_args == []
def test_alias_docker_args_set(self):
'''docker_args can be set via alias'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: --privileged
aliases:
testalias:
docker_args: -v /tmp/:/tmp/
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].docker_args == ['-v', '/tmp/:/tmp/']
def test_alias_docker_args_override(self):
'''docker_args can be tagged for override'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: --privileged
aliases:
testalias:
docker_args: !override -v /tmp/:/tmp/
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].docker_args == ['-v', '/tmp/:/tmp/']
assert isinstance(config.aliases['testalias'].docker_args, scuba.config.OverrideMixin)
def test_alias_docker_args_override_implicit_null(self):
'''docker_args can be overridden with an implicit null value'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: --privileged
aliases:
testalias:
docker_args: !override
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].docker_args == []
assert isinstance(config.aliases['testalias'].docker_args, scuba.config.OverrideMixin)
def test_alias_docker_args_override_from_yaml(self):
'''!override tag can be applied before a !from_yaml tag'''
with open('args.yml', 'w') as f:
f.write('args: -v /tmp/:/tmp/\n')
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: --privileged
aliases:
testalias:
docker_args: !override '!from_yaml args.yml args'
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].docker_args == ['-v', '/tmp/:/tmp/']
assert isinstance(config.aliases['testalias'].docker_args, scuba.config.OverrideMixin)
def test_alias_docker_args_from_yaml_override(self):
'''!override tag can be applied inside of a !from_yaml tag'''
with open('args.yml', 'w') as f:
f.write('args: !override -v /tmp/:/tmp/\n')
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
docker_args: --privileged
aliases:
testalias:
docker_args: !from_yaml args.yml args
script:
- ugh
''')
config = scuba.config.load_config('.scuba.yml')
assert config.aliases['testalias'].docker_args == ['-v', '/tmp/:/tmp/']
assert isinstance(config.aliases['testalias'].docker_args, scuba.config.OverrideMixin)
############################################################################
# volumes
def test_volumes_not_set(self):
'''volumes can be missing'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
''')
config = scuba.config.load_config('.scuba.yml')
assert config.volumes is None
def test_volumes_null(self):
'''volumes can be set to null'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
volumes:
''')
config = scuba.config.load_config('.scuba.yml')
assert config.volumes == None
def test_volumes_invalid(self):
'''volumes of incorrect type raises ConfigError'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
volumes: 666
''')
self._invalid_config('must be a dict')
def test_volumes_invalid_volume_type(self):
'''volume of incorrect type (list) raises ConfigError'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
volumes:
/foo:
- a list makes no sense
''')
self._invalid_config('must be string or dict')
def test_volumes_null_volume_type(self):
'''volume of None type raises ConfigError'''
# NOTE: In the future, we might want to support this as a volume
# (non-bindmount, e.g. '-v /somedata'), or as tmpfs
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
volumes:
/bar:
''')
self._invalid_config('hostpath')
def test_volume_as_dict_missing_hostpath(self):
'''volume of incorrect type raises ConfigError'''
# NOTE: In the future, we might want to support this as a volume
# (non-bindmount, e.g. '-v /somedata'), or as tmpfs
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
volumes:
/bar:
options: hostpath,is,missing
''')
self._invalid_config('hostpath')
def test_volumes_simple_volume(self):
'''volumes can be set using the simple form'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
volumes:
/cpath: /hpath
''')
config = scuba.config.load_config('.scuba.yml')
assert len(config.volumes) == 1
v = config.volumes['/cpath']
assert v.container_path == '/cpath'
assert v.host_path == '/hpath'
def test_volumes_complex(self):
'''volumes can be set using the complex form'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
volumes:
/foo: /host/foo
/bar:
hostpath: /host/bar
/snap:
hostpath: /host/snap
options: z,ro
''')
config = scuba.config.load_config('.scuba.yml')
vols = config.volumes
assert len(vols) == 3
v = vols['/foo']
assert isinstance(v, scuba.config.ScubaVolume)
assert v.container_path == '/foo'
assert v.host_path == '/host/foo'
assert v.options == []
v = vols['/bar']
assert isinstance(v, scuba.config.ScubaVolume)
assert v.container_path == '/bar'
assert v.host_path == '/host/bar'
assert v.options == []
v = vols['/snap']
assert isinstance(v, scuba.config.ScubaVolume)
assert v.container_path == '/snap'
assert v.host_path == '/host/snap'
assert v.options == ['z', 'ro']
def test_alias_volumes_set(self):
'''docker_args can be set via alias'''
with open('.scuba.yml', 'w') as f:
f.write(r'''
image: na
aliases:
testalias:
script:
- ugh
volumes:
/foo: /host/foo
/bar:
hostpath: /host/bar
options: z,ro
''')
config = scuba.config.load_config('.scuba.yml')
vols = config.aliases['testalias'].volumes
assert len(vols) == 2
v = vols['/foo']
assert isinstance(v, scuba.config.ScubaVolume)
assert v.container_path == '/foo'
assert v.host_path == '/host/foo'
assert v.options == []
v = vols['/bar']
assert isinstance(v, scuba.config.ScubaVolume)
assert v.container_path == '/bar'
assert v.host_path == '/host/bar'
assert v.options == ['z', 'ro']
``` |
{
"source": "JonathonRiley/advent_of_code",
"score": 3
} |
#### File: 2021/day14/efficient_solution.py
```python
import os
from typing import List, Tuple
from collections import defaultdict
from .example_chains import example_chains_after_steps
DIRNAME = os.path.dirname(__file__)
def read_file(path:str) -> List[int]:
relative_path = os.path.join(DIRNAME, path)
with open(relative_path) as file:
starting_chain = [char for char in file.readline().strip()]
directions = list(map(lambda x: [ch.strip() for ch in x.split(' -> ')], [row for row in file.readlines() if '->' in row]))
return starting_chain, directions
class Polymer:
def __init__(self, starting_chain:List[str], directions: List[Tuple[str,str]]):
self.starting_chain = starting_chain
self.directions = directions
self.pairs = self.build_initial_pairs()
self.first = starting_chain[0]
self.last = starting_chain[-1]
def build_initial_pairs(self) -> dict:
pairs = {}
for i in range(len(self.starting_chain)-1):
pair = ''.join(self.starting_chain[i:i+2])
if pair in pairs:
pairs[''.join(self.starting_chain[i:i+2])] += 1
else:
pairs[''.join(self.starting_chain[i:i+2])] = 1
return pairs
def apply_rules(self) -> None:
new_pairs = defaultdict(lambda x: 0)
for pair, insert_ in self.directions:
first_pair = pair[0]+insert_
second_pair = insert_ + pair[1]
if pair in self.pairs:
if first_pair in new_pairs:
new_pairs[first_pair] += self.pairs[pair]
else:
new_pairs[first_pair] = self.pairs[pair]
if second_pair in new_pairs:
new_pairs[second_pair] += self.pairs[pair]
else:
new_pairs[second_pair] = self.pairs[pair]
del self.pairs[pair]
for pair, freq in self.pairs:
new_pairs[pair] += freq
self.pairs = new_pairs
def calculate_element_range(self) -> int:
elements = dict()
for pair, freq in self.pairs.items():
if pair[0] in elements:
elements[pair[0]] += freq
else:
elements[pair[0]] = freq
if pair[1] in elements:
elements[pair[1]] += freq
else:
elements[pair[1]] = freq
elements[self.first] += 1
elements[self.last] += 1
elements = {key:val//2 for key, val in elements.items()}
elements_sorted = sorted(list(elements.items()), key=lambda x: x[1])
return abs(elements_sorted[-1][1] - elements_sorted[0][1])
if __name__ == '__main__':
example_chain, example_directions = read_file('example.txt')
actual_chain, actual_directions = read_file('input.txt')
# # part 1
example_polymer = Polymer(example_chain, example_directions)
for i in range(10):
example_polymer.apply_rules()
assert example_polymer.calculate_element_range() == 1588
actual_polymer = Polymer(actual_chain, actual_directions)
for i in range(10):
actual_polymer.apply_rules()
print(f'Part 1 solution: {actual_polymer.calculate_element_range()}')
# part 2
for i in range(30):
example_polymer.apply_rules()
assert example_polymer.calculate_element_range() == 2188189693529
for i in range(30):
actual_polymer.apply_rules()
print(f'Part 2 solution: {actual_polymer.calculate_element_range()}')
```
#### File: 2021/day14/inefficient_solution.py
```python
import os
from typing import List, Tuple
from collections import Counter
from .example_chains import example_chains_after_steps
DIRNAME = os.path.dirname(__file__)
def read_file(path:str) -> List[int]:
relative_path = os.path.join(DIRNAME, path)
with open(relative_path) as file:
starting_chain = [char for char in file.readline().strip()]
directions = list(map(lambda x: [ch.strip() for ch in x.split(' -> ')], [row for row in file.readlines() if '->' in row]))
return starting_chain, directions
class Link:
def __init__(self, value:str):
self.value = value
self.next: Link = None
class Polymer:
def __init__(self, starting_chain: List[str], directions: Tuple[str,str]):
self.first = None
self.directions = directions
self.build_initial_chain(starting_chain)
def build_initial_chain(self, starting_chain: List[str]):
self.first = Link(starting_chain[0])
link = self.first
for char in starting_chain[1:]:
link.next = Link(char)
link = link.next
def apply_rules(self) -> None:
link = self.first
while link.next is not None:
future_link = link.next
for rule, insert_ in self.directions:
if link.value == rule[0] and future_link.value == rule[1]:
link.next = Link(insert_)
link.next.next = future_link
link = future_link
def fetch_chain(self) -> str:
chain = ''
if self.first is not None:
link: Link = self.first
chain += link.value
while link.next is not None:
link = link.next
chain += link.value
return chain
def calculate_element_range(self) -> int:
chain_freq = Counter([char for char in self.fetch_chain()])
sorted_freq = sorted(list(chain_freq.items()), key=lambda x: x[1])
return abs(sorted_freq[-1][1] - sorted_freq[0][1])
if __name__ == '__main__':
example_chain, example_directions = read_file('example.txt')
actual_chain, actual_directions = read_file('input.txt')
# # part 1
example_polymer = Polymer(example_chain, example_directions)
assert example_polymer.fetch_chain() == 'NNCB'
for i in range(10):
example_polymer.apply_rules()
if i < 4: assert example_polymer.fetch_chain() == example_chains_after_steps[i]
assert example_polymer.calculate_element_range() == 1588
actual_polymer = Polymer(actual_chain, actual_directions)
assert actual_polymer.fetch_chain() == ''.join(actual_chain)
for i in range(10):
actual_polymer.apply_rules()
print(f'Part 1 solution: {actual_polymer.calculate_element_range()}')
# part 2
for i in range(30):
example_polymer.apply_rules()
assert example_polymer.calculate_element_range() == 2188189693529
for i in range(30):
actual_polymer.apply_rules()
print(f'Part 2 solution: {actual_polymer.calculate_element_range()}')
```
#### File: 2021/day1/solution.py
```python
import os
from typing import List
DIRNAME = os.path.dirname(__file__)
def read_file(path:str) -> List[int]:
relative_path = os.path.join(DIRNAME, path)
with open(relative_path) as file:
return list(map(lambda x: int(x.strip()), file.readlines()))
def individual_increase(data: List[int]) -> int:
last = data[0]
increases = 0
for step in data[1:]:
if step > last:
increases+=1
last = step
return increases
def window_increase(data: List[int]) -> int:
increases = 0
for step_index in range(len(data[3:])):
step_ahead = data[3+step_index]
step_behind = data[step_index]
if step_ahead > step_behind:
increases +=1
return increases
if __name__ == '__main__':
example = read_file('example.txt')
actual = read_file('input.txt')
# part 1
assert individual_increase(example) == 7
print(f'Part 1 solution: {individual_increase(actual)}')
# part 2
assert window_increase(example) == 5
print(f'Part 2 solution: {window_increase(actual)}')
```
#### File: 2021/day7/solution.py
```python
import os
from collections import Counter
from typing import List
DIRNAME = os.path.dirname(__file__)
def read_file(path:str) -> List[List[int]]:
relative_path = os.path.join(DIRNAME, path)
with open(relative_path) as file:
return list(map(int, file.readline().strip().split(',')))
def calculate_fuel_used(positions:List[int], linear:bool=True) -> int:
positions_ = dict(Counter(positions))
min_pos = min(positions_.keys())
max_pos = max(positions_.keys())
return min([sum([fuel_use(pos, move_to, linear)*fish for pos, fish in positions_.items()]) for move_to in range(min_pos, max_pos +1)])
def fuel_use(pos:int, move_to:int, linear:bool) -> int:
if linear:
return abs(move_to - pos)
else:
n = abs(move_to - pos)
return n * (n+1) // 2
if __name__ == '__main__':
example = read_file('example.txt')
actual = read_file('input.txt')
# part 1
assert calculate_fuel_used(example) == 37
print(f'Part 1 solution: {calculate_fuel_used(actual)}')
# part 2
assert calculate_fuel_used(example, linear=False) == 168
print(f'Part 2 solution: {calculate_fuel_used(actual, linear=False)}')
```
#### File: 2021/day8/solution.py
```python
import os
import itertools
from collections import Counter
from typing import List, Tuple
DIRNAME = os.path.dirname(__file__)
def read_file(path:str) -> Tuple[List[List[List[str]]],List[List[List[str]]]]:
relative_path = os.path.join(DIRNAME, path)
with open(relative_path) as file:
input_, output_ = list(zip(*list(map(lambda x: x.strip().split(' | '), file.readlines()))))
parse = lambda y: list(map(lambda x: [[char for char in dig] for dig in x.split(' ')], y))
return parse(input_), parse(output_)
def find_easy_digits(data:List[List[List[str]]]) -> int:
counter = 0
for row in data:
counter += sum(list(map(lambda x: len(x) in [2,3,4,7], row)))
return counter
def find_output_sum(input_:List[List[List[str]]], output_:List[List[List[str]]]) -> int:
correct = {1:{'c','f'},
2:{'a','c','d','e','g'},
3:{'a','c','d','f','g'},
4:{'b','c','d','f'},
5:{'a','b','d','f','g'},
6:{'a','b','d','e','f','g'},
7:{'a','c','f'},
8:{'a','b','c','d','e','f','g'},
9:{'a','b','c','d','f','g'},
0:{'a','b','c','e','f','g'}}
freq = {chr(97+i):0 for i in range(7)}
for segments in correct.values():
for char in segments:
freq[char] +=1
scores = {sum([freq.get(char) for char in segments]): val for val, segments in correct.items()}
output_score = 0
for in_, out_ in zip(input_, output_):
local_freq = {chr(97+i):0 for i in range(7)}
for segments in in_:
for char in segments:
local_freq[char] += 1
local_scores = [scores.get(sum([local_freq.get(char) for char in segments])) * 10**(3-i) for i, segments in enumerate(out_)]
output_score += sum(local_scores)
return output_score
if __name__ == '__main__':
example_input, example_output = read_file('example.txt')
actual_input, actual_output = read_file('input.txt')
# part 1
assert find_easy_digits(example_output) == 26
print(f'Part 1 solution: {find_easy_digits(actual_output)}')
# part 2
assert find_output_sum(example_input, example_output) == 61229
print(f'Part 2 solution: {find_output_sum(actual_input, actual_output)}')
``` |
{
"source": "jonathonw/pypush2",
"score": 3
} |
#### File: pypush2/examples/uidemo.py
```python
import pypush2.ui
import pypush2.colors as colors
from pypush2.encoders import EncoderAction
def dialChanged(sender, action):
if action == EncoderAction.down:
newValue = sender.value - 0.1
if newValue >= sender.min_value:
sender.value = newValue
else:
sender.value = sender.min_value
elif action == EncoderAction.down_fast:
newValue = sender.value - 0.2
if newValue >= sender.min_value:
sender.value = newValue
else:
sender.value = sender.min_value
elif action == EncoderAction.up:
newValue = sender.value + 0.1
if newValue <= sender.max_value:
sender.value = newValue
else:
sender.value = sender.max_value
elif action == EncoderAction.up_fast:
newValue = sender.value + 0.2
if newValue <= sender.max_value:
sender.value = newValue
else:
sender.value = sender.max_value
def main():
pushUi = pypush2.ui.PushUi()
tab1 = pypush2.ui.Tab("First Tab")
tab1.add_action(pypush2.ui.Action("Action 1"))
tab1.add_action(pypush2.ui.Action("Action 2"))
tab1.add_action(pypush2.ui.Action("Action 3", colors.PUSH_COLORS_DICT["dup_green"]))
tab1.add_action(pypush2.ui.Action("Action 4"))
dial1 = pypush2.ui.Dial("Dial 1", 3.4, 0.0, 10.0)
dial1.on_change += dialChanged
tab1.add_dial(dial1)
dial2 = pypush2.ui.Dial("Dial 2", 10.0, 0.0, 10.0)
dial2.on_change += dialChanged
tab1.add_dial(dial2)
dial3 = pypush2.ui.Dial("Dial 3", 0.0, 0.0, 10.0)
dial3.on_change += dialChanged
tab1.add_dial(dial3)
dial4 = pypush2.ui.Dial("Dial 4", 0.001, 0.0, 10.0, colors.PUSH_COLORS_DICT["red"], colors.PUSH_COLORS_DICT["cedar_wood_finish"], colors.PUSH_COLORS_DICT["white"])
dial4.on_change += dialChanged
tab1.add_dial(dial4)
dial5 = pypush2.ui.Dial("Dial 5", 9.999, 0.0, 10.0)
dial5.on_change += dialChanged
tab1.add_dial(dial5)
pushUi.add_tab(tab1)
tab2 = pypush2.ui.Tab("Second Tab")
pushUi.add_tab(tab2)
tab3 = pypush2.ui.Tab("Third Tab", colors.PUSH_COLORS_DICT["hollywood_cerise"], colors.PUSH_COLORS_DICT["purple"], colors.PUSH_COLORS_DICT["lavender_magenta"])
dial31 = pypush2.ui.Dial("Some Dial", 5.9, 0.0, 10.0)
dial31.on_change += dialChanged
tab3.add_dial(dial31)
dial32 = pypush2.ui.Dial("Yet Another Dial", 2.1, 0.0, 10.0)
dial32.on_change += dialChanged
tab3.add_dial(dial32)
dial33 = pypush2.ui.Dial("This dial has a long name", 6.6, 0.0, 10.0)
dial33.on_change += dialChanged
tab3.add_dial(dial33)
dial34 = pypush2.ui.Dial("This dial is a different color", 5.0, 0.0, 10.0, colors.PUSH_COLORS_DICT["red"], colors.PUSH_COLORS_DICT["cedar_wood_finish"], colors.PUSH_COLORS_DICT["white"])
dial34.on_change += dialChanged
tab3.add_dial(dial34)
dial35 = pypush2.ui.Dial("Moo", 2.2, 0.0, 10.0)
dial35.on_change += dialChanged
tab3.add_dial(dial35)
pushUi.add_tab(tab3)
pushUi.run()
if __name__ == "__main__":
main()
```
#### File: pypush2/pypush2/device.py
```python
import time
import mido
import pypush2.buttons, pypush2.pads, pypush2.encoders, pypush2._utils.events
class Device(object):
def __init__(self):
self.on_button_press = pypush2._utils.events.EventHandler(self)
'''
Event raised when a button is pressed. Handlers should be of the form:
handler(sender, button)
'''
self.on_button_release = pypush2._utils.events.EventHandler(self)
'''
Event raised when a button is released. Handlers should be of the form:
handler(sender, button)
'''
self.on_pad_touch = pypush2._utils.events.EventHandler(self)
'''
Event raised when a pad is touched. Handlers should be of the form:
handler(sender, padNote, velocity)
'''
self.on_pad_release = pypush2._utils.events.EventHandler(self)
'''
Event raised when a pad is released. Handlers should be of the form:
handler(sender, padNote, velocity)
'''
# Encoders not yet implemented-- need further work on the encoder
# abstraction before they can make sense
self.on_encoder_touch = pypush2._utils.events.EventHandler(self)
self.on_encoder_release = pypush2._utils.events.EventHandler(self)
self.on_encoder_change = pypush2._utils.events.EventHandler(self)
self.on_unhandled_midi_message = pypush2._utils.events.EventHandler(self)
'''
Event raised when a MIDI message is received but isn't handled by
one of the other event types. Handlers should be of the form:
handler(sender, midiMessage)
where midiMessage is a mido.Message.
'''
self._midi_input = mido.open_input('Ableton Push 2 Live Port')
self._midi_output = mido.open_output('Ableton Push 2 Live Port')
def __del__(self):
self.close()
def close(self):
if not self._midi_input.closed:
self._midi_input.close()
if not self._midi_output.closed:
self._midi_output.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def send_midi_message(self, message):
'''
Sends a raw MIDI message. message should be a mido.Message.
'''
self._midi_output.send(message)
def listen(self):
'''
Starts listening for MIDI messages. This method blocks indefinitely,
until KeyboardInterrupt is received (^C).
'''
# Clear out queued messages (Push sends a bunch of stuff on startup
# that we don't care about, and will also queue up messages that
# happen while nothing is running to receive them.)
#for msg in self._midi_input.iter_pending():
# print msg
# pass
time.sleep(0.1)
while self._midi_input.receive(block = False) != None:
pass
try:
self._midi_input.callback = self._on_midi_message_received
# Setting callback spawns off a background thread within mido that
# actually handles MIDI data-- we need to loop here so we don't
# terminate prematurely.
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
def _on_midi_message_received(self, message):
if message.type == "control_change":
if pypush2.buttons.is_button(message.control):
if message.value == pypush2.buttons.BUTTON_PRESSED_VALUE:
self.on_button_press(pypush2.buttons.Buttons[message.control])
return
elif message.value == pypush2.buttons.BUTTON_RELEASED_VALUE:
self.on_button_release(pypush2.buttons.Buttons[message.control])
return
elif pypush2.encoders.cc_is_display_encoder(message.control):
encoderNumber = pypush2.encoders.get_encoder_number_from_cc(message.control)
action = pypush2.encoders.convert_encoder_cc_value(message.value)
self.on_encoder_change(encoderNumber, action)
elif message.type == "note_on":
if pypush2.pads.is_pad(message.note):
self.on_pad_touch(message.note, message.velocity)
return
elif pypush2.encoders.note_is_display_encoder(message.note):
if message.velocity == 127:
self.on_encoder_touch(message.note - pypush2.encoders._DISPLAY_ENCODER_BASE_NOTE)
elif message.velocity == 0:
self.on_encoder_release(message.note - pypush2.encoders._DISPLAY_ENCODER_BASE_NOTE)
elif message.type == "note_off":
if pypush2.pads.is_pad(message.note):
self.on_pad_release(message.note, message.velocity)
return
self.on_unhandled_midi_message(message)
```
#### File: pypush2/pypush2/encoders.py
```python
import flufl.enum
_TEMPO_ENCODER_NOTE = 10
_TEMPO_ENCODER_CC = 14
_SWING_ENCODER_NOTE = 9
_SWING_ENCODER_CC = 15
_DISPLAY_ENCODER_BASE_NOTE = 0
_DISPLAY_ENCODER_BASE_CC = 71
_DISPLAY_ENCODER_COUNT = 8
_MASTER_ENCODER_NOTE = 8
_MASTER_ENCODER_CC = 79
class EncoderAction(flufl.enum.IntEnum):
down_fast = -2
down = -1
no_change = 0
up = 1
up_fast = 2
def convert_encoder_cc_value(value):
if value > 64:
return value - 128
else:
return value
def note_is_encoder(note):
"""
Returns true if the given note number corresponds to an encoder touched message.
"""
return (note == _TEMPO_ENCODER_NOTE or
note == _SWING_ENCODER_NOTE or
(_DISPLAY_ENCODER_BASE_NOTE <= note and note < (_DISPLAY_ENCODER_BASE_NOTE + _DISPLAY_ENCODER_COUNT)) or
note == _MASTER_ENCODER_NOTE
)
def note_is_display_encoder(note):
"""
Returns true if the given note number corresponds to an encoder touched message for
one of the encoders above the display
"""
return (_DISPLAY_ENCODER_BASE_NOTE <= note and note < (_DISPLAY_ENCODER_BASE_NOTE + _DISPLAY_ENCODER_COUNT))
def cc_is_display_encoder(cc):
"""
Returns true if the given note number corresponds to an encoder touched message for
one of the encoders above the display
"""
return (_DISPLAY_ENCODER_BASE_CC <= cc and cc < (_DISPLAY_ENCODER_BASE_CC + _DISPLAY_ENCODER_COUNT))
def get_encoder_number_from_cc(cc):
return cc - _DISPLAY_ENCODER_BASE_CC
```
#### File: pypush2/pypush2/ui.py
```python
import pypush2.display, pypush2.colors, pypush2.device, pypush2.buttons, pypush2._utils.events
import cairocffi
import mido
import threading
from math import pi
from collections import deque
color = pypush2.colors.PUSH_COLORS_DICT["azure_radiance"]
dark_color = pypush2.colors.PUSH_COLORS_DICT["midnight"]
highlight_color = pypush2.colors.PUSH_COLORS_DICT["white"]
class PushUi(object):
def __init__(self):
self._displayThread = _DisplayThread()
def run(self):
self._displayThread.start()
try:
with pypush2.device.Device() as pushDevice:
self._setup_button_leds(pushDevice)
pushDevice.on_button_release += self._on_button_pressed
pushDevice.on_encoder_touch += self._on_encoder_touched
pushDevice.on_encoder_release += self._on_encoder_released
pushDevice.on_encoder_change += self._on_encoder_changed
self._displayThread.update_display_button_colors(pushDevice)
print "Listening to Push..."
pushDevice.listen()
finally:
self._displayThread.cancel()
self._displayThread.join()
def add_tab(self, new_tab):
self._displayThread.add_tab(new_tab)
def set_current_tab(self, tab_index):
self._displayThread.set_current_tab(tab_index)
def _setup_button_leds(self, pushDevice):
for i in range(0,128):
pushDevice.send_midi_message(mido.Message('control_change', channel=0, control=i, value=0))
for i in range(36,100):
pushDevice.send_midi_message(mido.Message('note_off', channel=0, note=i, velocity=0))
def _on_encoder_touched(self, sender, encoderNumber):
self._displayThread.highlight_gauge(encoderNumber)
def _on_encoder_released(self, sender, encoderNumber):
self._displayThread.unhighlight_gauge(encoderNumber)
def _on_button_pressed(self, sender, button):
if(pypush2.buttons.is_display_button(button) and
pypush2.buttons.get_display_button_group(button) == pypush2.buttons.DisplayButtonGroups.bottom):
def fn():
try:
self.set_current_tab(pypush2.buttons.get_display_button_index(button))
self._displayThread.update_display_button_colors(sender)
except IndexError:
pass
self._displayThread.run_operation_in_ui_thread(fn)
def _on_encoder_changed(self, sender, encoder, action):
def fn():
if len(self._displayThread._tabs) > 0:
tab = self._displayThread._tabs[self._displayThread._current_tab]
if encoder < len(tab._dials):
dial = tab._dials[encoder]
dial.on_change(action)
self._displayThread.run_operation_in_ui_thread(fn)
class Tab(object):
def __init__(self, title, active_color=color, inactive_color=dark_color, highlight_color=highlight_color):
self.title = title
self.active_color = active_color
self.inactive_color = inactive_color
self.highlight_color = highlight_color
self.tab_selected = pypush2._utils.events.EventHandler(self)
self.tab_deselected = pypush2._utils.events.EventHandler(self)
self._actions = []
self._dials = []
def add_action(self, new_action):
self._actions.append(new_action)
def add_dial(self, new_dial):
self._dials.append(new_dial)
def _draw(self, context, index, highlighted):
_drawLabel(context, self.title, highlighted, (5 + 120*index, pypush2.display.DisplayParameters.DISPLAY_HEIGHT-23), self.active_color)
class Action(object):
def __init__(self, title, color=None):
self.title = title
self.color = color
self.on_action = pypush2._utils.events.EventHandler(self)
def _draw(self, context, index, inherited_color):
color = self.color or inherited_color
_drawLabel(context, self.title, False, (5 + 120*index, 3), color)
class Dial(object):
def __init__(self, title, initial_value, min_value, max_value, active_color=None, inactive_color=None, highlight_color=None):
self.title = title
self.value = initial_value
self.min_value = min_value
self.max_value = max_value
self.active_color = active_color
self.inactive_color = inactive_color
self.highlight_color = highlight_color
self.on_change = pypush2._utils.events.EventHandler(self)
self.value_format = None
def _draw(self, context, index, highlighted, inherited_active_color, inherited_inactive_color, inherited_highlight_color):
active_color = self.active_color or inherited_active_color
inactive_color = self.inactive_color or inherited_inactive_color
highlight_color = self.highlight_color or inherited_highlight_color
_drawDial(context, self.title, self.value, self.min_value, self.max_value, highlighted, (5 + 120*index, 25), active_color, inactive_color, highlight_color)
labels = ["Label", "ayayay", "Long label with really long name and stuff", "Hi There!", "Another label", "More labels!!!!!", "Moo", "Stuff"]
class _DisplayThread(pypush2.display.DisplayRenderer):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
pypush2.display.DisplayRenderer.__init__(self, group=group, target=target, name=name, args=args, kwargs=kwargs)
self._tabs = []
self._highlightedGaugeLock = threading.Lock()
self._highlightedGauges = set()
self._current_tab = 0
self._operation_queue = deque()
def run_operation_in_ui_thread(self, operation):
self._operation_queue.appendleft(operation) # no locking needed here; deque operations are guaranteed to be atomic
def add_tab(self, new_tab):
self._tabs.append(new_tab)
def set_current_tab(self, tab_index):
if tab_index < 0 or tab_index >= len(self._tabs):
raise IndexError("Tab index out of bounds")
else:
self._current_tab = tab_index
def update_display_button_colors(self, pushDevice):
for i in range(8):
# Bottom row: tab buttons
if i < len(self._tabs):
if i == self._current_tab:
pushDevice.send_midi_message(mido.Message('control_change', channel=0, control=(pypush2.buttons.Buttons.bottom_display_0 + i), value=self._tabs[i].active_color.push_color_index))
else:
pushDevice.send_midi_message(mido.Message('control_change', channel=0, control=(pypush2.buttons.Buttons.bottom_display_0 + i), value=self._tabs[i].inactive_color.push_color_index))
else:
pushDevice.send_midi_message(mido.Message('control_change', channel=0, control=(pypush2.buttons.Buttons.bottom_display_0 + i), value=pypush2.colors.PUSH_COLORS_DICT["black"].push_color_index))
# Top row: action buttons
if len(self._tabs) > 0 and i < len(self._tabs[self._current_tab]._actions):
actionColor = self._tabs[self._current_tab]._actions[i].color or self._tabs[self._current_tab].active_color
pushDevice.send_midi_message(mido.Message('control_change', channel=0, control=(pypush2.buttons.Buttons.top_display_0 + i), value=actionColor.push_color_index))
else:
pushDevice.send_midi_message(mido.Message('control_change', channel=0, control=(pypush2.buttons.Buttons.top_display_0 + i), value=pypush2.colors.PUSH_COLORS_DICT["black"].push_color_index))
def highlight_gauge(self, gauge):
with self._highlightedGaugeLock:
self._highlightedGauges.add(gauge)
def unhighlight_gauge(self, gauge):
with self._highlightedGaugeLock:
self._highlightedGauges.discard(gauge)
def shouldHighlightGauge(self, gauge):
with self._highlightedGaugeLock:
return gauge in self._highlightedGauges
def paint(self, context):
# Perform scheduled UI thread operations
# Iterate across queue until it's out of elements
try:
while True:
operation = self._operation_queue.pop()
operation()
except IndexError:
pass
with context:
context.set_source_rgb(0, 0, 0)
context.paint()
for i, tab in enumerate(self._tabs):
tab._draw(context, i, i == self._current_tab)
if self._current_tab < len(self._tabs):
currentTab = self._tabs[self._current_tab]
for i, action in enumerate(currentTab._actions):
action._draw(context, i, currentTab.active_color)
for i, dial in enumerate(currentTab._dials):
dial._draw(context, i, self.shouldHighlightGauge(i), currentTab.active_color, currentTab.inactive_color, currentTab.highlight_color)
# for i in range(0, 8):
# _drawLabel(context, labels[i], i % 2 == 0, (5 + 120*i, 3))
# for i in range(0, 8):
# _drawLabel(context, labels[i], i % 2 == 1, (5 + 120*i, pypush2.display.DisplayParameters.DISPLAY_HEIGHT-23))
# for i in range(0, 8):
# _drawDial(context, labels[i], float(i) + 1.0, 0, 8, self.shouldHighlightGauge(i), (5 + 120*i, 25))
def _drawLabel(context, text, shouldFill, position, color):
with context:
context.rectangle(*position, width=110, height=20)
context.clip()
if shouldFill:
context.set_source_rgb(*color.rgb_color)
context.paint()
context.set_source_rgb(0, 0, 0)
else:
context.set_source_rgb(*color.rgb_color)
context.move_to(position[0] + 5, position[1] + 15)
context.select_font_face(family="Avenir")
context.set_font_size(13)
context.text_path(text)
context.fill()
def _drawDial(context, titleText, currentValue, minValue, maxValue, highlighted, position, primaryColor, secondaryColor, highlightColor):
positionX = position[0]
positionY = position[1]
with context:
dialColor = primaryColor.rgb_color
if highlighted:
dialColor = highlightColor.rgb_color
context.rectangle(*position, width=110, height=110)
context.clip()
# with context:
# context.set_source_rgb(*pypush2.colors.PUSH_COLORS_DICT["eerie_black"].rgb_color)
# context.paint()
context.set_source_rgb(*dialColor)
context.select_font_face(family="Avenir")
context.set_font_size(13)
_drawCenteredText(context, titleText, (positionX + (110.0 / 2.0), positionY + 18))
context.set_font_size(18)
_drawCenteredText(context, str(currentValue), (positionX + (110.0/2.0), positionY + 73))
valuePoint = (-5.0 * pi / 4.0) + ((currentValue - minValue) / (maxValue - minValue)) * (6.0 * pi / 4.0)
context.set_line_width(6)
context.set_line_cap(cairocffi.LINE_CAP_ROUND)
context.set_source_rgb(*secondaryColor.rgb_color)
context.arc(positionX + 55, positionY + 70, 35, valuePoint, pi / 4.0)
context.stroke()
context.set_source_rgb(*dialColor)
context.arc(positionX + 55, positionY + 70, 35, (-5.0 * pi / 4.0), valuePoint)
context.stroke()
def _drawCenteredText(context, text, bottomCenterPosition):
with context:
extents = context.text_extents(text)
context.move_to(bottomCenterPosition[0] - (extents[2] / 2.0), bottomCenterPosition[1])
context.text_path(text)
context.fill()
```
#### File: pypush2/tests/test_buttons.py
```python
import pytest
import pypush2.buttons
class TestButtons:
### Tests for buttons.is_display_button
def test_is_display_button_first_bottom_button(self):
button = pypush2.buttons.Buttons.bottom_display_0
assert pypush2.buttons.is_display_button(button) == True
def test_is_display_button_last_bottom_button(self):
button = pypush2.buttons.Buttons.bottom_display_7
assert pypush2.buttons.is_display_button(button) == True
def test_is_display_button_first_top_button(self):
button = pypush2.buttons.Buttons.top_display_0
assert pypush2.buttons.is_display_button(button) == True
def test_is_display_button_last_top_button(self):
button = pypush2.buttons.Buttons.top_display_7
assert pypush2.buttons.is_display_button(button) == True
def test_is_display_button_out_of_range_below_bottom(self):
button = pypush2.buttons.Buttons.bottom_display_0 - 1
assert pypush2.buttons.is_display_button(button) == False
def test_is_display_button_out_of_range_above_bottom(self):
button = pypush2.buttons.Buttons.bottom_display_7 + 1
assert pypush2.buttons.is_display_button(button) == False
def test_is_display_button_out_of_range_below_top(self):
button = pypush2.buttons.Buttons.top_display_0 - 1
assert pypush2.buttons.is_display_button(button) == False
def test_is_display_button_out_of_range_above_top(self):
button = pypush2.buttons.Buttons.top_display_7 + 1
assert pypush2.buttons.is_display_button(button) == False
### Tests for buttons.get_display_button_group
def test_get_group_first_bottom_button(self):
button = pypush2.buttons.Buttons.bottom_display_0
assert pypush2.buttons.get_display_button_group(button) == pypush2.buttons.DisplayButtonGroups.bottom
def test_get_group_last_bottom_button(self):
button = pypush2.buttons.Buttons.bottom_display_7
assert pypush2.buttons.get_display_button_group(button) == pypush2.buttons.DisplayButtonGroups.bottom
def test_get_group_first_top_button(self):
button = pypush2.buttons.Buttons.top_display_0
assert pypush2.buttons.get_display_button_group(button) == pypush2.buttons.DisplayButtonGroups.top
def test_get_group_last_top_button(self):
button = pypush2.buttons.Buttons.top_display_7
assert pypush2.buttons.get_display_button_group(button) == pypush2.buttons.DisplayButtonGroups.top
def test_get_group_out_of_range_below_bottom(self):
button = pypush2.buttons.Buttons.bottom_display_0 - 1
with pytest.raises(IndexError):
pypush2.buttons.get_display_button_group(button)
def test_get_group_out_of_range_above_bottom(self):
button = pypush2.buttons.Buttons.bottom_display_7 + 1
with pytest.raises(IndexError):
pypush2.buttons.get_display_button_group(button)
def test_get_group_out_of_range_below_top(self):
button = pypush2.buttons.Buttons.top_display_0 - 1
with pytest.raises(IndexError):
pypush2.buttons.get_display_button_group(button)
def test_get_group_out_of_range_above_top(self):
button = pypush2.buttons.Buttons.top_display_7 + 1
with pytest.raises(IndexError):
pypush2.buttons.get_display_button_group(button)
### Tests for buttons.get_display_button_index
def test_get_index_bottom(self):
button = pypush2.buttons.Buttons.bottom_display_3
assert pypush2.buttons.get_display_button_index(button) == 3
def test_get_index_top(self):
button = pypush2.buttons.Buttons.top_display_5
assert pypush2.buttons.get_display_button_index(button) == 5
def test_get_index_out_of_range_below_bottom(self):
button = pypush2.buttons.Buttons.bottom_display_0 - 1
with pytest.raises(IndexError):
pypush2.buttons.get_display_button_index(button)
def test_get_index_out_of_range_above_bottom(self):
button = pypush2.buttons.Buttons.bottom_display_7 + 1
with pytest.raises(IndexError):
pypush2.buttons.get_display_button_index(button)
def test_get_index_out_of_range_below_top(self):
button = pypush2.buttons.Buttons.top_display_0 - 1
with pytest.raises(IndexError):
pypush2.buttons.get_display_button_index(button)
def test_get_index_out_of_range_above_top(self):
button = pypush2.buttons.Buttons.top_display_7 + 1
with pytest.raises(IndexError):
pypush2.buttons.get_display_button_index(button)
``` |
{
"source": "jonathoy/django-access-and-compliance",
"score": 2
} |
#### File: django-access-and-compliance/django_access_and_compliance/signals.py
```python
import requests
import logging
from django.conf import settings
from django.contrib.auth.models import Group
from .config import access_and_compliance_group_name
logger = logging.getLogger(__name__)
def ensure_compliant(sender, request, user, **kwargs):
payload = {'uniqname': user.username}
response = requests.get(settings.ACCESS_AND_COMPLIANCE_VALIDATION_URL, params=payload)
response.raise_for_status()
group, created = Group.objects.get_or_create(name=access_and_compliance_group_name)
if _is_compliant(response):
group.user_set.add(user)
logger.debug(f'{user} has attested to the data access and compliance policy')
else:
group.user_set.remove(user)
logger.debug(f'{user} has not attested to data compliance policy')
def _is_compliant(response):
return response.text in settings.ACCESS_AND_COMPLIANCE_TRUTHY_VALUES
``` |
{
"source": "Jonathpc/company-flaskapp",
"score": 3
} |
#### File: company-flaskapp/app/main.py
```python
from app import app
from flask import request, render_template, flash, redirect
from app.forms import ContactForm, flash_errors
import os
import smtplib
@app.route("/")
def index():
return render_template("public/index.html")
@app.route("/contact", methods=("GET", "POST"))
def contact():
form = ContactForm()
flash_errors(form)
MAIL_PASS = request.environ['MAIL_PASS']
if form.validate_on_submit():
sender = "%s <%s>" % (form.name.data, form.email.data)
subject = "Subject: %s, %s" % (form.subject.data , form.email.data)
message = "From: %s, \n\n %s, \n\n %s" % (
sender, subject, form.body.data)
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login("sender_mail", MAIL_PASS)
server.sendmail("sender_mail",
"receiver_mail", message.encode('utf-8'))
flash("Your message was sent")
return redirect("/contact")
else:
flash_errors(form)
return render_template("public/contact.html", form=form)
``` |
{
"source": "jonaths/q_policy_plotter",
"score": 3
} |
#### File: jonaths/q_policy_plotter/main.py
```python
import numpy as np
from plotters.plotter import PolicyPlotter
import matplotlib.pyplot as plt
def main():
num_actions = 4
num_states = 20
# la entrada es una tabla con los estados en las filas y las acciones en las columnas
q_table = np.random.rand(num_states, num_actions)
print("INFO: q_table")
print(q_table)
# instanciar
# num_rows x num_cols debe ser igual a la longitud de q_table
# la coordenada (0,0) es q_table[0], la (1,0) es q_table[1]
plotter = PolicyPlotter(q_table, num_rows=4, num_cols=5)
# se pueden obtener summaries (max, min, o avg)
max_summary = plotter.summarize(op='max')
print("INFO: max summary")
print(max_summary)
# o recuperar la politica calculando la accion maxima de cada estado
# esto regresa el valor maximo y los indices de cada estado, si se pasan labels
# regresa esos labels en lugar del indice numerico
summary, indices = plotter.get_policy(labels=['a', 'b', 'c', 'd'])
print("INFO: policy")
print(summary)
print(indices)
# tambien se puede generar el mapa de politicas
fig, ax = plt.subplots()
# im, cbar, texts = plotter.build_policy(labels=['a', 'b', 'c', 'd'], show_numbers=False, cmap='Reds')
im, cbar = plotter.build_heatmap(index=0)
fig.tight_layout()
plt.show()
if __name__ == '__main__':
main()
```
#### File: q_policy_plotter/plotters/history.py
```python
class History:
"""
Clase para llevar los registros de transiciones de un MDP
"""
def __init__(self):
self.history = []
def insert(self, transition_tuple):
"""
Inserta una nueva transicion (s,a,r,s')
:param transition_tuple:
:return:
"""
if len(transition_tuple) != 4:
raise Exception('Invalid transition. Required (s,a,r,s')
self.history.append(transition_tuple)
return self
def get_total_reward(self):
"""
Recupera el total de recompensas recibidas en el historial.
:return:
"""
total = 0
for h in self.history:
total += h[2]
return total
def get_steps_count(self):
"""
Regresa el numero de transiciones almacenadas.
:return:
"""
return len(self.history)
def get_state_sequence(self):
"""
Recupera la secuencia de estados.
:return:
"""
if len(self.history) < 1:
return []
sequence = [self.history[0][0]]
for s in self.history:
sequence.append(s[3])
return sequence
def clear(self):
self.history = []
return self
```
#### File: q_policy_plotter/plotters/line_plotter.py
```python
import numpy as np
import sys
import matplotlib.pyplot as plt
class LinesPlotter:
def __init__(self, var_names_list, num_experiments, num_episodes):
pass
self.var_names_list = var_names_list
self.num_episodes = num_episodes
self.num_experiments = num_experiments
self.data = np.zeros(shape=(num_experiments, num_episodes, len(var_names_list)))
self.summary = None
self.std_dev = None
# avg + 1/2 std_dev
self.std_dev_top = None
# avg - 1/2 std_dev
self.std_dev_bottom = None
def add_episode_to_experiment(self, experiment, episode, var_values):
"""
Agrega un episodio al experimento
:param experiment: el numero de experimento
:param episode: el numero de episodio
:param var_values: una lista de valores en el mismo orden que var_name_list
:return:
"""
if experiment >= self.num_experiments or episode >= self.num_episodes:
return self
self.data[experiment, episode, :] = np.array(var_values)
return self
def calculate_summary(self, func='average'):
"""
Crea un summary de los valores almacenados en el historial
:param func: la operacion de summary (average, max)
:return:
"""
temp = np.transpose(self.data, (2, 1, 0))
print(temp)
if func == 'average':
self.summary = np.average(temp, axis=2)
self.std_dev = np.std(temp, axis=2)
half = self.std_dev / 2
self.std_dev_top = self.summary + half
self.std_dev_bottom = self.summary - half
elif func == 'max':
self.summary = np.max(temp, axis=2)
else:
raise Exception('Invalid summary operation')
return self
def get_var_from_summary(self, var_name):
"""
Wrapper para get_var_from_array que recupera la variable de self.summary
:param var_name:
:return:
"""
return self.get_var_from_array(var_name, self.summary)
def get_var_from_array(self, var_name, array):
"""
Usa los nombres creados en el constructor para recuperar los valores
de una variable
:param var_name: el nombre de la variable
:param array:
:return:
"""
# Si se pasa un valor regresa unicamente ese elemento
if var_name in self.var_names_list:
index = self.var_names_list.index(var_name)
summary_pickled = np.expand_dims(array[index], axis=0)
else:
raise Exception('Invalid var_name. ')
return summary_pickled
def get_var_from_data(self, var_name):
# Si se pasa un valor regresa unicamente ese elemento
if var_name in self.var_names_list:
index = self.var_names_list.index(var_name)
data_pickled = self.data[:, :, index]
else:
raise Exception('Invalid var_name. ')
return data_pickled
@staticmethod
def convolve(data, window_size):
"""
Media movil
:param data: la serie de datos
:param window_size: el tamano de la ventana movil
:return:
"""
data = np.pad(data, (window_size // 2, window_size - 1 - window_size // 2), mode='edge')
data = np.convolve(data, np.ones((window_size,)) / window_size, mode='valid')
return data
def get_var_line_plot(self, var_name_list, func, linestyle=None, window_size=20, fig=None,
ax=None, label=None):
"""
Genera una grafica lineal con los datos de los experimentos procesados con alguna funcion
summary,
:param label:
:param var_name_list: las variables del experimento a recuperar.
:param func: la funcion summary
:param linestyle: el estilo de linea de la serie
:param window_size: tamano de la ventana
:param fig: on objeto figura o lo crea
:param ax: un objeto ax o lo crea
:return:
"""
if fig is None and ax is None:
fig, ax = plt.subplots()
self.calculate_summary(func)
for var_name in var_name_list:
label = var_name if label is None else label
data = self.get_var_from_summary(var_name)[0]
data = self.convolve(data, window_size)
if linestyle is None:
ax.plot(range(self.num_episodes), data, label=label)
else:
ax.plot(range(self.num_episodes), data, label=label, linestyle=linestyle)
# grafica la desviacion estandar si es un promedio
if self.std_dev_top is not None and self.std_dev_bottom is not None:
top = self.get_var_from_array(var_name, self.std_dev_top)[0]
top = self.convolve(top, window_size)
bottom = self.get_var_from_array(var_name, self.std_dev_bottom)[0]
bottom = self.convolve(bottom, window_size)
ax.fill_between(range(self.num_episodes), bottom, top, alpha=0.25)
return fig, ax
def get_var_cummulative_matching_plot(self, var_name, matching_list, linestyle=None, fig=None,
ax=None, label=None):
"""
Recibe matching_list y verifica los valores en var_name de data. Genera un acumulado.
:param label:
:param ax:
:param fig:
:param linestyle:
:param var_name: el nombre de la variable a contar
:param matching_list: los valores que cuentan como 1
:return:
"""
if fig is None and ax is None:
fig, ax = plt.subplots()
data = self.get_var_from_data(var_name)
# compara con la lista y pone 1 si esta
test = np.isin(data, matching_list).astype(int)
# suma acumulada a traves de cada experimento
test = np.cumsum(test, axis=1)
# promedio de todos los experimentos
test = np.average(test, axis=0)
label = var_name if label is None else label
if linestyle is None:
ax.plot(range(self.num_episodes), test, label=label)
else:
ax.plot(range(self.num_episodes), test, label=label, linestyle=linestyle)
return fig, ax
def get_pie_plot(self, var_name, mapping_dict):
"""
Cuenta los elementos de una lista y los agrupa segun mapping_dict. Si alguna falta
le asigna la etiqueta other.
:param var_name:
:param mapping_dict:
:return:
"""
fig, ax = plt.subplots()
data = self.get_var_from_data(var_name)
data = data.astype(int).flatten().tolist()
data_copy = data[:]
labels = mapping_dict.keys()
count = []
for l in labels:
c = 0
for d in range(len(data)):
if data[d] in mapping_dict[l]:
c += 1
data_copy.remove(data[d])
count.append(c)
labels.append('other')
count.append(len(data_copy))
ax.pie(count, labels=labels, autopct='%1.1f%%', startangle=90)
ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
return fig, ax
def save_data(self, name):
numpy_data = np.array(self.data)
np.save(name, numpy_data)
return self
@staticmethod
def load_data(name, var_name_list=None, num_episodes=None):
"""
Carga un archivo de datos y crea un objeto LinesPlotter
:param num_episodes:
:param name: el nombre del archivo que contiene los datos guardado con save_data()
:param var_name_list: el nombre de los datos guardados. Si no esta les asigna un entero.
:return:
"""
if num_episodes is None:
data = np.load(name)
else:
data = np.load(name)[:, 0:num_episodes, :]
print(data.shape)
num_experiments = data.shape[0]
num_episodes = data.shape[1]
if var_name_list is None:
var_name_list = [str(i) for i in range(data.shape[2])]
elif len(var_name_list) != data.shape[2]:
raise Exception('Invalid var_name_list. Must have len' + str(data.shape[2]))
plotter = LinesPlotter(var_name_list, num_experiments, num_episodes)
plotter.data = data
print('Data loaded. Shape:')
print(data.shape)
return plotter
``` |
{
"source": "jonativia/gitlab_utils",
"score": 2
} |
#### File: jonativia/gitlab_utils/dashboard.py
```python
import gitlab
import json
from jinja2 import Environment, FileSystemLoader
import os
import datetime
from gitlab_helper import GitlabHelper
import click
@click.command()
@click.option(
'--skip-archived/--no-skip-archived',
default=True,
help='Skip archived projects in the generated dashboard',
)
@click.argument('gitlab-url')
@click.argument('private-token')
@click.option('--group-name', required=True, help='Group name to process')
def generate_dashboard(gitlab_url, private_token, skip_archived, group_name):
# private token or personal token authentication
gitlab_connection = GitlabHelper(gitlab_url, private_token)
datetime_object = datetime.datetime.now()
generated_time = datetime_object.strftime("%d/%m/%Y, %H:%M:%S")
project_id_list = gitlab_connection.get_project_id_list(group_name, skip_archived)
proj_list = []
for project in project_id_list:
print(project.attributes.get('name'))
manageable_project = gitlab_connection.get_manageable_project(project)
proj_data = manageable_project.attributes
proj_data["pipeline_status"] = "None"
proj_data["pipeline_web_url"] = "none"
proj_data["master_pipeline_status"] = "None"
proj_data["master_pipeline_web_url"] = "none"
if manageable_project.pipelines.list():
proj_data["pipeline_status"] = manageable_project.pipelines.list()[0].status
proj_data["pipeline_web_url"] = manageable_project.pipelines.list()[
0
].web_url
try:
last_master_pipeline = next(
x
for x in manageable_project.pipelines.list()
if x.attributes['ref']
== manageable_project.attributes['default_branch']
)
except:
pass
if last_master_pipeline:
proj_data["master_pipeline_status"] = last_master_pipeline.status
proj_data["master_pipeline_web_url"] = last_master_pipeline.web_url
proj_list.append(proj_data)
json_data = json.dumps(proj_list)
with open('data.json', 'w') as outfile:
json.dump(json_data, outfile)
root = os.path.dirname(os.path.abspath(__file__))
templates_dir = os.path.join(root, 'templates')
env = Environment(loader=FileSystemLoader(templates_dir))
template = env.get_template('dashboard.html')
filename = os.path.join(root, 'html', 'dashboard.html')
with open(filename, 'w') as fh:
fh.write(template.render(projects=proj_list, generated_time=generated_time))
if __name__ == '__main__':
generate_dashboard()
``` |
{
"source": "jonatron/django_media_uploader",
"score": 3
} |
#### File: django_media_uploader/media_uploader/files.py
```python
import fnmatch
from django.core.files.base import File
class ResumableFile(object):
def __init__(self, storage, kwargs):
self.storage = storage
self.kwargs = kwargs
self.chunk_suffix = "_part_"
@property
def chunk_exists(self):
"""Checks if the requested chunk exists.
"""
return self.storage.exists("%s%s%s" % (
self.filename,
self.chunk_suffix,
self.kwargs.get('resumableChunkNumber').zfill(4)
))
@property
def chunk_names(self):
"""Iterates over all stored chunks.
"""
chunks = []
files = sorted(self.storage.listdir('')[1])
for file in files:
if fnmatch.fnmatch(file, '%s%s*' % (self.filename,
self.chunk_suffix)):
chunks.append(file)
return chunks
def chunks(self):
"""Iterates over all stored chunks.
"""
chunks = []
files = sorted(self.storage.listdir('')[1])
for file in files:
if fnmatch.fnmatch(file, '%s%s*' % (self.filename,
self.chunk_suffix)):
yield self.storage.open(file, 'rb').read()
def delete_chunks(self):
[self.storage.delete(chunk) for chunk in self.chunk_names]
@property
def file(self):
"""Gets the complete file.
"""
if not self.is_complete:
raise Exception('Chunk(s) still missing')
return self
@property
def filename(self):
"""Gets the filename."""
filename = self.kwargs.get('resumableFilename')
if '/' in filename:
raise Exception('Invalid filename')
return "%s_%s" % (
self.kwargs.get('resumableTotalSize'),
filename
)
@property
def is_complete(self):
"""Checks if all chunks are already stored.
"""
return int(self.kwargs.get('resumableTotalSize')) == self.size
def process_chunk(self, file):
if not self.chunk_exists:
self.storage.save('%s%s%s' % (
self.filename,
self.chunk_suffix,
self.kwargs.get('resumableChunkNumber').zfill(4)
), file)
@property
def size(self):
"""Gets chunks size.
"""
size = 0
for chunk in self.chunk_names:
size += self.storage.size(chunk)
return size
``` |
{
"source": "jonaubf/flask-mongo-testapp",
"score": 2
} |
#### File: flask-mongo-testapp/testapp/manage.py
```python
from flask.ext.script import Manager, Server
from mainapp import create_app
app = create_app()
manager = Manager(app)
manager.add_command("runserver", Server(
use_debugger=True,
use_reloader=True,
host='0.0.0.0')
)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=3).run(tests)
if __name__ == "__main__":
manager.run()
``` |
{
"source": "jonaudet/CoSA",
"score": 2
} |
#### File: CoSA/beta/symtrim_UMI.py
```python
import os, sys
from Bio.Seq import Seq
from csv import DictReader, DictWriter
import pysam
import pdb
def clip_out(bam_filename, output_prefix, umi_len=5, extra_len=25, min_insert_len=300):
"""
:param bam_filename: BAM of post-LIMA (primer-trimmed) CCS sequences
M13-UMI-arm-insert-arm-UMI-M13
(M13 should already be removed)
"""
FIELDS = ['id', 'UMI1', 'UMI2', 'arm1', 'arm2', 'insert_len']
f1 = open(output_prefix + '.trimmed.csv', 'w')
writer1 = DictWriter(f1, FIELDS, delimiter=',')
writer1.writeheader()
reader = pysam.AlignmentFile(bam_filename, 'rb', check_sq=False)
f2 = pysam.AlignmentFile(output_prefix+'.trimmed.bam', 'wb', header=reader.header)
umi_extra_len = umi_len + extra_len
for r in reader:
d = r.to_dict()
if len(d['seq'])-umi_extra_len*2 < min_insert_len:
continue
umi1 = d['seq'][:umi_len]
umi2 = d['seq'][-umi_len:]
arm1 = d['seq'][umi_len:umi_extra_len]
arm2 = d['seq'][-umi_extra_len:-umi_len]
rec = {'id': r.qname,
'UMI1': umi1,
'UMI2': umi2,
'arm1': arm1,
'arm2': arm2,
'insert_len': len(d['seq'])-(umi_extra_len*2)}
writer1.writerow(rec)
d['seq'] = d['seq'][umi_extra_len:-umi_extra_len]
d['qual'] = d['qual'][umi_extra_len:-umi_extra_len]
new_tags = []
for tag in d['tags']:
if tag.startswith('zs:B'): # defunct CCS tag, don't use
pass
elif tag.startswith('dq:i:') or tag.startswith('iq:i:') or tag.startswith('sq:i:'):
tag = tag[umi_extra_len:-umi_extra_len]
new_tags.append(tag)
else:
new_tags.append(tag)
new_tags.append('rg:Z:' + umi2)
d['tags'] = new_tags
x = pysam.AlignedSegment.from_dict(d, r.header)
f2.write(x)
f1.close()
f2.close()
print("Output written to: {0}.trimmed.csv|bam".format(output_prefix))
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("bam_filename", help="CCS BAM with cDNA primer removed (post LIMA)")
parser.add_argument("output_prefix", help="Output prefix")
parser.add_argument("-u", "--umi_len", type=int, default=5, help="Length of UMI (default: 5)")
parser.add_argument("-e", "--extra_len", type=int, default=25, help="Length of arm sequence (default: 25)")
parser.add_argument("--min_insert_len", type=int, default=300, help="Minimum insert length (default: 300)")
args = parser.parse_args()
if args.extra_len < 0:
print("extra_len can't be a negative number!", file=sys.stderr)
sys.exit(-1)
if args.umi_len < 0:
print("umi_len can't be a negative number!", file=sys.stderr)
sys.exit(-1)
clip_out(args.bam_filename,
args.output_prefix,
args.umi_len,
args.extra_len,
args.min_insert_len)
``` |
{
"source": "jonauman/sample_code",
"score": 3
} |
#### File: jonauman/sample_code/parse_github.py
```python
from bs4 import BeautifulSoup
f = open('git.html','r')
file = f.read()
soup = BeautifulSoup(file, 'lxml')
comb = []
def dayify(x):
if 'weeks' in x:
days = int(filter(str.isdigit, str(x))) * 7
elif 'months' in x:
days = int(filter(str.isdigit, str(x))) * 30
else:
days = int(filter(str.isdigit, str(x)))
return days
for row in soup.findAll('table')[0].tbody.findAll('tr'):
x = row.findAll('td')[0].text.strip()
y = row.findAll('td')[4].text.strip()
comb.append((x,dayify(y)))
newlist = sorted(comb, key=lambda x: x[1])
for x in range(0, len(newlist)):
print "%s : %s days ago" % (newlist[x][0], newlist[x][1])
``` |
{
"source": "jonaustin/advisoryscan",
"score": 2
} |
#### File: django/utils/encoding.py
```python
from django.conf import settings
from django.utils.functional import Promise
def smart_unicode(s):
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call, or similar. It will
# already be encoded in DEFAULT_CHARSET on evaluation and we don't want
# to evaluate it until render time.
# FIXME: This isn't totally consistent, because it eventually returns a
# bytestring rather than a unicode object. It works wherever we use
# smart_unicode() at the moment. Fixing this requires work in the
# i18n internals.
return s
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = unicode(str(s), settings.DEFAULT_CHARSET)
elif not isinstance(s, unicode):
s = unicode(s, settings.DEFAULT_CHARSET)
return s
class StrAndUnicode(object):
"""
A class whose __str__ returns its __unicode__ as a bytestring
according to settings.DEFAULT_CHARSET.
Useful as a mix-in.
"""
def __str__(self):
return self.__unicode__().encode(settings.DEFAULT_CHARSET)
```
#### File: modeltests/choices/models.py
```python
from django.db import models
GENDER_CHOICES = (
('M', 'Male'),
('F', 'Female'),
)
class Person(models.Model):
name = models.CharField(maxlength=20)
gender = models.CharField(maxlength=1, choices=GENDER_CHOICES)
def __str__(self):
return self.name
__test__ = {'API_TESTS':"""
>>> a = Person(name='Adrian', gender='M')
>>> a.save()
>>> s = Person(name='Sara', gender='F')
>>> s.save()
>>> a.gender
'M'
>>> s.gender
'F'
>>> a.get_gender_display()
'Male'
>>> s.get_gender_display()
'Female'
"""}
```
#### File: modeltests/m2m_multiple/models.py
```python
from django.db import models
class Category(models.Model):
name = models.CharField(maxlength=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Article(models.Model):
headline = models.CharField(maxlength=50)
pub_date = models.DateTimeField()
primary_categories = models.ManyToManyField(Category, related_name='primary_article_set')
secondary_categories = models.ManyToManyField(Category, related_name='secondary_article_set')
class Meta:
ordering = ('pub_date',)
def __str__(self):
return self.headline
__test__ = {'API_TESTS':"""
>>> from datetime import datetime
>>> c1 = Category(name='Sports')
>>> c1.save()
>>> c2 = Category(name='News')
>>> c2.save()
>>> c3 = Category(name='Crime')
>>> c3.save()
>>> c4 = Category(name='Life')
>>> c4.save()
>>> a1 = Article(headline='Area man steals', pub_date=datetime(2005, 11, 27))
>>> a1.save()
>>> a1.primary_categories.add(c2, c3)
>>> a1.secondary_categories.add(c4)
>>> a2 = Article(headline='Area man runs', pub_date=datetime(2005, 11, 28))
>>> a2.save()
>>> a2.primary_categories.add(c1, c2)
>>> a2.secondary_categories.add(c4)
>>> a1.primary_categories.all()
[<Category: Crime>, <Category: News>]
>>> a2.primary_categories.all()
[<Category: News>, <Category: Sports>]
>>> a1.secondary_categories.all()
[<Category: Life>]
>>> c1.primary_article_set.all()
[<Article: Area man runs>]
>>> c1.secondary_article_set.all()
[]
>>> c2.primary_article_set.all()
[<Article: Area man steals>, <Article: Area man runs>]
>>> c2.secondary_article_set.all()
[]
>>> c3.primary_article_set.all()
[<Article: Area man steals>]
>>> c3.secondary_article_set.all()
[]
>>> c4.primary_article_set.all()
[]
>>> c4.secondary_article_set.all()
[<Article: Area man steals>, <Article: Area man runs>]
"""}
```
#### File: modeltests/m2o_recursive/models.py
```python
from django.db import models
class Category(models.Model):
name = models.CharField(maxlength=20)
parent = models.ForeignKey('self', null=True, related_name='child_set')
def __str__(self):
return self.name
__test__ = {'API_TESTS':"""
# Create a few Category objects.
>>> r = Category(id=None, name='Root category', parent=None)
>>> r.save()
>>> c = Category(id=None, name='Child category', parent=r)
>>> c.save()
>>> r.child_set.all()
[<Category: Child category>]
>>> r.child_set.get(name__startswith='Child')
<Category: Child category>
>>> print r.parent
None
>>> c.child_set.all()
[]
>>> c.parent
<Category: Root category>
"""}
```
#### File: modeltests/model_inheritance/models.py
```python
from django.db import models
class Place(models.Model):
name = models.CharField(maxlength=50)
address = models.CharField(maxlength=80)
def __str__(self):
return "%s the place" % self.name
class Restaurant(Place):
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
def __str__(self):
return "%s the restaurant" % self.name
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField()
def __str__(self):
return "%s the italian restaurant" % self.name
__test__ = {'API_TESTS':"""
# Make sure Restaurant has the right fields in the right order.
>>> [f.name for f in Restaurant._meta.fields]
['id', 'name', 'address', 'serves_hot_dogs', 'serves_pizza']
# Make sure ItalianRestaurant has the right fields in the right order.
>>> [f.name for f in ItalianRestaurant._meta.fields]
['id', 'name', 'address', 'serves_hot_dogs', 'serves_pizza', 'serves_gnocchi']
# Create a couple of Places.
>>> p1 = Place(name='Master Shakes', address='666 W. Jersey')
>>> p1.save()
>>> p2 = Place(name='Ace Hardware', address='1013 N. Ashland')
>>> p2.save()
# Test constructor for Restaurant.
>>> r = Restaurant(name='<NAME>', address='944 W. Fullerton', serves_hot_dogs=True, serves_pizza=False)
>>> r.save()
# Test the constructor for ItalianRestaurant.
>>> ir = ItalianRestaurant(name='<NAME>', address='1234 W. Elm', serves_hot_dogs=False, serves_pizza=False, serves_gnocchi=True)
>>> ir.save()
"""}
```
#### File: modeltests/reverse_lookup/models.py
```python
from django.db import models
class User(models.Model):
name = models.CharField(maxlength=200)
def __str__(self):
return self.name
class Poll(models.Model):
question = models.CharField(maxlength=200)
creator = models.ForeignKey(User)
def __str__(self):
return self.question
class Choice(models.Model):
name = models.CharField(maxlength=100)
poll = models.ForeignKey(Poll, related_name="poll_choice")
related_poll = models.ForeignKey(Poll, related_name="related_choice")
def __str(self):
return self.name
__test__ = {'API_TESTS':"""
>>> john = User(name="<NAME>")
>>> john.save()
>>> jim = User(name="<NAME>")
>>> jim.save()
>>> first_poll = Poll(question="What's the first question?", creator=john)
>>> first_poll.save()
>>> second_poll = Poll(question="What's the second question?", creator=jim)
>>> second_poll.save()
>>> new_choice = Choice(poll=first_poll, related_poll=second_poll, name="This is the answer.")
>>> new_choice.save()
>>> # Reverse lookups by field name:
>>> User.objects.get(poll__question__exact="What's the first question?")
<User: <NAME>>
>>> User.objects.get(poll__question__exact="What's the second question?")
<User: <NAME>>
>>> # Reverse lookups by related_name:
>>> Poll.objects.get(poll_choice__name__exact="This is the answer.")
<Poll: What's the first question?>
>>> Poll.objects.get(related_choice__name__exact="This is the answer.")
<Poll: What's the second question?>
>>> # If a related_name is given you can't use the field name instead:
>>> Poll.objects.get(choice__name__exact="This is the answer")
Traceback (most recent call last):
...
TypeError: Cannot resolve keyword 'choice' into field. Choices are: poll_choice, related_choice, id, question, creator
"""}
```
#### File: modeltests/str/models.py
```python
from django.db import models
class Article(models.Model):
headline = models.CharField(maxlength=100)
pub_date = models.DateTimeField()
def __str__(self):
return self.headline
__test__ = {'API_TESTS':"""
# Create an Article.
>>> from datetime import datetime
>>> a = Article(headline='Area man programs in Python', pub_date=datetime(2005, 7, 28))
>>> a.save()
>>> str(a)
'Area man programs in Python'
>>> a
<Article: Area man programs in Python>
"""}
```
#### File: dispatch/tests/test_robustapply.py
```python
from django.dispatch.robustapply import *
import unittest
def noArgument():
pass
def oneArgument(blah):
pass
def twoArgument(blah, other):
pass
class TestCases(unittest.TestCase):
def test01(self):
robustApply(noArgument)
def test02(self):
self.assertRaises(TypeError, robustApply, noArgument, "this")
def test03(self):
self.assertRaises(TypeError, robustApply, oneArgument)
def test04(self):
"""Raise error on duplication of a particular argument"""
self.assertRaises(TypeError, robustApply, oneArgument, "this", blah = "that")
def getSuite():
return unittest.makeSuite(TestCases,'test')
if __name__ == "__main__":
unittest.main()
```
#### File: regressiontests/fixtures_regress/models.py
```python
from django.db import models
class Animal(models.Model):
name = models.CharField(maxlength=150)
latin_name = models.CharField(maxlength=150)
def __str__(self):
return self.common_name
class Plant(models.Model):
name = models.CharField(maxlength=150)
class Meta:
# For testing when upper case letter in app name; regression for #4057
db_table = "Fixtures_regress_plant"
__test__ = {'API_TESTS':"""
>>> from django.core import management
# Load a fixture that uses PK=1
>>> management.load_data(['sequence'], verbosity=0)
# Create a new animal. Without a sequence reset, this new object
# will take a PK of 1 (on Postgres), and the save will fail.
# This is a regression test for ticket #3790.
>>> animal = Animal(name='Platypus', latin_name='Ornithorhynchus anatinus')
>>> animal.save()
"""}
```
#### File: regressiontests/invalid_admin_options/models.py
```python
from django.db import models
model_errors = ""
# TODO: Invalid admin options should not cause a metaclass error
##This should fail gracefully but is causing a metaclass error
#class BadAdminOption(models.Model):
# "Test nonexistent admin option"
# name = models.CharField(maxlength=30)
#
# class Admin:
# nonexistent = 'option'
#
#model_errors += """invalid_admin_options.badadminoption: "admin" attribute, if given, must be set to a models.AdminOptions() instance.
#"""
class ListDisplayBadOne(models.Model):
"Test list_display, list_display must be a list or tuple"
first_name = models.CharField(maxlength=30)
class Admin:
list_display = 'first_name'
model_errors += """invalid_admin_options.listdisplaybadone: "admin.list_display", if given, must be set to a list or tuple.
"""
class ListDisplayBadTwo(models.Model):
"Test list_display, list_display items must be attributes, methods or properties."
first_name = models.CharField(maxlength=30)
class Admin:
list_display = ['first_name','nonexistent']
model_errors += """invalid_admin_options.listdisplaybadtwo: "admin.list_display" refers to 'nonexistent', which isn't an attribute, method or property.
"""
class ListDisplayBadThree(models.Model):
"Test list_display, list_display items can not be a ManyToManyField."
first_name = models.CharField(maxlength=30)
nick_names = models.ManyToManyField('ListDisplayGood')
class Admin:
list_display = ['first_name','nick_names']
model_errors += """invalid_admin_options.listdisplaybadthree: "admin.list_display" doesn't support ManyToManyFields ('nick_names').
"""
class ListDisplayGood(models.Model):
"Test list_display, Admin list_display can be a attribute, method or property."
first_name = models.CharField(maxlength=30)
def _last_name(self):
return self.first_name
last_name = property(_last_name)
def full_name(self):
return "%s %s" % (self.first_name, self.last_name)
class Admin:
list_display = ['first_name','last_name','full_name']
class ListDisplayLinksBadOne(models.Model):
"Test list_display_links, item must be included in list_display."
first_name = models.CharField(maxlength=30)
last_name = models.CharField(maxlength=30)
class Admin:
list_display = ['last_name']
list_display_links = ['first_name']
model_errors += """invalid_admin_options.listdisplaylinksbadone: "admin.list_display_links" refers to 'first_name', which is not defined in "admin.list_display".
"""
class ListDisplayLinksBadTwo(models.Model):
"Test list_display_links, must be a list or tuple."
first_name = models.CharField(maxlength=30)
last_name = models.CharField(maxlength=30)
class Admin:
list_display = ['first_name','last_name']
list_display_links = 'last_name'
model_errors += """invalid_admin_options.listdisplaylinksbadtwo: "admin.list_display_links", if given, must be set to a list or tuple.
"""
# TODO: Fix list_display_links validation or remove the check for list_display
## This is failing but the validation which should fail is not.
#class ListDisplayLinksBadThree(models.Model):
# "Test list_display_links, must define list_display to use list_display_links."
# first_name = models.CharField(maxlength=30)
# last_name = models.CharField(maxlength=30)
#
# class Admin:
# list_display_links = ('first_name',)
#
#model_errors += """invalid_admin_options.listdisplaylinksbadthree: "admin.list_display" must be defined for "admin.list_display_links" to be used.
#"""
class ListDisplayLinksGood(models.Model):
"Test list_display_links, Admin list_display_list can be a attribute, method or property."
first_name = models.CharField(maxlength=30)
def _last_name(self):
return self.first_name
last_name = property(_last_name)
def full_name(self):
return "%s %s" % (self.first_name, self.last_name)
class Admin:
list_display = ['first_name','last_name','full_name']
list_display_links = ['first_name','last_name','full_name']
class ListFilterBadOne(models.Model):
"Test list_filter, must be a list or tuple."
first_name = models.CharField(maxlength=30)
class Admin:
list_filter = 'first_name'
model_errors += """invalid_admin_options.listfilterbadone: "admin.list_filter", if given, must be set to a list or tuple.
"""
class ListFilterBadTwo(models.Model):
"Test list_filter, must be a field not a property or method."
first_name = models.CharField(maxlength=30)
def _last_name(self):
return self.first_name
last_name = property(_last_name)
def full_name(self):
return "%s %s" % (self.first_name, self.last_name)
class Admin:
list_filter = ['first_name','last_name','full_name']
model_errors += """invalid_admin_options.listfilterbadtwo: "admin.list_filter" refers to 'last_name', which isn't a field.
invalid_admin_options.listfilterbadtwo: "admin.list_filter" refers to 'full_name', which isn't a field.
"""
class DateHierarchyBadOne(models.Model):
"Test date_hierarchy, must be a date or datetime field."
first_name = models.CharField(maxlength=30)
birth_day = models.DateField()
class Admin:
date_hierarchy = 'first_name'
# TODO: Date Hierarchy needs to check if field is a date/datetime field.
#model_errors += """invalid_admin_options.datehierarchybadone: "admin.date_hierarchy" refers to 'first_name', which isn't a date field or datetime field.
#"""
class DateHierarchyBadTwo(models.Model):
"Test date_hieracrhy, must be a field."
first_name = models.CharField(maxlength=30)
birth_day = models.DateField()
class Admin:
date_hierarchy = 'nonexistent'
model_errors += """invalid_admin_options.datehierarchybadtwo: "admin.date_hierarchy" refers to 'nonexistent', which isn't a field.
"""
class DateHierarchyGood(models.Model):
"Test date_hieracrhy, must be a field."
first_name = models.CharField(maxlength=30)
birth_day = models.DateField()
class Admin:
date_hierarchy = 'birth_day'
class SearchFieldsBadOne(models.Model):
"Test search_fields, must be a list or tuple."
first_name = models.CharField(maxlength=30)
class Admin:
search_fields = ('nonexistent')
# TODO: Add search_fields validation
#model_errors += """invalid_admin_options.seacrhfieldsbadone: "admin.search_fields", if given, must be set to a list or tuple.
#"""
class SearchFieldsBadTwo(models.Model):
"Test search_fields, must be a field."
first_name = models.CharField(maxlength=30)
def _last_name(self):
return self.first_name
last_name = property(_last_name)
class Admin:
search_fields = ['first_name','last_name']
# TODO: Add search_fields validation
#model_errors += """invalid_admin_options.seacrhfieldsbadone: "admin.search_fields" refers to 'last_name', which isn't a field.
#"""
class SearchFieldsGood(models.Model):
"Test search_fields, must be a list or tuple."
first_name = models.CharField(maxlength=30)
last_name = models.CharField(maxlength=30)
class Admin:
search_fields = ['first_name','last_name']
class JsBadOne(models.Model):
"Test js, must be a list or tuple"
name = models.CharField(maxlength=30)
class Admin:
js = 'test.js'
# TODO: Add a js validator
#model_errors += """invalid_admin_options.jsbadone: "admin.js", if given, must be set to a list or tuple.
#"""
class SaveAsBad(models.Model):
"Test save_as, should be True or False"
name = models.CharField(maxlength=30)
class Admin:
save_as = 'not True or False'
# TODO: Add a save_as validator.
#model_errors += """invalid_admin_options.saveasbad: "admin.save_as", if given, must be set to True or False.
#"""
class SaveOnTopBad(models.Model):
"Test save_on_top, should be True or False"
name = models.CharField(maxlength=30)
class Admin:
save_on_top = 'not True or False'
# TODO: Add a save_on_top validator.
#model_errors += """invalid_admin_options.saveontopbad: "admin.save_on_top", if given, must be set to True or False.
#"""
class ListSelectRelatedBad(models.Model):
"Test list_select_related, should be True or False"
name = models.CharField(maxlength=30)
class Admin:
list_select_related = 'not True or False'
# TODO: Add a list_select_related validator.
#model_errors += """invalid_admin_options.listselectrelatebad: "admin.list_select_related", if given, must be set to True or False.
#"""
class ListPerPageBad(models.Model):
"Test list_per_page, should be a positive integer value."
name = models.CharField(maxlength=30)
class Admin:
list_per_page = 89.3
# TODO: Add a list_per_page validator.
#model_errors += """invalid_admin_options.listperpagebad: "admin.list_per_page", if given, must be a positive integer.
#"""
class FieldsBadOne(models.Model):
"Test fields, should be a tuple"
first_name = models.CharField(maxlength=30)
last_name = models.CharField(maxlength=30)
class Admin:
fields = 'not a tuple'
# TODO: Add a fields validator.
#model_errors += """invalid_admin_options.fieldsbadone: "admin.fields", if given, must be a tuple.
#"""
class FieldsBadTwo(models.Model):
"""Test fields, 'fields' dict option is required."""
first_name = models.CharField(maxlength=30)
last_name = models.CharField(maxlength=30)
class Admin:
fields = ('Name', {'description': 'this fieldset needs fields'})
# TODO: Add a fields validator.
#model_errors += """invalid_admin_options.fieldsbadtwo: "admin.fields" each fieldset must include a 'fields' dict.
#"""
class FieldsBadThree(models.Model):
"""Test fields, 'classes' and 'description' are the only allowable extra dict options."""
first_name = models.CharField(maxlength=30)
last_name = models.CharField(maxlength=30)
class Admin:
fields = ('Name', {'fields': ('first_name','last_name'),'badoption': 'verybadoption'})
# TODO: Add a fields validator.
#model_errors += """invalid_admin_options.fieldsbadthree: "admin.fields" fieldset options must be either 'classes' or 'description'.
#"""
class FieldsGood(models.Model):
"Test fields, working example"
first_name = models.CharField(maxlength=30)
last_name = models.CharField(maxlength=30)
birth_day = models.DateField()
class Admin:
fields = (
('Name', {'fields': ('first_name','last_name'),'classes': 'collapse'}),
(None, {'fields': ('birth_day',),'description': 'enter your b-day'})
)
class OrderingBad(models.Model):
"Test ordering, must be a field."
first_name = models.CharField(maxlength=30)
last_name = models.CharField(maxlength=30)
class Admin:
ordering = 'nonexistent'
# TODO: Add a ordering validator.
#model_errors += """invalid_admin_options.orderingbad: "admin.ordering" refers to 'nonexistent', which isn't a field.
#"""
## TODO: Add a manager validator, this should fail gracefully.
#class ManagerBad(models.Model):
# "Test manager, must be a manager object."
# first_name = models.CharField(maxlength=30)
#
# class Admin:
# manager = 'nonexistent'
#
#model_errors += """invalid_admin_options.managerbad: "admin.manager" refers to 'nonexistent', which isn't a Manager().
#"""
```
#### File: regressiontests/test_client_regress/models.py
```python
from django.test import Client, TestCase
from django.core import mail
class AssertTemplateUsedTests(TestCase):
fixtures = ['testdata.json']
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/test_client_regress/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError, e:
self.assertEquals(str(e), "No templates used to render the response")
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/test_client/post_view/', {})
#
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty GET Template' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty POST Template' was not used to render the response. Actual template was 'Empty GET Template'")
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': '<EMAIL>',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError, e:
self.assertEquals(str(e), "Template 'form_view.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError, e:
self.assertEquals(str(e), "Template 'base.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError, e:
self.assertEquals(str(e), "Template 'Valid POST Template' was not one of the templates used to render the response. Templates used: ['form_view.html', 'base.html']")
class AssertRedirectsTests(TestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/permanent_redirect_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Reponse code was 301 (expected 302)")
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/test_client/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/test_client/some_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Reponse code was 301 (expected 302)")
def test_target_page(self):
"An assertion is raised if the reponse redirect target cannot be retrieved as expected"
response = self.client.get('/test_client/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, '/test_client/permanent_redirect_view/')
except AssertionError, e:
self.assertEquals(str(e), "Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)")
class AssertFormErrorTests(TestCase):
def test_unknown_form(self):
"An assertion is raised if the form name is unknown"
post_data = {
'text': '<NAME>',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'wrong_form' was not used to render the response")
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': '<NAME>',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'form' in context 0 does not contain the field 'some_field'")
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': '<NAME>',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'value' on form 'form' in context 0 contains no errors")
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])")
```
#### File: vulntracker/vulnalert/util.py
```python
def nicepass(alpha=6,numeric=2):
"""
returns a human-readble password (say <PASSWORD> instead of
a difficult to remember K8Yn9muL )
"""
import string
import random
vowels = ['a','e','i','o','u']
consonants = [a for a in string.ascii_lowercase if a not in vowels]
digits = string.digits
####utility functions
def a_part(slen):
ret = ''
for i in range(slen):
if i%2 ==0:
randid = random.randint(0,20) #number of consonants
ret += consonants[randid]
else:
randid = random.randint(0,4) #number of vowels
ret += vowels[randid]
return ret
def n_part(slen):
ret = ''
for i in range(slen):
randid = random.randint(0,9) #number of digits
ret += digits[randid]
return ret
####
fpl = alpha/2
if alpha % 2 :
fpl = int(alpha/2) + 1
lpl = alpha - fpl
start = a_part(fpl)
mid = n_part(numeric)
end = a_part(lpl)
return "%s%s%s" % (start,mid,end)
if __name__ == "__main__":
print nicepass(6,2)
``` |
{
"source": "jonaustin/pypingdom",
"score": 3
} |
#### File: pypingdom/pypingdom/gui.py
```python
from __future__ import absolute_import
import json
import requests
class PingdomGuiException(Exception):
def __init__(self, http_response):
content = json.loads(http_response.content)
self.status_code = http_response.status_code
self.status_desc = content['error']['statusdesc']
self.error_message = content['error']['errormessage']
super(PingdomGuiException, self).__init__(self.__str__())
def __repr__(self):
return 'pingdom.PingdomGuiException: HTTP `%s - %s` returned with message, "%s"' % \
(self.status_code, self.status_desc, self.error_message)
def __str__(self):
return self.__repr__()
class Gui():
def __init__(self, username, password):
self.__username = username
self.__password = password
self.session = requests.session()
_ua = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 " \
+ "(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
self.headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.8",
"cache-control": "no-cache",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"origin": "https://my.pingdom.com",
"pragma": "no-cache",
"referer": "https://my.pingdom.com/",
"user-agent": _ua,
"x-requested-with": "XMLHttpRequest"
}
def send(self, method, url, data=None, params=None):
if data is None:
data = {}
if params is None:
params = {}
response = self.session.request(method, url, data=data, params=params, headers=self.headers)
if response.status_code != 200:
raise PingdomGuiException(response)
return response
def login(self):
data = {"email": self.__username, "password": self.__password}
self.send('post', 'https://my.pingdom.com/', data)
```
#### File: pypingdom/pypingdom/maintenance.py
```python
from __future__ import absolute_import
import datetime
import time
class Maintenance(object):
def __init__(self, client, json=False, obj=False):
self.client = client
self._id = False
if json:
self.from_json(json)
elif obj:
self.from_obj(obj)
else:
raise Exception("Missing definition: use json or obj parameter")
def __repr__(self):
checks = []
for check in self.checks:
if check:
checks.append(check.name)
else:
checks.append("<deleted check>")
return """
pingdom.Maintenance <{0}>
name: {1}
start: {2}
end: {3}
checks: {4}
""".format(self._id,
self.name,
self.start,
self.stop,
", ".join(checks))
def to_json(self):
check_ids = [str(check._id) for check in self.checks if check]
data = {
# "__csrf_magic": "",
# "id": "",
"description": self.name,
"from-date": "{0}.{1}.{2}.".format(self.start.year, self.start.month, self.start.day),
"from-time": "{0}:{1}".format(self.start.hour, self.start.minute),
"to-date": "{0}.{1}.{2}.".format(self.stop.year, self.stop.month, self.stop.day),
"to-time": "{0}:{1}".format(self.stop.hour, self.stop.minute),
"start": int(time.mktime(self.start.timetuple())),
"end": int(time.mktime(self.stop.timetuple())),
"checks": "[{0}]".format(",".join(check_ids))
}
return data
def from_json(self, obj):
self._id = int(obj['id'])
self.name = obj["description"]
self.start = datetime.datetime.fromtimestamp(obj['from'])
self.stop = datetime.datetime.fromtimestamp(obj['to'])
self.checks = [self.client.get_check(_id=int(x)) for x in obj['checks']['uptime']]
def from_obj(self, obj):
self.name = obj["name"]
self.start = obj['start']
self.stop = obj['stop']
self.checks = obj['checks']
``` |
{
"source": "Jonax79409/MakeMeKnown",
"score": 3
} |
#### File: deep-malware-analysis/model/dataset_builder.py
```python
import json
import sys
import glob
import os
from .transformer import PETransformer
class DatasetBuilder(object):
"""
Accept directory input,
directory_format:
malware/
trojan/
backdoor/
rootkit/
benign/
extract features from PE files in the directory,
serialize extracted features,
save as json to be used later
"""
MALWARE_TYPES = ['trojan', 'backdoor', 'rootkit']
MALWARE_DIR = 'malwares'
BENIGN_DIR = 'benign'
BUILD_DIR = 'build'
def __init__(self, data_dir, *args, **kwargs):
self._build_dataset(data_dir)
@staticmethod
def _create_dir(path):
os.makedirs(path)
@staticmethod
def _path(*args, **kwargs):
return os.path.join(*args, **kwargs)
@staticmethod
def _exists(*args, **kwargs):
return os.path.exists(*args, **kwargs)
def _build_dataset(self, data_dir):
BUILD_PATH = self._path(data_dir, self.BUILD_DIR)
if not self._exists(BUILD_PATH):
self._create_dir(BUILD_PATH)
print('[+] Building Malwares')
# Iterate over the different malware types
for mal_type in self.MALWARE_TYPES:
print(f'[+] Building {mal_type} malwares')
MALWARE_PATH = self._path(data_dir, self.MALWARE_DIR, mal_type)
BUILD_MALWARE_PATH = self._path(BUILD_PATH, self.MALWARE_DIR, mal_type)
if not self._exists(BUILD_MALWARE_PATH):
self._create_dir(BUILD_MALWARE_PATH)
# Add all serialized malwares to a single file <data_dir>/build/malware/<malware_type>/<mal_type>_data.dat
build_file_name = self._path(BUILD_MALWARE_PATH, f'{mal_type}_data.jsonl')
if self._exists(build_file_name):
os.remove(build_file_name)
# Add all malwares
for dirpath, dnames, fnames in os.walk(MALWARE_PATH):
for file_name in fnames:
print(f'[+] Building {file_name}')
try:
transformed = PETransformer(self._path(MALWARE_PATH, file_name))
except:
continue
data_dict = transformed.feature_dict
with open(build_file_name, 'a') as build_file:
build_file.write(json.dumps(data_dict))
build_file.write('\n')
# Iterate over benign files
print('[+] Building benign data')
BUILD_BENIGN_PATH = self._path(BUILD_PATH, self.BENIGN_DIR)
BENIGN_PATH = self._path(data_dir, self.BENIGN_DIR)
if not self._exists(BUILD_BENIGN_PATH):
self._create_dir(BUILD_BENIGN_PATH)
build_file_name = self._path(BUILD_BENIGN_PATH, f'{self.BENIGN_DIR}_data.jsonl')
for file_name in glob.glob(self._path(BENIGN_PATH, './*.*')):
print(f'[+] Building {file_name}')
transformed = PETransformer(file_name)
data_dict = transformed.feature_dict
with open(build_file_name, 'a') as build_file:
build_file.write(json.dumps(data_dict))
build_file.write('\n')
class DatasetReader(object):
"""
Read built files.
"""
MALWARE_TYPES = ['trojan', 'backdoor', 'rootkit']
MALWARE_DIR = 'malwares'
BENIGN_DIR = 'benign'
BUILD_DIR = 'build'
def __init__(self, data_dir, *args, **kwargs):
self.read_data = None
self._read_dataset(data_dir)
@staticmethod
def _path(*args, **kwargs):
return os.path.join(*args, **kwargs)
@staticmethod
def _exists(*args, **kwargs):
return os.path.exists(*args, **kwargs)
def _read_dataset(self, data_dir):
READ_DATA = {m: [] for m in self.MALWARE_TYPES}
BUILD_PATH = self._path(data_dir, self.BUILD_DIR)
# Iterate over the different malware types
for mal_type in self.MALWARE_TYPES:
READ_DATA[mal_type] = []
BUILD_MALWARE_PATH = self._path(BUILD_PATH, self.MALWARE_DIR, mal_type)
# Read from data file <data_dir>/build/malware/<malware_type>/<mal_type>_data.dat
build_file_name = self._path(BUILD_MALWARE_PATH, f'{mal_type}_data.jsonl')
if not self._exists(build_file_name):
continue
with open(build_file_name, 'r') as build_file:
lines = [line.strip() for line in build_file.readlines()]
for mal_data in lines:
READ_DATA[mal_type].append(PETransformer(raw_features=mal_data).vector)
# Iterate over benign files
BUILD_BENIGN_PATH = self._path(BUILD_PATH, self.BENIGN_DIR)
build_file_name = self._path(BUILD_BENIGN_PATH, f'{self.BENIGN_DIR}_data.jsonl')
READ_DATA[self.BENIGN_DIR] = []
if not self._exists(build_file_name):
self.read_data = READ_DATA
return READ_DATA
with open(build_file_name, 'r') as build_file:
lines = [line.strip() for line in build_file.readlines()]
for mal_data in lines:
READ_DATA[self.BENIGN_DIR].append(PETransformer(raw_features=mal_data).vector)
self.read_data = READ_DATA
return READ_DATA
if __name__ == '__main__':
try:
dir_path = sys.argv[1]
except:
dir_path = 'dataset'
builder = DatasetBuilder(dir_path)
``` |
{
"source": "jonay2000/Emulator",
"score": 3
} |
#### File: jonay2000/Emulator/harddisk_interface.py
```python
import queue
class HDD:
def __init__(self, filename):
self.file = open(filename,"rb+")
self.moveto()
self.operations = queue.Queue()
from status import STATUS
self.STATUS = STATUS
def moveto(self,pos=1):
self.file.seek(pos,0)
def move(self,pos=1):
try:
self.file.seek(pos-1,1)
except:
return IOError()
def read(self):
try:
return int.from_bytes(self.file.read(1),byteorder='little')
except IndexError:
print("end of hdd reached. attempting shutdown...")
self.STATUS["INTERRUPT"] = True
return 0
def write(self,value):
try:
self.file.write(bytes([value]))
except IndexError:
print("end of hdd reached. attempting shutdown...")
self.STATUS["INTERRUPT"] = True
def update(self):
if not self.operations.empty():
a = self.operations.get()
if a["item"][0] == "READ":
self.moveto(a["item"][1])
a["callback"](self.read())
elif a["item"][0] == "READNEXT":
self.move(1)
a["callback"](self.read())
elif a["item"][0] == "WRITE":
self.moveto(a["item"][1])
self.write(a["item"][2])
else:
return
def new_operation(self,item,callback):
self.operations.put({"item":item,"callback":callback})
``` |
{
"source": "jonay2000/research-project",
"score": 3
} |
#### File: final_poster/inmatchings/generate.py
```python
import itertools
from typing import Iterator
from math import sin, cos, radians
import pygame
from pygame import gfxdraw
import imageio
from tqdm import tqdm
def lerp(val, srclow, srcup, destlow, destup):
return (val - srclow) / (srcup - srclow) * (destup - destlow) + destlow
def Move(rotation, steps, position):
x_pos = cos(radians(rotation)) * steps + position[0]
y_pos = sin(radians(rotation)) * steps + position[1]
return x_pos, y_pos
def DrawThickLine(surface, point1, point2, thickness, color):
from math import degrees, atan2
angle = degrees(atan2(point1[1] - point2[1], point1[0] - point2[0]))
vertices = list()
vertices.append(Move(angle - 90, thickness, point1))
vertices.append(Move(angle + 90, thickness, point1))
vertices.append(Move(angle + 90, thickness, point2))
vertices.append(Move(angle - 90, thickness, point2))
gfxdraw.aapolygon(surface, vertices, color)
gfxdraw.filled_polygon(surface, vertices, color)
def matchings(indices: list[int]) -> Iterator[list[int]]:
yield from itertools.permutations(indices)
def recolor(img, color):
w, h = img.get_size()
r, g, b = color
for x in range(w):
for y in range(h):
a = img.get_at((x, y))[3]
img.set_at((x, y), pygame.Color(r, g, b, a))
return img
pygame.init()
n = 3
width = 1000
height = (width / 4) * n
node_size = height / (3 * n)
window = pygame.display.set_mode((int(width), int(height)))
colors = [
(204, 68, 82),
(36, 97, 128),
(128, 29, 39),
(47, 152, 204),
(17, 128, 42),
(67, 204, 98),
(57, 204, 174),
(102, 82, 74),
(128, 124, 23),
(204, 111, 78),
]
bg = (0x22, 0x27, 0x2e)
line_color = (0x72, 0x31, 0x47)
black = (0, 0, 0)
clock = pygame.time.Clock()
images = []
goal_img = pygame.image.load("goal.png").convert_alpha()
goal_imgs = {
i: pygame.transform.scale(recolor(goal_img, i), (int(node_size * 2), int(node_size * 2)))
for i in tqdm(colors[:1])
}
timesteps = 20
starts = list(range(n))
for t in range(0, 2):
for progress in range(timesteps + 1):
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit(0)
window.fill(bg)
goals = []
for i in range(n):
y = (height * i / n) + (height / n / 2)
x1 = node_size + (width / 8)
goals.append((x1, y))
window.blit(goal_imgs[colors[0]], (x1 - node_size, y - node_size))
if t == 1:
goals = [goals[int(n / 2)]]
s = (width - node_size, height / 2)
for g in goals:
ax = lerp(progress, 0, timesteps, s[0], g[0])
ay = lerp(progress, 0, timesteps, s[1], g[1])
if t == 1:
DrawThickLine(window, s, (ax, ay), 3, colors[4])
c = (*colors[0], 255)
else:
c = (*colors[0], int(255 * 0.6))
print(c)
pygame.draw.rect(window, colors[0], (s[0] - node_size / 2, s[1] - node_size / 2, node_size, node_size))
pygame.draw.circle(window, black, (ax, ay), (node_size * 0.5) + 2, width=0)
pygame.draw.circle(window, black, (ax, ay), (node_size * 0.5) + 1, width=0)
gfxdraw.filled_circle(window, int(ax), int(ay), int((node_size * 0.5)), c)
pygame.display.flip()
data = pygame.surfarray.array3d(window)
data = data.swapaxes(0, 1)
images.append(data)
clock.tick(20)
imageio.mimsave('output.gif', images, fps=20)
```
#### File: benchmarks/comparison/icts.py
```python
from mapfmclient import Problem, Solution
import sys
import pathlib
from python.benchmarks.comparison.util import get_src_modules, solve_with_modules
this_dir = pathlib.Path(__file__).parent.absolute()
from python.algorithm import MapfAlgorithm
sys.path.insert(0, str(this_dir / "icts-m"))
from src.ictsm .solver import Solver
from src.ictsm .solver_config import SolverConfig
sys.path.pop(0)
modules = get_src_modules()
class ICTS(MapfAlgorithm):
def solve(self, problem: Problem) -> Solution:
def solve_icts():
config = SolverConfig(
name="Exh+E+B+O+ID",
combs=3,
prune=True,
enhanced=True,
pruned_child_gen=False,
id=False,
conflict_avoidance=True,
enumerative=True,
debug=False,
sort_matchings=True,
budget_search=True,
)
return Solver(config, problem)()
solve = solve_with_modules(modules, solve_icts)
return Solution.from_paths(solve)
@property
def name(self) -> str:
return "ICTS* (Thom)"
``` |
{
"source": "jonaylor89/cyberpunk",
"score": 3
} |
#### File: cyberpunk/cyberpunk/config.py
```python
import os
from typing import Optional
class CyberpunkConfigException(Exception):
pass
class CyberpunkConfig:
"""Global configuration object for Cyberpunk"""
def __init__(
self,
audio_path: str = "local",
local_storage_base_dir: Optional[str] = "testdata/",
local_results_base_dir: Optional[str] = None,
s3_loader_bucket: Optional[str] = None,
s3_loader_base_dir: Optional[str] = None,
s3_storage_bucket: Optional[str] = None,
s3_storage_base_dir: Optional[str] = None,
s3_results_bucket: Optional[str] = None,
s3_results_base_dir: Optional[str] = None,
google_application_credentials: Optional[str] = None,
gcs_loader_bucket: Optional[str] = None,
gcs_loader_base_dir: Optional[str] = None,
gcs_storage_bucket: Optional[str] = None,
gcs_storage_base_dir: Optional[str] = None,
gcs_results_bucket: Optional[str] = None,
gcs_results_base_dir: Optional[str] = None,
jaeger_tracing: Optional[bool] = False,
jaeger_agent_hostname: Optional[str] = "jaeger",
jaeger_agent_port: Optional[int] = 6831,
gcp_tracing: Optional[bool] = False,
):
# TODO: validation lol
# local | s3 | audius
self.audio_path = audio_path
if (
"local" in self.audio_path.split(":")
and local_storage_base_dir is None
):
raise CyberpunkConfigException(
"local_storage_base_dir must be configured if `local` in audio_path",
)
self.local_storage_base_dir = local_storage_base_dir
self.local_results_base_dir = local_results_base_dir
if "s3" in self.audio_path.split(":") and s3_storage_bucket is None:
raise CyberpunkConfigException(
"s3_storage_bucket must be configured if `s3` in audio_path",
)
self.s3_loader_bucket = s3_loader_bucket
self.s3_loader_base_dir = s3_loader_base_dir
self.s3_storage_bucket = s3_storage_bucket
self.s3_storage_base_dir = s3_storage_base_dir
self.s3_results_bucket = s3_results_bucket
self.s3_results_base_dir = s3_results_base_dir
if "gcs" in self.audio_path.split(":") and gcs_storage_bucket is None:
raise CyberpunkConfigException(
"gcs_storage_bucket must be configured if `gcs` in audio_path",
)
if (
"gcs" in self.audio_path.split(":")
and google_application_credentials is None
):
raise CyberpunkConfigException(
"google_application_credentials must be configured if `gcs` in audio_path",
)
self.google_application_credentials = google_application_credentials
self.gcs_loader_bucket = gcs_loader_bucket
self.gcs_loader_base_dir = gcs_loader_base_dir
self.gcs_storage_bucket = gcs_storage_bucket
self.gcs_storage_base_dir = gcs_storage_base_dir
self.gcs_results_bucket = gcs_results_bucket
self.gcs_results_base_dir = gcs_results_base_dir
self.jaeger_tracing = jaeger_tracing
self.jaeger_agent_hostname = jaeger_agent_hostname
self.jaeger_agent_port = jaeger_agent_port
self.gcp_tracing = gcp_tracing
def __repr__(self):
return (
f"CyberpunkConfig ( "
f"audio_path: {self.audio_path}, "
f"local_storage_base_dir: {self.local_storage_base_dir}, "
f"local_results_base_dir: {self.local_results_base_dir} "
f")"
)
_CYBERPUNK_CONFIG: Optional[CyberpunkConfig] = None
def configure_config(provided_config: Optional[CyberpunkConfig] = None):
global _CYBERPUNK_CONFIG
if provided_config is not None:
_CYBERPUNK_CONFIG = provided_config
else:
_CYBERPUNK_CONFIG = CyberpunkConfig(
audio_path=os.environ.get("AUDIO_PATH", "local"),
local_storage_base_dir=os.environ.get(
"LOCAL_STORAGE_BASE_DIR",
"testdata/",
),
local_results_base_dir=os.environ.get(
"LOCAL_RESULTS_BASE_DIR",
None,
),
s3_loader_bucket=os.environ.get("S3_LOADER_BUCKET", None),
s3_loader_base_dir=os.environ.get("S3_LOADER_BASE_DIR", None),
s3_storage_bucket=os.environ.get("S3_STORAGE_BUCKET", None),
s3_storage_base_dir=os.environ.get("S3_STORAGE_BASE_DIR", None),
s3_results_bucket=os.environ.get("S3_RESULTS_BUCKET", None),
s3_results_base_dir=os.environ.get("S3_RESULTS_BASE_DIR", None),
google_application_credentials=os.environ.get(
"GOOGLE_APPLICATION_CREDENTIALS",
None,
),
gcs_loader_bucket=os.environ.get("GCS_LOADER_BUCKET", None),
gcs_loader_base_dir=os.environ.get("GCS_LOADER_BASE_DIR", None),
gcs_storage_bucket=os.environ.get("GCS_STORAGE_BUCKET", None),
gcs_storage_base_dir=os.environ.get("GCS_STORAGE_BASE_DIR", None),
gcs_results_bucket=os.environ.get("GCS_RESULTS_BUCKET", None),
gcs_results_base_dir=os.environ.get("GCS_RESULTS_BASE_DIR", None),
jaeger_tracing=os.environ.get(
"JAEGER_TRACING_ENABLED",
"0",
).lower()
in ("true", "1", "t"),
jaeger_agent_hostname=os.environ.get(
"JAEGER_AGENT_HOSTNAME",
"jaeger",
),
jaeger_agent_port=int(os.environ.get("JAEGER_AGENT_PORT", "6831")),
gcp_tracing=os.environ.get("GCP_TRACING_ENABLED", "0").lower()
in ("true", "1", "t"),
)
def get_config() -> CyberpunkConfig:
global _CYBERPUNK_CONFIG
if _CYBERPUNK_CONFIG is None:
configure_config()
assert _CYBERPUNK_CONFIG is not None
return _CYBERPUNK_CONFIG
```
#### File: cyberpunk/storage/__init__.py
```python
import logging
from typing import Dict, Optional, Protocol, Tuple, Type
from uuid import UUID
from pydub import AudioSegment
from cyberpunk.config import get_config
from cyberpunk.storage.audius import AudiusStorage
from cyberpunk.storage.gcs import GCSStorage
from cyberpunk.storage.http import HttpLoader
from cyberpunk.storage.local import LocalStorage
from cyberpunk.storage.s3 import S3Storage
class AudioStorageProtocol(Protocol):
def __init__(self):
"""Declare variables like base dir"""
def __contains__(self, element):
"""same as contains"""
def contains(self, key: str) -> bool:
"""checks if a given key is in the audio store"""
def get_segment(self, key: str) -> Tuple[AudioSegment, str]:
"""get an audio segment from storage"""
class AudioStorage:
def __init__(
self,
audio_path: str,
local_storage_base_dir: Optional[str] = None,
local_results_base_dir: Optional[str] = None,
s3_loader_bucket: Optional[str] = None,
s3_loader_base_dir: Optional[str] = None,
s3_storage_bucket: Optional[str] = None,
s3_storage_base_dir: Optional[str] = None,
s3_results_bucket: Optional[str] = None,
s3_results_base_dir: Optional[str] = None,
):
"""Declare variables like base dir"""
self.http_loader: HttpLoader = HttpLoader()
self.storage_table: Dict[str, Type[AudioStorageProtocol]] = {
"local": LocalStorage,
"s3": S3Storage,
"gcs": GCSStorage,
"audius": AudiusStorage,
}
# local:s3:audius => [LocalStorage(), S3Storage(), AudiusStorage()]
self.audio_path = list(
map(
lambda x: self.storage_table[x](),
audio_path.split(":"),
),
)
def __str__(self):
return ""
def __repr__(self):
return ""
def __contains__(self, element):
return self.contains(element)
def contains(self, key: str) -> bool:
"""checks if a given key is in the audio store"""
for storage in self.audio_path:
if key in storage:
return True
return False
def get_segment(self, key: str) -> Tuple[AudioSegment, str]:
"""get an audio segment from storage"""
if key.startswith("https://") or key.startswith("http://"):
return self.http_loader.get_segment(key)
for storage in self.audio_path:
if storage.contains(key):
return storage.get_segment(key)
raise KeyError(
f"key `{key}` not found in any configured audio store ({self.audio_path})",
)
def save_segment(
self,
request_id: UUID,
segment: AudioSegment,
file_type: str,
) -> str:
"""save an audio segment to storage and return the link to it
@param request_id: the id for the request and the tmp filename
@param segment: the audio data to be saved
@param file_type: the audio type to encode the segment
@return: the tmp location where the processed audio is located
"""
config = get_config()
processed_filename = f"{request_id}.{file_type}"
if (
config.gcs_results_bucket is None
and config.s3_storage_bucket is None
):
LocalStorage().save_segment(segment, processed_filename, file_type)
elif config.gcs_results_bucket is not None:
GCSStorage().save_segment(segment, processed_filename, file_type)
elif config.s3_results_bucket is not None:
S3Storage().save_segment(segment, processed_filename, file_type)
else:
logging.error("que?")
return processed_filename
# Audio Storage Singleton
_AUDIO_STORAGE: Optional[AudioStorage] = None
def configure_storage():
global _AUDIO_STORAGE
config = get_config()
assert config is not None
logging.info(f"configuring audio store")
_AUDIO_STORAGE = AudioStorage(
audio_path=config.audio_path,
local_storage_base_dir=config.local_storage_base_dir,
local_results_base_dir=config.local_results_base_dir,
s3_loader_bucket=config.s3_loader_bucket,
s3_loader_base_dir=config.s3_loader_base_dir,
s3_storage_bucket=config.s3_storage_bucket,
s3_storage_base_dir=config.s3_storage_base_dir,
s3_results_bucket=config.s3_results_bucket,
s3_results_base_dir=config.s3_results_base_dir,
)
def get_storage() -> AudioStorage:
global _AUDIO_STORAGE
if _AUDIO_STORAGE is None:
configure_storage()
assert _AUDIO_STORAGE is not None
return _AUDIO_STORAGE
```
#### File: cyberpunk/storage/local.py
```python
import logging
from functools import lru_cache
from typing import Tuple
from pydub import AudioSegment
from cyberpunk.config import get_config
AUDIO_CACHE_SIZE = 50
class LocalStorage:
def __init__(self):
config = get_config()
self.base_dir = config.local_storage_base_dir
def __contains__(self, element):
return self.contains(element)
def contains(self, key: str) -> bool:
return True
@lru_cache(AUDIO_CACHE_SIZE)
def get_segment(self, key: str) -> Tuple[AudioSegment, str]:
logging.info(f"pulling key from local storage: {key}")
location = f"{key}"
audio_segment = AudioSegment.from_file(
f"{self.base_dir}{location}",
)
return audio_segment, location
def save_segment(self, segment: AudioSegment, key: str, file_type: str):
logging.debug(f"exporting segment {key} to tmp dir")
segment.export(
f"/tmp/{key}",
format=file_type,
)
```
#### File: cyberpunk/transformations/slice.py
```python
from dataclasses import dataclass
from typing import Optional
from pydub import AudioSegment
from cyberpunk.exceptions import (
TransformationInputParseException,
TransformationProcessException,
)
from cyberpunk.transformations import TransformationInput
@dataclass
class SliceInput:
start: Optional[int]
end: Optional[int]
@classmethod
def from_str(cls, arg: str):
try:
start_str, end_str = tuple(arg.split(":"))
start = int(start_str) if start_str != "" else None
end = int(end_str) if end_str != "" else None
except Exception as e:
raise TransformationInputParseException(e)
else:
return SliceInput(
start=start,
end=end,
)
def __iter__(self):
yield "start", self.start
yield "end", self.end
def __str__(self):
return f"{self.start if self.start is not None else ''}:{self.end if self.end is not None else ''}"
class Slice:
def __call__(
self,
segment: AudioSegment,
inputs: TransformationInput,
) -> AudioSegment:
return self.run(segment, inputs)
def run(
self,
segment: AudioSegment,
inputs: TransformationInput,
) -> AudioSegment:
try:
assert isinstance(inputs, SliceInput)
start = inputs.start
end = inputs.end
if start is None and end is None:
raise TransformationProcessException(
"the start and end of a slice can't both be None",
)
if start is None:
sliced_segment = segment[:end]
elif end is None:
sliced_segment = segment[start:]
else:
sliced_segment = segment[start:end]
except Exception as e:
raise TransformationProcessException(e)
else:
return sliced_segment
```
#### File: tests/storage/test_audius_storage.py
```python
import pytest
from pydub import AudioSegment
from cyberpunk.storage import AudiusStorage
@pytest.fixture
def audius_storage():
storage = AudiusStorage()
yield storage
def test_get_segment(audius_storage):
segment = audius_storage.get_segment("7YmNr")
assert segment != AudioSegment.empty()
``` |
{
"source": "jonaylor89/ScholarScraper",
"score": 3
} |
#### File: src/entities/totalcitations.py
```python
from datetime import datetime
from sqlalchemy import Column, String, Integer, DateTime, ForeignKey
from marshmallow import Schema, fields
from .entity import Entity, Base
class TotalCitations(Entity, Base):
__tablename__ = "total-citation"
scholar_id = Column(String(32), ForeignKey("scholar.id"), primary_key=True)
date = Column(DateTime, primary_key=True)
total_cites = Column(Integer)
def __init__(self, scholar_id, total_cites, created_by):
Entity.__init__(self, created_by)
self.scholar_id = scholar_id
self.total_cites = total_cites
self.date = datetime.now()
def __repr__(self):
return f"<TotalCitations(scholar_id='{self.scholar_id}', date='{self.date}', total_cites='{self.total_cites}')>"
class TotalCitationsSchema(Schema):
scholar_id = fields.Str()
date = fields.DateTime()
total_cites = fields.Number()
``` |
{
"source": "jonaylor89/WineInAMillion",
"score": 3
} |
#### File: notebooks/src/nn_script.py
```python
import argparse
import logging
import requests
import boto3
import os
import json
import io
import time
import pandas as pd
import joblib
from sklearn.neighbors import NearestNeighbors
import numpy as np
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
CONTENT_TYPE = "application/json"
bucket = "wineinamillion"
prefix = "data/"
filename = "winemag-data-130k-v2.csv"
assert bucket != "<S3_BUCKET>"
assert prefix != "<S3_KEY_PREFIX>"
assert filename != "<DATASET_FILENAME>"
# Read the dataset into memory. This is generally bad practice and in a production environment we'd use a real database to reference against
raw_data_location = f"s3://{bucket}/{prefix}raw/{filename}"
df = pd.read_csv(raw_data_location)
def model_fn(model_dir):
logger.info("model_fn")
logger.info(model_dir)
model = joblib.load(model_dir + "/model.joblib")
logger.info(model)
return model
# Deserialize the Invoke request body into an object we can perform prediction on
def input_fn(serialized_input_data, content_type=CONTENT_TYPE):
logger.info("Deserializing the input data.")
if content_type == CONTENT_TYPE:
data = json.loads(serialized_input_data)
return data
raise Exception(
"Requested unsupported ContentType in content_type: {}".format(content_type)
)
def mergeWineDistances(idx,distance):
wine = df.iloc[idx]
wine['distance'] = distance
return wine
# Perform prediction on the deserialized object, with the loaded model
def predict_fn(input_object, model):
logger.info("Calling model")
start_time = time.time()
print(input_object)
try:
embeddingsVector = [input_object["embeddings"]]
kneighbors = 5
if "kneighbors" in input_object.keys():
kneighbors = input_object["kneighbors"]
print(f"k neighbors {kneighbors}")
distances, neighbors = model.kneighbors(
embeddingsVector, kneighbors, return_distance=True
)
print("--- Inference time: %s seconds ---" % (time.time() - start_time))
print(f"neighbors {neighbors}")
print(f"distances {distances}")
result = list(map(mergeWineDistances, neighbors[0],distances[0]))
print(f"zipped neighbors {pd.DataFrame(result).to_json(orient='records')}")
return pd.DataFrame(result).to_json(orient='records')
except Exception as e:
print(e)
return []
# Serialize the prediction result into the desired response content type
def output_fn(prediction, accept):
logger.info("Serializing the generated output.")
if accept == "application/json":
output = json.dumps({"recommendations": prediction})
return output
raise Exception(
"Requested unsupported ContentType in Accept: {}".format(content_type)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Hyperparameters are described here.
parser.add_argument("--n_neighbors", type=int, default=10)
parser.add_argument("--metric", type=str, default="cosine")
# Sagemaker specific arguments. Defaults are set in the environment variables.
parser.add_argument("--output-data-dir", type=str)
parser.add_argument("--model-dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--train", type=str, default=os.environ["SM_CHANNEL_TRAIN"])
args = parser.parse_args()
# Load the training data into a Pandas dataframe and make sure it is in the appropriate format
embeddings = pd.read_csv(
os.path.join(args.train, "embeddings.csv.tar.gz"),
compression="gzip",
index_col=False,
header=None,
)
# Supply the hyperparameters of the nearest neighbors model
n_neighbors = args.n_neighbors
metric = args.metric
# Now, fit the nearest neighbors model
nn = NearestNeighbors(n_neighbors=n_neighbors, metric=metric)
model_nn = nn.fit(embeddings)
print("model has been fitted")
# Save the model to the output location in S3
joblib.dump(model_nn, os.path.join(args.model_dir, "model.joblib"))
``` |
{
"source": "jonazpiazu/rocker",
"score": 2
} |
#### File: rocker/test/test_git_extension.py
```python
import argparse
import em
import getpass
import os
import unittest
from pathlib import Path
import pwd
import tempfile
from rocker.core import list_plugins
from rocker.extensions import name_to_argument
from test_extension import plugin_load_parser_correctly
class ExtensionsTest(unittest.TestCase):
def test_name_to_argument(self):
self.assertEqual(name_to_argument('asdf'), '--asdf')
self.assertEqual(name_to_argument('as_df'), '--as-df')
self.assertEqual(name_to_argument('as-df'), '--as-df')
class GitExtensionTest(unittest.TestCase):
def setUp(self):
# Work around interference between empy Interpreter
# stdout proxy and test runner. empy installs a proxy on stdout
# to be able to capture the information.
# And the test runner creates a new stdout object for each test.
# This breaks empy as it assumes that the proxy has persistent
# between instances of the Interpreter class
# empy will error with the exception
# "em.Error: interpreter stdout proxy lost"
em.Interpreter._wasProxyInstalled = False
def test_git_extension(self):
plugins = list_plugins()
git_plugin = plugins['git']
self.assertEqual(git_plugin.get_name(), 'git')
p = git_plugin()
self.assertTrue(plugin_load_parser_correctly(git_plugin))
mock_cliargs = {}
mock_config_file = tempfile.NamedTemporaryFile()
mock_system_config_file = tempfile.NamedTemporaryFile()
mock_cliargs['git_config_path'] = mock_config_file.name
mock_cliargs['git_config_path_system'] = mock_system_config_file.name
args = p.get_docker_args(mock_cliargs)
system_gitconfig = mock_system_config_file.name
system_gitconfig_target = '/etc/gitconfig'
user_gitconfig = mock_config_file.name
user_gitconfig_target = '/root/.gitconfig'
self.assertIn('-v %s:%s' % (system_gitconfig, system_gitconfig_target), args)
self.assertIn('-v %s:%s' % (user_gitconfig, user_gitconfig_target), args)
# Test with user "enabled"
mock_cliargs = {'user': True}
mock_cliargs['git_config_path'] = mock_config_file.name
user_args = p.get_docker_args(mock_cliargs)
user_gitconfig_target = os.path.expanduser('~/.gitconfig')
self.assertIn('-v %s:%s' % (user_gitconfig, user_gitconfig_target), user_args)
# Test with overridden user
mock_cliargs['user_override_name'] = 'testusername'
user_args = p.get_docker_args(mock_cliargs)
user_gitconfig_target = '/home/testusername/.gitconfig'
self.assertIn('-v %s:%s' % (user_gitconfig, user_gitconfig_target), user_args)
# Test non-extant files no generation
mock_cliargs['git_config_path'] = '/path-does-not-exist'
mock_cliargs['git_config_path_system'] = '/path-does-not-exist-either'
user_args = p.get_docker_args(mock_cliargs)
self.assertNotIn('-v', user_args)
``` |
{
"source": "jonbakerfish/HRNet-MaskRCNN-Benchmark",
"score": 2
} |
#### File: HRNet-MaskRCNN-Benchmark/demo/Mask_R-CNN_demo.py
```python
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.abspath(osp.dirname(osp.dirname(__file__)))
# Add lib to PYTHONPATH
lib_path = this_dir
add_path(lib_path)
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
# this makes our figures bigger
pylab.rcParams['figure.figsize'] = 20, 12
import requests
from io import BytesIO
from PIL import Image
import numpy as np
from maskrcnn_benchmark.config import cfg
from df2_predictor import DeepFashion2Demo
config_file = "./configs/df2/mask_rcnn_hrnet_w18_1x.yaml"
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
# cfg.merge_from_list(["MODEL.DEVICE", ""])
coco_demo = DeepFashion2Demo(
cfg,
min_image_size=800,
confidence_threshold=0.5,
)
def load(fname):
"""
Given an fname of an image, downloads the image and
returns a PIL image
"""
pil_image = Image.open(fname).convert("RGB")
# convert to BGR format
image = np.array(pil_image)[:, :, [2, 1, 0]]
return image
def imshow(img):
plt.imshow(img[:, :, [2, 1, 0]])
plt.axis("off")
plt.show()
for i in range(1,3000):
image = load("/home/jcao/df2/validation/image/%06d.jpg"%i)
predictions = coco_demo.run_on_opencv_image(image)
imshow(predictions)
```
#### File: data/datasets/deepfashion2.py
```python
import os
import cv2
import torch
import torchvision
import PIL.Image as Image
import maskrcnn_benchmark.utils.zipreader as zipreader
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
#class DeepFashion2(torchvision.datasets.coco.CocoDetection):
class DeepFashion2(torch.utils.data.Dataset):
def __init__(
self, ann_file, root, remove_images_without_annotations, transforms=None
):
#super(DeepFashion2, self).__init__(root, ann_file)
from pycocotools.coco import COCO
self.root = root
self.coco = COCO(ann_file)
self.ids = list(self.coco.imgs.keys())
# sort indices for reproducible results
self.ids = sorted(self.ids)
# filter images without detection annotations
if remove_images_without_annotations:
self.ids = [
img_id
for img_id in self.ids
if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0
]
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(self.coco.getCatIds())
}
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()
}
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
self.transforms = transforms
def __getitem__(self, idx):
# use zipreader, change the function of super.getitem
coco = self.coco
img_id = self.ids[idx]
ann_ids = coco.getAnnIds(imgIds=img_id)
anno = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
# In philly cluster use zipreader instead Image.open
# img = zipreader.imread(os.path.join(self.root, path), \
# cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
# img = Image.fromarray(img)
# img = cv2.imread(os.path.join(self.root, path), cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
# img = Image.fromarray(img)
# filter crowd annotations
# TODO might be better to add an extra field
anno = [obj for obj in anno if obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes
target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
classes = [obj["category_id"] for obj in anno]
classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
classes = torch.tensor(classes)
target.add_field("labels", classes)
masks = [obj["segmentation"] for obj in anno]
masks = SegmentationMask(masks, img.size)
target.add_field("masks", masks)
target = target.clip_to_image(remove_empty=True)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target, idx
def __len__(self):
return len(self.ids)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
def get_img_info(self, index):
img_id = self.id_to_img_map[index]
img_data = self.coco.imgs[img_id]
return img_data
``` |
{
"source": "jonbakerfish/mmgeneration",
"score": 2
} |
#### File: mmgeneration/demo/ddpm_demo.py
```python
import argparse
import os
import os.path as osp
import sys
import mmcv
import numpy as np
import torch
from mmcv import DictAction
from torchvision import utils
# yapf: disable
sys.path.append(os.path.abspath(os.path.join(__file__, '../..'))) # isort:skip # noqa
from mmgen.apis import init_model, sample_ddpm_model # isort:skip # noqa
# yapf: enable
def parse_args():
parser = argparse.ArgumentParser(description='DDPM demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--save-path',
type=str,
default='./work_dirs/demos/ddpm_samples.png',
help='path to save uncoditional samples')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CUDA device id')
# args for inference/sampling
parser.add_argument(
'--num-batches', type=int, default=4, help='Batch size in inference')
parser.add_argument(
'--num-samples',
type=int,
default=12,
help='The total number of samples')
parser.add_argument(
'--sample-model',
type=str,
default='ema',
help='Which model to use for sampling')
parser.add_argument(
'--sample-cfg',
nargs='+',
action=DictAction,
help='Other customized kwargs for sampling function')
parser.add_argument(
'--same-noise',
action='store_true',
help='whether use same noise as input (x_T)')
parser.add_argument(
'--n-skip',
type=int,
default=25,
help=('Skip how many steps before selecting one to visualize. This is '
'helpful with denoising timestep is too much. Only work with '
'`save-path` is end with \'.gif\'.'))
# args for image grid
parser.add_argument(
'--padding', type=int, default=0, help='Padding in the image grid.')
parser.add_argument(
'--nrow',
type=int,
default=2,
help=('Number of images displayed in each row of the grid. '
'This argument would work only when label is not given.'))
# args for image channel order
parser.add_argument(
'--is-rgb',
action='store_true',
help=('If true, color channels will not be permuted, This option is '
'useful when inference model trained with rgb images.'))
args = parser.parse_args()
return args
def create_gif(results, gif_name, fps=60, n_skip=1):
"""Create gif through imageio.
Args:
frames (torch.Tensor): Image frames, shape like [bz, 3, H, W].
gif_name (str): Saved gif name.
fps (int, optional): Frames per second of the generated gif.
Defaults to 60.
n_skip (int, optional): Skip how many steps before selecting one to
visualize. Defaults to 1.
"""
try:
import imageio
except ImportError:
raise RuntimeError('imageio is not installed,'
'Please use “pip install imageio” to install')
frames_list = []
for frame in results[::n_skip]:
frames_list.append(
(frame.permute(1, 2, 0).cpu().numpy() * 255.).astype(np.uint8))
# ensure the final denoising results in frames_list
if not (len(results) % n_skip == 0):
frames_list.append((results[-1].permute(1, 2, 0).cpu().numpy() *
255.).astype(np.uint8))
imageio.mimsave(gif_name, frames_list, 'GIF', fps=fps)
def main():
args = parse_args()
model = init_model(
args.config, checkpoint=args.checkpoint, device=args.device)
if args.sample_cfg is None:
args.sample_cfg = dict()
suffix = osp.splitext(args.save_path)[-1]
if suffix == '.gif':
args.sample_cfg['save_intermedia'] = True
results = sample_ddpm_model(model, args.num_samples, args.num_batches,
args.sample_model, args.same_noise,
**args.sample_cfg)
# save images
mmcv.mkdir_or_exist(os.path.dirname(args.save_path))
if suffix == '.gif':
# concentrate all output of each timestep
results_timestep_list = []
for t in results.keys():
# make grid
results_timestep = utils.make_grid(
results[t], nrow=args.nrow, padding=args.padding)
# unsqueeze at 0, because make grid output is size like [H', W', 3]
results_timestep_list.append(results_timestep[None, ...])
# Concatenates to [n_timesteps, H', W', 3]
results_timestep = torch.cat(results_timestep_list, dim=0)
if not args.is_rgb:
results_timestep = results_timestep[:, [2, 1, 0]]
results_timestep = (results_timestep + 1.) / 2.
create_gif(results_timestep, args.save_path, n_skip=args.n_skip)
else:
if not args.is_rgb:
results = results[:, [2, 1, 0]]
results = (results + 1.) / 2.
utils.save_image(
results, args.save_path, nrow=args.nrow, padding=args.padding)
if __name__ == '__main__':
main()
``` |
{
"source": "jon-bassi/crypto-tools",
"score": 3
} |
#### File: crypto-tools/tools/NGramScore.py
```python
__author__ = 'jon-bassi'
import sys
import math
import Vignere
# load files into memory - increase load time, decrease runtime
mono = 'ngram_data/english_monograms'
bi = 'ngram_data/english_bigrams'
tri = 'ngram_data/english_trigrams'
quad = 'ngram_data/english_quadgrams'
monograms = {}
mN = 0.0
for line in file(mono):
key, score = line.split(' ')
monograms[key.lower()] = float(score)
mN += float(score)
bigrams = {}
bN = 0.0
for line in file(bi):
key, score = line.split(' ')
bigrams[key.lower()] = float(score)
bN += float(score)
trigrams = {}
tN = 0.0
for line in file(tri):
key, score = line.split(' ')
trigrams[key.lower()] = float(score)
tN += float(score)
quadgrams = {}
qN = 0.0
for line in file(quad):
key, score = line.split(' ')
quadgrams[key.lower()] = float(score)
qN += float(score)
def calculate_log_score(text, option):
'''
calculates the log score of the undecrypted text, using mongram -> quadgram statistics
:param text:
:param option:
:return:
'''
ngrams = {}
ngrams_list = []
n = 0
if option == 1:
print 'not supported yet'
ngrams = monograms
n = mN
return
elif option == 2:
print 'not supported yet'
ngrams = bigrams
n = bN
return
elif option == 3:
print 'not supported yet'
ngrams = trigrams
n = tN
return
elif option == 4:
for start in range(0, len(text) - 3):
ngrams_list.append(text[start:start + 4])
ngrams = quadgrams
n = qN
else:
print 'must chose a number between 1 and 4'
return
logScore = 0.0
for ngram in ngrams_list:
if ngram in ngrams:
logScore += float(math.log10(ngrams[ngram] / n))
else:
logScore += float(math.log10(0.01 / n))
return logScore
def iterate_key(key, index, letter):
'''
increment the given index of the key by one letter
:param key:
:param index:
:param letter:
:return:
'''
alphabet = 'abcdefghijklmnopqrstuvwxyz'
newText = ''
for idx in range(len(key)):
if idx != index:
newText += key[idx]
else:
newText += alphabet[letter]
return newText
def find_best_key(key, cipherText):
'''
finds the best scoring key by modifying the key one letter at a time and calculating the log score
:param key:
:param cipherText:
:return:
'''
bestKey = key
bestScore = float(calculate_log_score(Vignere.decrypt(cipherText, key), 4))
score = -1
for index in range(0, len(key)):
for letter in range(0, 26):
key = iterate_key(key, index, letter)
score = float(calculate_log_score(Vignere.decrypt(cipherText, key), 4))
if score > bestScore:
bestScore = score
bestKey = key
print ("%s\t%s\t%s" % (key, score, Vignere.decrypt(cipherText,key)))
key = bestKey
return bestKey
if __name__ == "__main__":
if len(sys.argv) != 3:
print 'error executing NGramScore.py\nusage: python NGramScore.py [text] [key size]'
sys.exit(0)
text = sys.argv[1]
keySize = int(sys.argv[2])
key = ''
# could make this a random key
for i in range(0, keySize):
key += 'a'
while True:
bestKey = find_best_key(key, text)
print('%s %s' % (bestKey, Vignere.decrypt(text, bestKey)))
input = raw_input('press enter to try again or type x to exit: ')
if input == 'x':
break
key = bestKey
``` |
{
"source": "JonBau/pyvolt-service",
"score": 3
} |
#### File: pyvolt-service/examples/dummy_simulator.py
```python
import paho.mqtt.client as mqtt
import time
import os
import argparse
def connect(client_name, broker_adress, port=1883):
mqttc = mqtt.Client(client_name, True)
#mqttc.username_pw_set(username, password)
mqttc.on_connect = on_connect # attach function to callback
mqttc.connect(broker_adress, port) # connect to broker
mqttc.loop_start() # start loop to process callback
time.sleep(4) # wait for connection setup to complete
return mqttc
def on_connect(client, userdata, flags, rc):
"""
The callback for when the client receives a CONNACK response from the server.
"""
if rc == 0:
print("connected OK with returned code=", rc)
else:
print("Bad connection with returned code=", rc)
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--quiet", action="store_true", help="switch off output")
args = parser.parse_args()
# parameters
sequence = 1
len_data = 300
client_name = "dummy-simulator"
topic_publish = "/dpsim-powerflow"
# ACS Message Broker
broker_address = "172.17.0.1"
port = 1883
os.chdir(os.path.dirname(__file__))
print(os.getcwd())
mqttc = connect(client_name, broker_address, port)
data_file = r"./sample_data/dpsim_powerflow_record_cigre.txt"
data = []
with open(data_file) as json_file:
for line in json_file:
data.append(line)
while sequence < len_data + 1:
mqttc.publish(topic_publish, data[sequence])
if not args.quiet:
print("Sent data for sequence " + str(sequence) + ": " + data[sequence])
sequence += 1
time.sleep(1)
mqttc.loop_stop()
mqttc.disconnect()
```
#### File: pyvolt-service/interfaces/villas_node_interface.py
```python
from json import dumps, loads
from datetime import datetime
import numpy as np
from pyvolt.results import Results
def read_mapping_file(mapping_file):
"""
Create a list which contains the order in which the data must be received/sent from/to VillasNode.
This mapping is readed from the files villas_node_input_data/villas_node_output_data
@param mapping_file: received message from the server (json.loads(msg.payload)[0])
@return mapping:
- if input is villas_node_input_data --> each element of mapping is a list of length 3: [id, "V", "mag" or "phase"]
- if input is villas_node_output_data --> each element of mapping is a list of length 4: [id, "V", "mag" or "phase", "pf" or "est"]
* if id = "max_err" or "mean_err" or "scenario_flag" --> this element has the length 1: ["max_err" or "mean_err" or "scenario_flag"]
"""
lines = []
with open(mapping_file) as mfile:
for line in mfile:
lines.append(line.strip('\n'))
mapping = [None] * len(lines)
for pos, elem in enumerate(lines):
mapping[pos] = elem.split(".")
return mapping
def receiveVillasNodeInput(system, message, input_mapping_vector):
"""
to store the received data in an object of type acs.state_estimation.results.Results
@system: model of the system (type acs.state_estimation.network.System)
@param message: received message from the server (json.loads(msg.payload)[0])
@param input_mapping_vector: according to villas_node_input.json (see function read_mapping_file)
@return powerflow_results: object type acs.state_estimation.results.Results
"""
data = message['data']
# create a results object to store the received data
powerflow_results = Results(system)
# store the received data in powerflow_results
for node in powerflow_results.nodes:
magnitude = 0.0
phase = 0.0
uuid = node.topology_node.uuid
for idx, elem in enumerate(input_mapping_vector):
# print("elem[0]: {}, uuid: {}".format(elem[0], uuid))
if elem[0] == uuid:
if elem[2] == "mag": # elem[1] = "mag" or "phase"
magnitude = data[idx]
elif elem[2] == "phase":
phase = data[idx]
node.voltage = magnitude * (np.cos(phase) + 1j * np.sin(phase)) / 1000
node.voltage_pu = node.voltage / node.topology_node.baseVoltage
# calculate quantities I, Iinj, S and Sinj
powerflow_results.calculate_all()
return powerflow_results
def sendVillasNodeOutput(message, output_mapping_vector, powerflow_results, state_estimation_results, scenario_flag):
"""
to create the payload according to "villas_node_output.json"
@param message: received message from the server (json.loads(msg.payload)[0])
@param output_mapping_vector: according to villas_node_output.json (see function read_mapping_file)
@param powerflow_results: results of powerflow (type acs.state_estimation.results.Results)
@param state_estimation_results: results of state_estimation (type acs.state_estimation.results.Results)
@param scenario_flag:
@return: string formatted according to "villas_node_output.json"
"""
VillasNodeOutput = {}
VillasNodeOutput["ts"] = {}
VillasNodeOutput["ts"]["origin"] = message["ts"]["origin"]
if "sequence" in message:
VillasNodeOutput["sequence"] = message["sequence"]
else:
print('Sequence no. not available.')
VillasNodeOutput["sequence"] = 1
# calculate Vmag_err
Vmag_err = np.zeros(len(powerflow_results.nodes))
for idx, elem in enumerate(powerflow_results.nodes):
uuid_pf = elem.topology_node.uuid
Vmag_true = np.absolute(elem.voltage)
Vmag_est = np.absolute(state_estimation_results.get_node(uuid=uuid_pf).voltage)
Vmag_err[idx] = np.absolute(Vmag_est - Vmag_true)
Vmag_err[idx] = 100 * np.divide(Vmag_err[idx], Vmag_true)
max_err = np.amax(Vmag_err)
mean_err = np.mean(Vmag_err)
data = [None] * len(output_mapping_vector)
for idx, elem in enumerate(output_mapping_vector):
if elem[0] == "max_err":
data[idx] = max_err
continue
elif elem[0] == "mean_err":
data[idx] = mean_err
continue
elif elem[0] == "scenario_flag":
data[idx] = float(scenario_flag)
continue
else: # elem = ["N4", "V", "phase", "est"] or elem = ["N4", "V", "phase", "pf"]
node = None
if elem[3] == "est":
node = state_estimation_results.get_node(uuid=elem[0])
elif elem[3] == "pf":
node = powerflow_results.get_node(uuid=elem[0])
value = None
if elem[2] == "mag": # elem_data[2] = "mag" or "phase"
value = np.absolute(node.voltage)
elif elem[2] == "phase":
value = np.angle(node.voltage)
data[idx] = value
VillasNodeOutput["data"] = data
return "[" + dumps(VillasNodeOutput) + "]"
def serviceCalculations():
pass
def convertVillasNodeInputToSognoInput(VillasNodeInput, input_mapping_vector, version="1.0", type="se_result"):
"""
@param VillasNode: received message formatted according to "villas_node_input.json"
@param input_mapping_vector: according to villas_node_input.json (result of read_mapping_file)
@param version:
@param type:
@return: json object formatted according to "sogno_input.json"
"""
timestamp = VillasNodeInput["ts"]["origin"]
# sequence = VillasNodeInput["sequence"]
data = VillasNodeInput["data"]
SongoInput = {}
SongoInput["version"] = version
SongoInput["identifier"] = "123456"
SongoInput["type"] = type
SongoInput["readings"] = []
for idx, elem in enumerate(input_mapping_vector):
uuid = elem[0]
type = elem[2] # mag or phase
value = {}
value["id"] = uuid
value["timestamp"] = timestamp
value["phase"] = "a"
value["measurand"] = ""
if type == "mag":
value["measurand"] = "voltage_magnitude"
elif type == "phase":
value["measurand"] = "voltage_angle"
value["data"] = data[idx]
SongoInput["readings"].append(value)
return SongoInput
def convertSognoOutputToVillasNodeOutput(SognoOutput, output_mapping_vector):
"""
@param SognoOutput: string formatted according to the file "sogno_output.json"
@param output_mapping_vector: according to villas_node_input.json (see function read_mapping_file)
@return: string formatted according to "villas_node_output.json"
"""
SognoOutput = loads(SognoOutput)
timestamp_sogno = SognoOutput["nodes"][0]["values"][0]["timestamp"]
# Convert UTC datetime to seconds since January 1, 1970
utc_dt = datetime.strptime(timestamp_sogno, '%Y-%m-%dT%H:%M:%S')
timestamp_villas = (utc_dt - datetime(1970, 1, 1)).total_seconds()
nodes = SognoOutput["nodes"]
data_sogno = {}
for node in nodes:
node_id = node["node_id"]
values = node["values"]
for value in values:
if value["measurand"] == "voltage_magnitude":
data_sogno[node_id + ".mag.est"] = value["data"]
elif value["measurand"] == "voltage_angle":
data_sogno[node_id + ".phase.est"] = value["data"]
data_villas = [0.0] * len(output_mapping_vector)
for idx, elem in enumerate(output_mapping_vector):
if elem[0] == "max_err":
continue
elif elem[0] == "mean_err":
continue
elif elem[0] == "scenario_flag":
continue
elif elem[3] == "pf":
continue
elif elem[3] == "est":
node_id = elem[0]
value_type = elem[2] # phase or magnitude
data_villas[idx] = data_sogno[node_id + "." + value_type + ".est"]
VillasOutput = {}
VillasOutput["ts"] = {}
VillasOutput["ts"]["origin"] = []
VillasOutput["ts"]["origin"].append(timestamp_villas)
VillasOutput["sequence"] = 0
VillasOutput["data"] = data_villas
return dumps([VillasOutput])
``` |
{
"source": "Jon-Becker/soduko-solver-py",
"score": 3
} |
#### File: Jon-Becker/soduko-solver-py/index.py
```python
import math
from copy import deepcopy
from pprint import pprint
import os
import timeit
import random
import sys
is_windows = sys.platform.startswith('win')
def clear():
if is_windows:
os.system('cls')
else:
os.system('clear')
blank = [[2,8,0,0,0,0,0,0,1],
[0,0,0,8,0,1,0,0,4],
[0,0,4,0,7,0,3,0,0],
[0,2,0,0,5,0,0,6,0],
[0,0,3,1,0,9,7,0,0],
[0,1,0,0,8,0,0,5,0],
[0,0,1,0,6,0,8,0,0],
[5,0,0,2,0,3,0,0,0],
[9,0,0,0,0,0,0,1,6]]
class Board:
def __init__(self, board):
self.board = deepcopy(board)
self.backup = deepcopy(board)
self.lastChoice = 0
def getBoard(self):
return self.board
def getBackup(self):
return self.backup
def setBackup(self, board):
self.backup = deepcopy(board)
def restoreBackup(self):
self.board = deepcopy(self.backup)
def isSolved(self):
result = {'solved': True}
flippedBoard = []
for i in range(0,9):
flippedBoard.append([])
blockBoard = []
for i in range(0,9):
blockBoard.append([])
for row, rowArray in enumerate(self.board):
numberFrequency = {}
for col, value in enumerate(rowArray):
blockBoard[math.floor(col/3)+(math.floor(row/3) * 3)].append(value)
flippedBoard[col].append(value)
if value in numberFrequency:
numberFrequency[value] += 1
elif value != 0:
numberFrequency[value] = 1
for number, count in numberFrequency.items():
if count > 1:
result = {"solved": False, "error": "duplicate_in_row"}
for row, rowArray in enumerate(blockBoard):
numberFrequency = {}
for col, value in enumerate(rowArray):
if value in numberFrequency:
numberFrequency[value] += 1
elif value != 0:
numberFrequency[value] = 1
for number, count in numberFrequency.items():
if count > 1:
result = {"solved": False, "error": "duplicate_in_blocks"}
for row, rowArray in enumerate(flippedBoard):
numberFrequency = {}
for col, value in enumerate(rowArray):
if value in numberFrequency:
numberFrequency[value] += 1
elif value != 0:
numberFrequency[value] = 1
for number, count in numberFrequency.items():
if count > 1:
result = {"solved": False, "error": "duplicate_in_col"}
for column in rowArray:
if column == 0:
result = {"solved": False, "error": "empty_space"}
return result
def hasErrors(self, testArray):
result = {'errors': False}
flippedBoard = []
for i in range(0,9):
flippedBoard.append([])
blockBoard = []
for i in range(0,9):
blockBoard.append([])
for row, rowArray in enumerate(testArray):
numberFrequency = {}
for col, value in enumerate(rowArray):
blockBoard[math.floor(col/3)+(math.floor(row/3) * 3)].append(value)
flippedBoard[col].append(value)
if value in numberFrequency:
numberFrequency[value] += 1
elif value != 0:
numberFrequency[value] = 1
for number, count in numberFrequency.items():
if count > 1:
result = {"errors": True, "error": "duplicate_in_row"}
for row, rowArray in enumerate(blockBoard):
numberFrequency = {}
for col, value in enumerate(rowArray):
if value in numberFrequency:
numberFrequency[value] += 1
elif value != 0:
numberFrequency[value] = 1
for number, count in numberFrequency.items():
if count > 1:
result = {"errors": True, "error": "duplicate_in_blocks"}
for row, rowArray in enumerate(flippedBoard):
numberFrequency = {}
for col, value in enumerate(rowArray):
if value in numberFrequency:
numberFrequency[value] += 1
elif value != 0:
numberFrequency[value] = 1
for number, count in numberFrequency.items():
if count > 1:
result = {"errors": True, "error": "duplicate_in_col"}
return result
def placeNumber(self, row, column, value):
oldBoard = deepcopy(self.board)
targetRow = oldBoard[row]
for n, i in enumerate(targetRow):
if n == column:
targetRow[n] = value
newBoard = oldBoard
newBoard[row] = targetRow
if self.hasErrors(newBoard)['errors'] == False:
self.board = deepcopy(newBoard)
return {"errors": False, 'board': newBoard};
else:
return {"errors": True, "error": "illegal_placement"};
def canPlace(self, row, column, value):
oldBoard = deepcopy(self.board)
targetRow = oldBoard[row]
for n, i in enumerate(targetRow):
if n == column:
targetRow[n] = value
newBoard = oldBoard
newBoard[row] = targetRow
if self.hasErrors(newBoard)['errors'] == False:
return {"errors": False, 'board': newBoard};
else:
return {"errors": True, "error": "illegal_placement"};
def positionValue(self, row, column):
oldBoard = deepcopy(self.board)
targetRow = oldBoard[row]
for n, i in enumerate(targetRow):
if n == column:
return i;
break;
sudoku = Board(blank)
clear()
pprint(sudoku.getBoard())
start = timeit.default_timer()
# Find all 0's and then get possibilities for each square and fill in rows with only 1 possibility until there's no more singular possibility slots.
def fillInSingularPossibilities(soduku, n):
potentialBoardValues = []
for i in range(0,9):
potentialBoardValues.append([])
for position in range(0,9):
if sudoku.positionValue(i, position) != 0:
potentialBoardValues[i].append(sudoku.positionValue(i, position))
else:
potentialBoardValues[i].append([])
hasSingular = False
invalidBoard = False
for row, rowArray in enumerate(sudoku.board):
for col, value in enumerate(rowArray):
if value == 0:
for potential in range(1,10):
if sudoku.canPlace(row, col, potential)['errors'] == False:
potentialBoardValues[row][col].append(potential)
if len(potentialBoardValues[row][col]) == 0:
invalidBoard = True
if len(potentialBoardValues[row][col]) == 1:
hasSingular = True
sudoku.placeNumber(row, col, potentialBoardValues[row][col][0])
clear()
pprint(sudoku.getBoard());
if invalidBoard:
invalidBoard = False
soduku.restoreBackup()
if n == -1:
print("Impossible Solve")
quit()
fillInSingularPossibilities(sudoku, -1)
if hasSingular:
hasSingular = False
if n == -1:
sudoku.setBackup(sudoku.getBoard())
fillInSingularPossibilities(sudoku, 0)
else:
if soduku.isSolved()['solved']:
clear()
stop = timeit.default_timer()
print("Solved soduku board in", stop-start, "seconds.")
pprint(soduku.getBoard())
quit()
else:
if n == 1:
soduku.restoreBackup()
fillInSingularPossibilities(sudoku, 1)
else:
for row, rowArray in enumerate(potentialBoardValues):
for col, value in enumerate(rowArray):
if not isinstance(value, int):
choice = random.choice(value)
while choice == sudoku.lastChoice:
choice = random.choice(value)
sudoku.lastChoice = choice
soduku.placeNumber(row, col, choice)
fillInSingularPossibilities(sudoku, 0)
fillInSingularPossibilities(sudoku, -1)
``` |
{
"source": "jonbeeler/openweave-wdlc",
"score": 2
} |
#### File: nwv/validators/changed_number_validator.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gwv import schema
from gwv import validator
class ChangedNumberValidation(validator.VisitorComparisonValidator):
"""Checks if any objects were removed or changed type in the schema."""
def visit_Field(self, previous_field):
self.check_number(previous_field)
def visit_EnumPair(self, previous_enumpair):
self.check_number(previous_enumpair)
def visit_Trait(self, previous_trait):
self.check_number(previous_trait)
def visit_Component(self, previous_component):
self.check_number(previous_component)
def check_number(self, previous_obj):
current_obj = self.get_obj_from_current_schema(previous_obj)
if not current_obj:
# Removed object validator will handle this case
return
if current_obj.number != previous_obj.number:
msg = ("The id/tag number for item %s has changed from the "
"previous schema. Changing id/tag numbers is not allowed "
"without backward compatibility." % (previous_obj.full_name))
if previous_obj.get_stability() is schema.Stability.ALPHA:
self.add_warning(msg)
else:
self.add_failure(msg)
process = ChangedNumberValidation.process
```
#### File: nwv/validators/iface_mapping_validator.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gwv import validator
class IfaceMappingValidator(validator.VisitorValidator):
"""Validate that iface mapping is valid."""
def visit_Group(self, group):
for iface_component in group.interface.component_list:
if group.component_list.by_name(iface_component.base_name):
# First check if there's an explicit mapping for this component
source_component = group.component_list.by_name(
iface_component.base_name).source_component
else:
# If not, check for an implicit mapping
source_component = group.parent.component_list.by_name(
iface_component.base_name)
if source_component is None:
self.add_failure("No valid mapping from trait {} to {}".format(
iface_component.full_name, group.parent.full_name))
return
if source_component.trait != iface_component.trait:
self.add_failure("Trait type of {} does not match {}.".format(
source_component.full_name, iface_component.full_name))
return
def visit_GroupComponentRef(self, ref):
if not ref.source_component:
self.add_failure("Invalid mapping for {}".format(ref.full_name))
return
process = IfaceMappingValidator.process
```
#### File: nwv/validators/no_new_java_outer_classname_validator.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gwv import validator
class NoNewJavaOuterClassnameValidator(validator.VisitorComparisonValidator):
"""Checks if java_outer_classname was removed or added in schema."""
def _check_for_java_outer_classname(self, previous_obj):
current_obj = self.get_obj_from_current_schema(previous_obj)
# Only check on objects that have not been deleted.
if (current_obj):
if (current_obj.java_outer_classname is not None and
(previous_obj is None or previous_obj.java_outer_classname is None)):
self.add_failure("option java_outer_classname should not be added "
"to new schema.")
elif (current_obj.java_outer_classname is None and
previous_obj.java_outer_classname is not None):
self.add_failure("option java_outer_classname should not be removed "
"from existing schema.")
elif current_obj.java_outer_classname != previous_obj.java_outer_classname:
self.add_failure("option java_outer_classname should not be changed.")
def visit_Trait(self, previous_obj):
self._check_for_java_outer_classname(previous_obj)
def visit_Typespace(self, previous_obj):
self._check_for_java_outer_classname(previous_obj)
def visit_Interface(self, previous_obj):
self._check_for_java_outer_classname(previous_obj)
def visit_Resource(self, previous_obj):
self._check_for_java_outer_classname(previous_obj)
process = NoNewJavaOuterClassnameValidator.process
```
#### File: nwv/validators/stability_reference_validator.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gwv import schema
from gwv import validator
def _find_containing_name(obj):
while obj:
if (isinstance(obj, schema.StructEnumCollectionBase) or
isinstance(obj, schema.TraitCollectionBase)):
return type(obj).__name__, obj.full_name
obj = obj.parent
return None, None
def _check_references_current_version(parent, child):
"""Checks references of the current version."""
if parent is None or child is None or parent.get_version_map() is None:
return False
included_version = parent.get_version_map().get_child_version(
parent.get_version(),
_find_containing_name(child)[1])
return included_version == child.get_version()
class StabilityReferenceValidator(validator.VisitorValidator):
"""Checks if references have consistent stability."""
def visit_Field(self, field):
if _check_references_current_version(field, field.struct_type or
field.enum_type):
self.compare_stability(field, field.struct_type or field.enum_type)
def compare_stability(self, value, referenced_value):
if (value is None) or (referenced_value is None):
return
if (value.get_stability() is None) or (referenced_value.get_stability() is
None):
return
if value.get_stability().value > referenced_value.get_stability().value:
value_type, value_name = _find_containing_name(value)
referenced_value_type, referenced_value_name = (
_find_containing_name(referenced_value))
self.add_failure("{} {} has stability {}, and references {}, which has "
"stability {}. {}s can only reference {}s at an "
"equal or higher stability level.".format(
value_type, value_name,
value.get_stability().name, referenced_value_name,
referenced_value.get_stability().name, value_type,
referenced_value_type))
return True
def visit_Trait(self, trait):
if trait.extends and _check_references_current_version(
trait, trait.extends):
self.compare_stability(trait, trait.extends)
def visit_Event(self, event):
if event.extends and _check_references_current_version(
event, event.extends):
self.compare_stability(event, event.extends)
def visit_Command(self, command):
if command.extends and _check_references_current_version(
command, command.extends):
self.compare_stability(command, command.extends)
def visit_CommandResponse(self, command_response):
if command_response.extends and _check_references_current_version(
command_response, command_response.extends):
self.compare_stability(command_response, command_response.extends)
def visit_Struct(self, struct):
if struct.extends and _check_references_current_version(
struct, struct.extends):
self.compare_stability(struct, struct.extends)
def visit_Component(self, component):
if _check_references_current_version(component.parent, component.trait):
self.compare_stability(component.parent, component.trait)
def visit_Group(self, group):
if _check_references_current_version(group.parent, group.interface):
self.compare_stability(group.parent, group.interface)
process = StabilityReferenceValidator.process
```
#### File: nwv/validators/test_map_validator.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from gwv import schema
from gwv import validator
from nwv.validators import map_validator
class MapValidatorTests(validator.ValidatorTestCase):
"""Check that map keys are valid."""
def test_bad_map(self):
map_key = schema.Field('key', 1, '', schema.Field.DataType.UINT64, None)
field = schema.Field('invalid_map', 1000, '', schema.Field.DataType.STRUCT,
None)
field.is_map = True
field.map_key = map_key
self.get_test_trait().state_list.append(field)
self.assert_invalid(map_validator.MapValidator,
'64 bit keys are not allowed in map keys.')
if __name__ == '__main__':
unittest.main()
```
#### File: nwv/validators/test_min_version_validator.py
```python
from __future__ import absolute_import
import unittest
from gwv import schema
from gwv import validator
from nwv.validators import min_version_validator
class MinVersionValidatorTests(validator.ValidatorTestCase):
"""Test for MinVersionValidator validator."""
def test_invalid_min_version_on_state_field(self):
trait = self.get_test_trait()
trait.version = 2
field = schema.Field('invalid_version', 1000, '',
schema.Field.DataType.STRUCT, None)
field.min_version = 3
trait.state_list.append(field)
self.assert_invalid(min_version_validator.MinVersionValidator,
'Fields cannot have a min_version > trait '
'version.')
def test_invalid_min_version_on_enum_value(self):
trait = self.get_test_trait()
trait.version = 2
enum = self.get_test_enum()
enum.pair_list[0].min_version = 3
self.assert_invalid(min_version_validator.MinVersionValidator,
'Enum values cannot have a min_version > trait '
'version.')
def test_invalid_min_version_on_resource_trait(self):
resource = self.get_test_resource()
resource_component = self.get_test_resource_component()
resource.version = 2
resource_component.min_version = 3
self.assert_invalid(min_version_validator.MinVersionValidator,
'Trait instances cannot have a min_version > '
'resource version.')
def test_invalid_min_version_on_iface_implements(self):
resource = self.get_test_resource()
iface_implements = self.get_test_group()
resource.version = 2
iface_implements.min_version = 3
self.assert_invalid(min_version_validator.MinVersionValidator,
'Iface implementations cannot have a min_version '
'> resource version.')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jon-bell/BeDivFuzz",
"score": 3
} |
#### File: BeDivFuzz/scripts/gen_crash_table.py
```python
import csv, os, sys
import numpy as np
from tabulate import tabulate
def get_log_crash_stats(approach, benchmark, base_dir, num_trials=30):
results_dir = os.path.join(base_dir, 'java-data')
all_crashes = {}
for i in range(1, num_trials+1):
trial_dir = os.path.join(results_dir, '-'.join([approach, benchmark, str(i)]))
failures_dir = os.path.join(trial_dir, 'failures')
log_data = os.path.join(trial_dir, 'fuzz.log')
# Crashes found during this run
crashes = {}
# Check if failures folder contains failing inputs
if len(os.listdir(failures_dir)) != 0:
crash_count = 0
with open(log_data, 'r') as logfile:
lines = logfile.readlines()
for line in lines:
if 'Found crash:' in line:
crash_info = line.split(' ')
crash_time = int(crash_info[0])
crash_id = crash_info[5]
# We skip these errors since they have resulted from the technique itself
if 'OutOfMemoryError' in crash_id:
continue
# Add first occurence of crash to dict
if not crash_id in crashes:
crashes[crash_id] = crash_time
crash_count += 1
# Update all crashes found
for crash_id in crashes.keys():
if crash_id in all_crashes:
all_crashes[crash_id].append(crashes[crash_id])
else:
all_crashes[crash_id] = [crashes[crash_id]]
return all_crashes
def aggregate_crash_stats(crash_dict, num_trials=30):
result_dict = {}
for crash in crash_dict.keys():
crash_times = crash_dict[crash]
mean_crash_time = np.mean(crash_times)
reliability = len(crash_times)/num_trials * 100
result_dict[crash] = (mean_crash_time, reliability)
return result_dict
# Main
if len(sys.argv) < 2:
print(f'Usage: python {sys.argv[0]} results_dir [num_trials]')
sys.exit()
base_dir = sys.argv[1]
if not os.path.isdir(base_dir):
print(f"Usage: python {sys.argv[0]} results_dir [num_trials]")
print("ERROR: {} is not a directory".format(base_dir))
sys.exit()
num_trials = int(sys.argv[2]) if len(sys.argv) > 2 else 30
approaches = ['bediv-simple', 'bediv-structure', 'zest','quickcheck', 'rl']
benchmarks = ['ant', 'maven', 'closure', 'rhino', 'tomcat', 'nashorn']
all_rows = []
all_rows.append(['Crash-ID', 'bediv-simple', 'bediv-structure', 'zest','quickcheck', 'rl'])
for bench in benchmarks:
crashes = set()
results = []
for approach in approaches:
result = aggregate_crash_stats(get_log_crash_stats(approach, bench, base_dir, num_trials))
results.append(result)
crashes.update(result.keys())
for crash in crashes:
row = []
row.append(bench + '.' + crash[crash.find('java.lang.')+10:])
for result in results:
if crash in result:
# Convert to minute
mean_t_crash = result[crash][0]/60000
# Some bugs may have been found under a minute
if mean_t_crash >= 1:
row.append("%.0f (%d\%%)" % (result[crash][0]/60000, result[crash][1]))
else:
row.append("<1 (%d\%%)" % (result[crash][1]))
else:
row.append('-')
all_rows.append(row)
output = tabulate(all_rows, tablefmt='fancy_grid') + "\n\t\tTable 1: Average time (in minutes) and reliability of triggering a particular crash."
with open (os.path.join(base_dir,'crash_table.txt'), 'w') as f:
f.write(output)
print(output)
``` |
{
"source": "jonberenguer/dockerfiles",
"score": 3
} |
#### File: dockerfiles/docker-management/mgmt-docker.py
```python
import subprocess
def runcommand(cmd):
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
universal_newlines=True)
while True:
output = process.stdout.readline()
print(output.strip())
# Do something else
return_code = process.poll()
if return_code is not None:
print('RETURN CODE', return_code)
# Process has finished, read rest of the output
for output in process.stdout.readlines():
print(output.strip())
break
BASEFILE="baseline-docker-images.txt"
docker_rmi = "sudo docker rmi".split(" ")
docker_pull = "sudo docker pull".split(" ")
def refresh_images():
with open(BASEFILE, 'r') as fobj:
for line in fobj:
cmd = docker_rmi + [line.strip()]
print(' '.join(cmd))
runcommand(cmd)
cmd = docker_pull + [line.strip()]
print(' '.join(cmd))
runcommand(cmd)
def app():
refresh_images()
if __name__ == "__main__":
app()
``` |
{
"source": "jonberenguer/oryx-complex-macros",
"score": 3
} |
#### File: oryx-complex-macros/custom_macros/new_mapping.py
```python
import re
import csv
from pathlib import Path
def wrapper(x):
if x.isupper():
return "SS_LSFT(SS_TAP(X_{})) SS_DELAY(100) ".format(x.upper())
elif x == " ":
return "SS_TAP(X_SPACE) SS_DELAY(100) "
else:
return "SS_TAP(X_{}) SS_DELAY(100) ".format(x.upper())
def outputidmap(string):
concat_macro = ""
for x in string:
concat_macro += wrapper(x)
# removes the list delay code
return "{}".format(concat_macro[:-15])
def outputmacro(string):
concat_macro = ""
for x in string:
concat_macro += wrapper(x)
return " SEND_STRING({});".format(concat_macro[:-1])
def new_macro(idmap, string):
key = outputidmap(idmap)
value = outputmacro(string)
return "{}\t{}".format(key, value)
def update_keymap(customcsv, keymapfile):
keymap = Path(keymapfile)
keymaptxt = keymap.read_text()
csvfile = Path(customcsv).read_text()
reader = csv.reader(csvfile.splitlines(), delimiter='\t')
for row in reader:
escword = re.escape("{}".format(row[0]))
keymaptxt = re.sub(r'.*' + escword + r'.*' , row[1], keymaptxt)
#print("replaced: {}".format(row[0]))
keymap.write_text(keymaptxt)
def post_fix(keymapfile):
keymap = Path(keymapfile)
keymaptxt = keymap.read_text()
escword = re.escape("SEND_STRING(SS_LSFT(SS_TAP(X_SCOLON))")
keymaptxt = re.sub(escword, "SEND_STRING(SS_TAP(X_ESCAPE) SS_DELAY(100) SS_LSFT(SS_TAP(X_SCOLON))", keymaptxt)
keymap.write_text(keymaptxt)
``` |
{
"source": "jonberling/RollShare",
"score": 2
} |
#### File: jonberling/RollShare/run_webserver.py
```python
import http.server
PORT = 8000
class NoCacheHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def send_response_only(self, code, message=None):
super().send_response_only(code, message)
self.send_header('Cache-Control', 'no-store, must-revalidate')
self.send_header('Expires', '0')
if __name__ == '__main__':
http.server.test(HandlerClass=NoCacheHTTPRequestHandler, port=PORT)
``` |
{
"source": "jonberthet/EuroTrip_2018",
"score": 2
} |
#### File: swagger_client/models/activity.py
```python
from pprint import pformat
from six import iteritems
import re
class Activity(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, completion_percent=None, log=None, parameters=None, created_at=None, updated_at=None, environments=None, project=None, completed_at=None, state=None, result=None, started_at=None, type=None, payload=None):
"""
Activity - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'completion_percent': 'int',
'log': 'str',
'parameters': 'object',
'created_at': 'datetime',
'updated_at': 'datetime',
'environments': 'list[str]',
'project': 'str',
'completed_at': 'datetime',
'state': 'str',
'result': 'str',
'started_at': 'datetime',
'type': 'str',
'payload': 'object'
}
self.attribute_map = {
'completion_percent': 'completion_percent',
'log': 'log',
'parameters': 'parameters',
'created_at': 'created_at',
'updated_at': 'updated_at',
'environments': 'environments',
'project': 'project',
'completed_at': 'completed_at',
'state': 'state',
'result': 'result',
'started_at': 'started_at',
'type': 'type',
'payload': 'payload'
}
self._completion_percent = completion_percent
self._log = log
self._parameters = parameters
self._created_at = created_at
self._updated_at = updated_at
self._environments = environments
self._project = project
self._completed_at = completed_at
self._state = state
self._result = result
self._started_at = started_at
self._type = type
self._payload = payload
@property
def completion_percent(self):
"""
Gets the completion_percent of this Activity.
:return: The completion_percent of this Activity.
:rtype: int
"""
return self._completion_percent
@completion_percent.setter
def completion_percent(self, completion_percent):
"""
Sets the completion_percent of this Activity.
:param completion_percent: The completion_percent of this Activity.
:type: int
"""
if completion_percent is None:
raise ValueError("Invalid value for `completion_percent`, must not be `None`")
self._completion_percent = completion_percent
@property
def log(self):
"""
Gets the log of this Activity.
:return: The log of this Activity.
:rtype: str
"""
return self._log
@log.setter
def log(self, log):
"""
Sets the log of this Activity.
:param log: The log of this Activity.
:type: str
"""
if log is None:
raise ValueError("Invalid value for `log`, must not be `None`")
self._log = log
@property
def parameters(self):
"""
Gets the parameters of this Activity.
:return: The parameters of this Activity.
:rtype: object
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""
Sets the parameters of this Activity.
:param parameters: The parameters of this Activity.
:type: object
"""
if parameters is None:
raise ValueError("Invalid value for `parameters`, must not be `None`")
self._parameters = parameters
@property
def created_at(self):
"""
Gets the created_at of this Activity.
:return: The created_at of this Activity.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""
Sets the created_at of this Activity.
:param created_at: The created_at of this Activity.
:type: datetime
"""
if created_at is None:
raise ValueError("Invalid value for `created_at`, must not be `None`")
self._created_at = created_at
@property
def updated_at(self):
"""
Gets the updated_at of this Activity.
:return: The updated_at of this Activity.
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""
Sets the updated_at of this Activity.
:param updated_at: The updated_at of this Activity.
:type: datetime
"""
if updated_at is None:
raise ValueError("Invalid value for `updated_at`, must not be `None`")
self._updated_at = updated_at
@property
def environments(self):
"""
Gets the environments of this Activity.
:return: The environments of this Activity.
:rtype: list[str]
"""
return self._environments
@environments.setter
def environments(self, environments):
"""
Sets the environments of this Activity.
:param environments: The environments of this Activity.
:type: list[str]
"""
if environments is None:
raise ValueError("Invalid value for `environments`, must not be `None`")
self._environments = environments
@property
def project(self):
"""
Gets the project of this Activity.
:return: The project of this Activity.
:rtype: str
"""
return self._project
@project.setter
def project(self, project):
"""
Sets the project of this Activity.
:param project: The project of this Activity.
:type: str
"""
if project is None:
raise ValueError("Invalid value for `project`, must not be `None`")
self._project = project
@property
def completed_at(self):
"""
Gets the completed_at of this Activity.
:return: The completed_at of this Activity.
:rtype: datetime
"""
return self._completed_at
@completed_at.setter
def completed_at(self, completed_at):
"""
Sets the completed_at of this Activity.
:param completed_at: The completed_at of this Activity.
:type: datetime
"""
if completed_at is None:
raise ValueError("Invalid value for `completed_at`, must not be `None`")
self._completed_at = completed_at
@property
def state(self):
"""
Gets the state of this Activity.
:return: The state of this Activity.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this Activity.
:param state: The state of this Activity.
:type: str
"""
if state is None:
raise ValueError("Invalid value for `state`, must not be `None`")
self._state = state
@property
def result(self):
"""
Gets the result of this Activity.
:return: The result of this Activity.
:rtype: str
"""
return self._result
@result.setter
def result(self, result):
"""
Sets the result of this Activity.
:param result: The result of this Activity.
:type: str
"""
if result is None:
raise ValueError("Invalid value for `result`, must not be `None`")
self._result = result
@property
def started_at(self):
"""
Gets the started_at of this Activity.
:return: The started_at of this Activity.
:rtype: datetime
"""
return self._started_at
@started_at.setter
def started_at(self, started_at):
"""
Sets the started_at of this Activity.
:param started_at: The started_at of this Activity.
:type: datetime
"""
if started_at is None:
raise ValueError("Invalid value for `started_at`, must not be `None`")
self._started_at = started_at
@property
def type(self):
"""
Gets the type of this Activity.
:return: The type of this Activity.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Activity.
:param type: The type of this Activity.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
@property
def payload(self):
"""
Gets the payload of this Activity.
:return: The payload of this Activity.
:rtype: object
"""
return self._payload
@payload.setter
def payload(self, payload):
"""
Sets the payload of this Activity.
:param payload: The payload of this Activity.
:type: object
"""
if payload is None:
raise ValueError("Invalid value for `payload`, must not be `None`")
self._payload = payload
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Activity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: swagger_client/models/domainupdate.py
```python
from pprint import pformat
from six import iteritems
import re
class Domainupdate(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, ssl=None, wildcard=None):
"""
Domainupdate - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'ssl': 'DomainSSLupdate',
'wildcard': 'bool'
}
self.attribute_map = {
'ssl': 'ssl',
'wildcard': 'wildcard'
}
self._ssl = ssl
self._wildcard = wildcard
@property
def ssl(self):
"""
Gets the ssl of this Domainupdate.
:return: The ssl of this Domainupdate.
:rtype: DomainSSLupdate
"""
return self._ssl
@ssl.setter
def ssl(self, ssl):
"""
Sets the ssl of this Domainupdate.
:param ssl: The ssl of this Domainupdate.
:type: DomainSSLupdate
"""
if ssl is None:
raise ValueError("Invalid value for `ssl`, must not be `None`")
self._ssl = ssl
@property
def wildcard(self):
"""
Gets the wildcard of this Domainupdate.
:return: The wildcard of this Domainupdate.
:rtype: bool
"""
return self._wildcard
@wildcard.setter
def wildcard(self, wildcard):
"""
Sets the wildcard of this Domainupdate.
:param wildcard: The wildcard of this Domainupdate.
:type: bool
"""
if wildcard is None:
raise ValueError("Invalid value for `wildcard`, must not be `None`")
self._wildcard = wildcard
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Domainupdate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: swagger_client/models/environmentupdate.py
```python
from pprint import pformat
from six import iteritems
import re
class Environmentupdate(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, enable_smtp=None, name=None, parent=None, title=None, restrict_robots=None, http_access=None):
"""
Environmentupdate - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'enable_smtp': 'bool',
'name': 'str',
'parent': 'str',
'title': 'str',
'restrict_robots': 'bool',
'http_access': 'HttpAccessupdate'
}
self.attribute_map = {
'enable_smtp': 'enable_smtp',
'name': 'name',
'parent': 'parent',
'title': 'title',
'restrict_robots': 'restrict_robots',
'http_access': 'http_access'
}
self._enable_smtp = enable_smtp
self._name = name
self._parent = parent
self._title = title
self._restrict_robots = restrict_robots
self._http_access = http_access
@property
def enable_smtp(self):
"""
Gets the enable_smtp of this Environmentupdate.
:return: The enable_smtp of this Environmentupdate.
:rtype: bool
"""
return self._enable_smtp
@enable_smtp.setter
def enable_smtp(self, enable_smtp):
"""
Sets the enable_smtp of this Environmentupdate.
:param enable_smtp: The enable_smtp of this Environmentupdate.
:type: bool
"""
if enable_smtp is None:
raise ValueError("Invalid value for `enable_smtp`, must not be `None`")
self._enable_smtp = enable_smtp
@property
def name(self):
"""
Gets the name of this Environmentupdate.
:return: The name of this Environmentupdate.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Environmentupdate.
:param name: The name of this Environmentupdate.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def parent(self):
"""
Gets the parent of this Environmentupdate.
:return: The parent of this Environmentupdate.
:rtype: str
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this Environmentupdate.
:param parent: The parent of this Environmentupdate.
:type: str
"""
if parent is None:
raise ValueError("Invalid value for `parent`, must not be `None`")
self._parent = parent
@property
def title(self):
"""
Gets the title of this Environmentupdate.
:return: The title of this Environmentupdate.
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""
Sets the title of this Environmentupdate.
:param title: The title of this Environmentupdate.
:type: str
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`")
self._title = title
@property
def restrict_robots(self):
"""
Gets the restrict_robots of this Environmentupdate.
:return: The restrict_robots of this Environmentupdate.
:rtype: bool
"""
return self._restrict_robots
@restrict_robots.setter
def restrict_robots(self, restrict_robots):
"""
Sets the restrict_robots of this Environmentupdate.
:param restrict_robots: The restrict_robots of this Environmentupdate.
:type: bool
"""
if restrict_robots is None:
raise ValueError("Invalid value for `restrict_robots`, must not be `None`")
self._restrict_robots = restrict_robots
@property
def http_access(self):
"""
Gets the http_access of this Environmentupdate.
:return: The http_access of this Environmentupdate.
:rtype: HttpAccessupdate
"""
return self._http_access
@http_access.setter
def http_access(self, http_access):
"""
Sets the http_access of this Environmentupdate.
:param http_access: The http_access of this Environmentupdate.
:type: HttpAccessupdate
"""
if http_access is None:
raise ValueError("Invalid value for `http_access`, must not be `None`")
self._http_access = http_access
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Environmentupdate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: swagger_client/models/vpn_configuration.py
```python
from pprint import pformat
from six import iteritems
import re
class VpnConfiguration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, esp=None, margintime=None, secret=None, version=None, gateway_ip=None, ike=None, ikelifetime=None, lifetime=None, remote_subnets=None):
"""
VpnConfiguration - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'esp': 'str',
'margintime': 'str',
'secret': 'str',
'version': 'int',
'gateway_ip': 'str',
'ike': 'str',
'ikelifetime': 'str',
'lifetime': 'str',
'remote_subnets': 'list[str]'
}
self.attribute_map = {
'esp': 'esp',
'margintime': 'margintime',
'secret': 'secret',
'version': 'version',
'gateway_ip': 'gateway_ip',
'ike': 'ike',
'ikelifetime': 'ikelifetime',
'lifetime': 'lifetime',
'remote_subnets': 'remote_subnets'
}
self._esp = esp
self._margintime = margintime
self._secret = secret
self._version = version
self._gateway_ip = gateway_ip
self._ike = ike
self._ikelifetime = ikelifetime
self._lifetime = lifetime
self._remote_subnets = remote_subnets
@property
def esp(self):
"""
Gets the esp of this VpnConfiguration.
:return: The esp of this VpnConfiguration.
:rtype: str
"""
return self._esp
@esp.setter
def esp(self, esp):
"""
Sets the esp of this VpnConfiguration.
:param esp: The esp of this VpnConfiguration.
:type: str
"""
if esp is None:
raise ValueError("Invalid value for `esp`, must not be `None`")
self._esp = esp
@property
def margintime(self):
"""
Gets the margintime of this VpnConfiguration.
:return: The margintime of this VpnConfiguration.
:rtype: str
"""
return self._margintime
@margintime.setter
def margintime(self, margintime):
"""
Sets the margintime of this VpnConfiguration.
:param margintime: The margintime of this VpnConfiguration.
:type: str
"""
if margintime is None:
raise ValueError("Invalid value for `margintime`, must not be `None`")
self._margintime = margintime
@property
def secret(self):
"""
Gets the secret of this VpnConfiguration.
:return: The secret of this VpnConfiguration.
:rtype: str
"""
return self._secret
@secret.setter
def secret(self, secret):
"""
Sets the secret of this VpnConfiguration.
:param secret: The secret of this VpnConfiguration.
:type: str
"""
if secret is None:
raise ValueError("Invalid value for `secret`, must not be `None`")
self._secret = secret
@property
def version(self):
"""
Gets the version of this VpnConfiguration.
:return: The version of this VpnConfiguration.
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this VpnConfiguration.
:param version: The version of this VpnConfiguration.
:type: int
"""
if version is None:
raise ValueError("Invalid value for `version`, must not be `None`")
self._version = version
@property
def gateway_ip(self):
"""
Gets the gateway_ip of this VpnConfiguration.
:return: The gateway_ip of this VpnConfiguration.
:rtype: str
"""
return self._gateway_ip
@gateway_ip.setter
def gateway_ip(self, gateway_ip):
"""
Sets the gateway_ip of this VpnConfiguration.
:param gateway_ip: The gateway_ip of this VpnConfiguration.
:type: str
"""
if gateway_ip is None:
raise ValueError("Invalid value for `gateway_ip`, must not be `None`")
self._gateway_ip = gateway_ip
@property
def ike(self):
"""
Gets the ike of this VpnConfiguration.
:return: The ike of this VpnConfiguration.
:rtype: str
"""
return self._ike
@ike.setter
def ike(self, ike):
"""
Sets the ike of this VpnConfiguration.
:param ike: The ike of this VpnConfiguration.
:type: str
"""
if ike is None:
raise ValueError("Invalid value for `ike`, must not be `None`")
self._ike = ike
@property
def ikelifetime(self):
"""
Gets the ikelifetime of this VpnConfiguration.
:return: The ikelifetime of this VpnConfiguration.
:rtype: str
"""
return self._ikelifetime
@ikelifetime.setter
def ikelifetime(self, ikelifetime):
"""
Sets the ikelifetime of this VpnConfiguration.
:param ikelifetime: The ikelifetime of this VpnConfiguration.
:type: str
"""
if ikelifetime is None:
raise ValueError("Invalid value for `ikelifetime`, must not be `None`")
self._ikelifetime = ikelifetime
@property
def lifetime(self):
"""
Gets the lifetime of this VpnConfiguration.
:return: The lifetime of this VpnConfiguration.
:rtype: str
"""
return self._lifetime
@lifetime.setter
def lifetime(self, lifetime):
"""
Sets the lifetime of this VpnConfiguration.
:param lifetime: The lifetime of this VpnConfiguration.
:type: str
"""
if lifetime is None:
raise ValueError("Invalid value for `lifetime`, must not be `None`")
self._lifetime = lifetime
@property
def remote_subnets(self):
"""
Gets the remote_subnets of this VpnConfiguration.
:return: The remote_subnets of this VpnConfiguration.
:rtype: list[str]
"""
return self._remote_subnets
@remote_subnets.setter
def remote_subnets(self, remote_subnets):
"""
Sets the remote_subnets of this VpnConfiguration.
:param remote_subnets: The remote_subnets of this VpnConfiguration.
:type: list[str]
"""
if remote_subnets is None:
raise ValueError("Invalid value for `remote_subnets`, must not be `None`")
self._remote_subnets = remote_subnets
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VpnConfiguration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
``` |
{
"source": "jon-betts/pyramid_services",
"score": 2
} |
#### File: src/pyramid_services/__init__.py
```python
from pyramid.config import PHASE2_CONFIG
from pyramid.interfaces import IRequest
from wired import ServiceRegistry
from zope.interface import Interface
_marker = object()
class IServiceRegistry(Interface):
""" A marker interface for the service registry."""
class SingletonServiceWrapper:
def __init__(self, service):
self.service = service
def __call__(self, context, request):
return self.service
class ProxyFactory:
def __init__(self, factory):
self.factory = factory
def __call__(self, container):
request = container.get(IRequest)
return self.factory(container.context, request)
class NewServiceContainer:
"""
Event emitted when a request creates a service container.
This is useful for registering any per-request services like as a way
to inject ``request.tm`` into your container as the transaction manager.
:ivar container: The service container.
:ivar request: The request.
"""
def __init__(self, container, request):
self.container = container
self.request = request
def includeme(config):
config.add_request_method(find_service_factory)
config.add_request_method(find_service)
config.add_request_method(get_services, "services", reify=True)
config.add_directive("set_service_registry", set_service_registry)
config.add_directive("register_service", register_service)
config.add_directive("register_service_factory", register_service_factory)
config.add_directive("find_service_factory", find_service_factory)
config.set_service_registry(ServiceRegistry())
def set_service_registry(config, registry):
def register():
config.registry.registerUtility(registry, IServiceRegistry)
intr = config.introspectable(
category_name="service registry",
discriminator="service registry",
title="service registry",
type_name="service registry",
)
intr["registry"] = registry
config.action(
"service registry",
register,
introspectables=(intr,),
order=PHASE2_CONFIG,
)
def register_service(config, service, iface=Interface, context=None, name=""):
service = config.maybe_dotted(service)
service_factory = SingletonServiceWrapper(service)
config.register_service_factory(
service_factory, iface, context=context, name=name
)
def register_service_factory(
config, service_factory, iface=Interface, context=None, name=""
):
service_factory = config.maybe_dotted(service_factory)
iface = config.maybe_dotted(iface)
context = config.maybe_dotted(context)
def register():
registry = config.registry.getUtility(IServiceRegistry)
registry.register_factory(
ProxyFactory(service_factory), iface, context=context, name=name
)
discriminator = ("service factories", (iface, context, name))
if isinstance(service_factory, SingletonServiceWrapper):
type_name = _type_name(service_factory.service)
else:
type_name = _type_name(service_factory)
intr = config.introspectable(
category_name="pyramid_services",
discriminator=discriminator,
title=str((_type_name(iface), _type_name(context), name)),
type_name=type_name,
)
intr["name"] = name
intr["type"] = iface
intr["context"] = context
config.action(discriminator, register, introspectables=(intr,))
def find_service_factory(
config_or_request, iface=Interface, context=None, name=""
):
registry = config_or_request.registry.getUtility(IServiceRegistry)
factory = registry.find_factory(iface, context=context, name=name)
if factory is None:
raise LookupError("could not find registered service")
if isinstance(factory, ProxyFactory):
return factory.factory
return factory
def get_services(request):
registry = request.registry.getUtility(IServiceRegistry)
container = registry.create_container()
container.register_singleton(request, IRequest)
request.add_finished_callback(cleanup_request)
request.registry.notify(NewServiceContainer(container, request))
return container
def cleanup_request(request):
request.__dict__.pop('services', None)
def find_service(request, iface=Interface, context=_marker, name=""):
if context is _marker:
context = getattr(request, "context", None)
return request.services.get(iface, context=context, name=name)
def _type_name(obj):
name = getattr(obj, "__name__", None)
if name is None:
name = type(obj).__name__
return name
``` |
{
"source": "jonbgallant/azure-iot-sdk-python",
"score": 3
} |
#### File: device/samples/iothub_client_args.py
```python
import getopt
from iothub_client import IoTHubTransportProvider
class OptionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def get_iothub_opt(
argv,
connection_string,
protocol=IoTHubTransportProvider.MQTT):
if len(argv) > 0:
try:
opts, args = getopt.getopt(
argv, "hp:c:", [
"protocol=", "connectionstring="])
except getopt.GetoptError as opt_error:
raise OptionError("Error: %s" % opt_error.msg)
for opt, arg in opts:
if opt == '-h':
raise OptionError("Help:")
elif opt in ("-p", "--protocol"):
protocol_string = arg.lower()
if protocol_string == "http":
if hasattr(IoTHubTransportProvider, "HTTP"):
protocol = IoTHubTransportProvider.HTTP
else:
raise OptionError("Error: HTTP protocol is not supported")
elif protocol_string == "amqp":
if hasattr(IoTHubTransportProvider, "AMQP"):
protocol = IoTHubTransportProvider.AMQP
else:
raise OptionError("Error: AMQP protocol is not supported")
elif protocol_string == "amqp_ws":
if hasattr(IoTHubTransportProvider, "AMQP_WS"):
protocol = IoTHubTransportProvider.AMQP_WS
else:
raise OptionError("Error: AMQP_WS protocol is not supported")
elif protocol_string == "mqtt":
if hasattr(IoTHubTransportProvider, "MQTT"):
protocol = IoTHubTransportProvider.MQTT
else:
raise OptionError("Error: MQTT protocol is not supported")
elif hasattr(IoTHubTransportProvider, "MQTT_WS"):
if hasattr(IoTHubTransportProvider, "MQTT_WS"):
protocol = IoTHubTransportProvider.MQTT_WS
else:
raise OptionError("Error: MQTT_WS protocol is not supported")
else:
raise OptionError(
"Error: unknown protocol %s" %
protocol_string)
elif opt in ("-c", "--connectionstring"):
connection_string = arg
if connection_string.find("HostName") < 0:
raise OptionError(
"Error: Hostname not found, not a valid connection string")
return connection_string, protocol
```
#### File: device/samples/iothub_client_shared_transport_sample.py
```python
import random
import time
import sys
import iothub_client
from iothub_client import IoTHubClient, IoTHubClientError, IoTHubTransportProvider, IoTHubClientResult
from iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError, DeviceMethodReturnValue
from iothub_client import IoTHubTransport, IoTHubConfig, IoTHubClientRetryPolicy
from iothub_client_args import get_iothub_opt, OptionError
# messageTimeout - the maximum time in milliseconds until a message times out.
# The timeout period starts at IoTHubClient.send_event_async.
# By default, messages do not expire.
MESSAGE_TIMEOUT = 10000
RECEIVE_CONTEXT = 0
AVG_WIND_SPEED = 10.0
MIN_TEMPERATURE = 20.0
MIN_HUMIDITY = 60.0
MESSAGE_COUNT = 10
CONNECTION_STATUS_CONTEXT = 0
# global counters
RECEIVE_CALLBACKS = 0
SEND_CALLBACKS = 0
CONNECTION_STATUS_CALLBACKS = 0
SEND_REPORTED_STATE_CALLBACKS = 0
# for trasnport sharing chose AMQP or AMQP_WS as transport protocol
PROTOCOL = IoTHubTransportProvider.AMQP_WS
# Strings containing IoTHubName, IoTHubSuffix, Device Id & Device Key for two devices:
IOTHUBNAME = "[IoTHub Name]";
IOTHUBSUFFIX = "[IoTHub Suffix]";
DEVICE_NAME1 = "[Device Name 1]";
DEVICE_NAME2 = "[Device Name 2]";
DEVICE_KEY1 = "[Device Key 1]";
DEVICE_KEY2 = "[Device Key 2]";
MSG_TXT = "{\"deviceId\": \"myPythonDevice\",\"windSpeed\": %.2f,\"temperature\": %.2f,\"humidity\": %.2f}"
# some embedded platforms need certificate information
def set_certificates(client):
from iothub_client_cert import CERTIFICATES
try:
client.set_option("TrustedCerts", CERTIFICATES)
print ( "set_option TrustedCerts successful" )
except IoTHubClientError as iothub_client_error:
print ( "set_option TrustedCerts failed (%s)" % iothub_client_error )
def receive_message_callback1(message, counter):
global RECEIVE_CALLBACKS
message_buffer = message.get_bytearray()
size = len(message_buffer)
print ( "Received Message [%d]:" % counter )
print ( " Data: <<<%s>>> & Size=%d" % (message_buffer[:size].decode('utf-8'), size) )
map_properties = message.properties()
key_value_pair = map_properties.get_internals()
print ( " Properties: %s" % key_value_pair )
counter += 1
RECEIVE_CALLBACKS += 1
print ( " Total calls received: %d" % RECEIVE_CALLBACKS )
return IoTHubMessageDispositionResult.ACCEPTED
def receive_message_callback2(message, counter):
global RECEIVE_CALLBACKS
message_buffer = message.get_bytearray()
size = len(message_buffer)
print ( "Received Message [%d]:" % counter )
print ( " Data: <<<%s>>> & Size=%d" % (message_buffer[:size].decode('utf-8'), size) )
map_properties = message.properties()
key_value_pair = map_properties.get_internals()
print ( " Properties: %s" % key_value_pair )
counter += 1
RECEIVE_CALLBACKS += 1
print ( " Total calls received: %d" % RECEIVE_CALLBACKS )
return IoTHubMessageDispositionResult.ACCEPTED
def send_confirmation_callback1(message, result, user_context):
global SEND_CALLBACKS
print ( "Client1 - Confirmation[%d] received for message with result = %s" % (user_context, result) )
map_properties = message.properties()
print ( " message_id: %s" % message.message_id )
print ( " correlation_id: %s" % message.correlation_id )
key_value_pair = map_properties.get_internals()
print ( " Properties: %s" % key_value_pair )
SEND_CALLBACKS += 1
print ( " Total calls confirmed: %d" % SEND_CALLBACKS )
def send_confirmation_callback2(message, result, user_context):
global SEND_CALLBACKS
print ( "Client2 - Confirmation[%d] received for message with result = %s" % (user_context, result) )
map_properties = message.properties()
print ( " message_id: %s" % message.message_id )
print ( " correlation_id: %s" % message.correlation_id )
key_value_pair = map_properties.get_internals()
print ( " Properties: %s" % key_value_pair )
SEND_CALLBACKS += 1
print ( " Total calls confirmed: %d" % SEND_CALLBACKS )
def connection_status_callback(result, reason, user_context):
global CONNECTION_STATUS_CALLBACKS
print ( "Connection status changed[%d] with:" % (user_context) )
print ( " reason: %d" % reason )
print ( " result: %s" % result )
CONNECTION_STATUS_CALLBACKS += 1
print ( " Total calls confirmed: %d" % CONNECTION_STATUS_CALLBACKS )
def iothub_client_init(transport, device_name, device_key):
# prepare iothub client with transport
config = IoTHubConfig(PROTOCOL, device_name, device_key, "", IOTHUBNAME, IOTHUBSUFFIX, "")
client = IoTHubClient(transport, config)
# set the time until a message times out
client.set_option("messageTimeout", MESSAGE_TIMEOUT)
client.set_connection_status_callback(connection_status_callback, CONNECTION_STATUS_CONTEXT)
return client
def print_last_message_time(client):
try:
last_message = client.get_last_message_receive_time()
print ( "Last Message: %s" % time.asctime(time.localtime(last_message)) )
print ( "Actual time : %s" % time.asctime() )
except IoTHubClientError as iothub_client_error:
if iothub_client_error.args[0].result == IoTHubClientResult.INDEFINITE_TIME:
print ( "No message received" )
else:
print ( iothub_client_error )
def iothub_client_shared_transport_sample_run():
try:
# create transport to share
transport = IoTHubTransport(PROTOCOL, IOTHUBNAME, IOTHUBSUFFIX)
client1 = iothub_client_init(transport, DEVICE_NAME1, DEVICE_KEY1)
client1.set_message_callback(receive_message_callback1, RECEIVE_CONTEXT)
client2 = iothub_client_init(transport, DEVICE_NAME2, DEVICE_KEY2)
client2.set_message_callback(receive_message_callback2, RECEIVE_CONTEXT)
print ( "IoTHubClient has been initialized" )
while True:
# send a few messages every minute
print ( "IoTHubClient sending %d messages" % MESSAGE_COUNT )
for message_counter in range(0, MESSAGE_COUNT):
temperature = MIN_TEMPERATURE + (random.random() * 10)
humidity = MIN_HUMIDITY + (random.random() * 20)
msg_txt_formatted = MSG_TXT % (
AVG_WIND_SPEED + (random.random() * 4 + 2),
temperature,
humidity)
# messages can be encoded as string or bytearray
if (message_counter & 1) == 1:
message = IoTHubMessage(bytearray(msg_txt_formatted, 'utf8'))
else:
message = IoTHubMessage(msg_txt_formatted)
# optional: assign ids
message.message_id = "message_%d" % message_counter
message.correlation_id = "correlation_%d" % message_counter
# optional: assign properties
prop_map = message.properties()
prop_map.add("temperatureAlert", 'true' if temperature > 28 else 'false')
if (message_counter % 2) == 0:
client1.send_event_async(message, send_confirmation_callback1, message_counter)
else:
client2.send_event_async(message, send_confirmation_callback2, message_counter)
print ( "IoTHubClient.send_event_async accepted message [%d] for transmission to IoT Hub." % message_counter )
# Wait for Commands or exit
print ( "IoTHubClient waiting for commands, press Ctrl-C to exit" )
status_counter = 0
while status_counter < MESSAGE_COUNT:
status1 = client1.get_send_status()
print ( "Send status client1: %s" % status1 )
status2 = client2.get_send_status()
print ( "Send status client2: %s" % status2 )
time.sleep(10)
status_counter += 1
except IoTHubError as iothub_error:
print ( "Unexpected error %s from IoTHub" % iothub_error )
return
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
print_last_message_time(client1)
print_last_message_time(client2)
if __name__ == '__main__':
print ( "\nPython %s" % sys.version )
print ( "IoT Hub Client for Python" )
print ( "Starting the IoT Hub Shared Transport Python sample..." )
iothub_client_shared_transport_sample_run()
``` |
{
"source": "jonbinney/python-planning",
"score": 3
} |
#### File: python_task_planning/scripts/new_logic.py
```python
class ConjunctionOFluents:
def __init__(self, fluents):
self.fluents = fluents
class Fluent(ConjunctionOfFluents):
def __init__(self, pred, argnames, args):
self.pred = pred
self.argnames = argnames
self.args = args
self.rval = rval
def __and__(self, other):
class Predicate:
def __init__(self, name, argnames):
self.name = name
self.argnames = argnames
def __call__(self, args):
return Fluent(self.name, self.argnames, args)
def TableIsSet(table):
return Fluent('TableIsSet', ['table'], [table])
class Conjunction(Predicate):
def __init__(self, args
TableIsSet = Predicate('TableIsSet', argnames=['Table'])
```
#### File: src/python_task_planning/dot_graph.py
```python
from python_task_planning import ConjunctionOfFluents, OperatorInstance
import pygraphviz as pgv
lpk_style = dict(
operator_instance = dict(shape='box', style='filled', color='thistle1'),
primitive = dict(shape='box', style='filled', color='darkseagreen1'),
plan_goal = dict(shape='box', style='filled', color='lightsteelblue1'),
plan_step_arrow = dict(),
refinement_arrow = dict(style='dashed')
)
garish_style = dict(
operator_instance = dict(shape='box', style='filled', color='red'),
primitive = dict(shape='box', style='filled', color='green'),
plan_goal = dict(shape='box', style='filled', color='blue'),
plan_step_arrow = dict(),
refinement_arrow = dict(style='dashed')
)
def dot_from_plan_tree(tree, G=None, style=None):
if G is None:
G = pgv.AGraph(strict=True, directed=True)
if style is None:
style = lpk_style
G.add_node(id(tree), label=str(tree.goal), **style['plan_goal'])
if not tree.plan == None:
for op, subtree in tree.plan:
if op == None:
pass
elif op.concrete:
G.add_node(id(op), label=str(op), **style['primitive'])
G.add_edge(id(tree), id(op), **style['plan_step_arrow'])
else:
G.add_node(id(op), label=str(op), **style['operator_instance'])
G.add_edge(id(tree), id(op), **style['plan_step_arrow'])
dot_from_plan_tree(subtree, G, style)
G.add_edge(id(op), id(subtree), **style['refinement_arrow'])
return G
``` |
{
"source": "jonbinney/trajectory_smoothing",
"score": 3
} |
#### File: src/traj/discrete_time_parameterization.py
```python
import numpy as np
MAX_TIME_STEPS = 10000
# How close we have to be to a given position/velocity/acceleration to consider it "reached".
# These are needed because we are using a discrete approximation of the trajectory. These
# should all be small enough that if you commanded zero velocity starting now, the resulting
# deceleration would be less than the robot's limits.
POSITION_THRESHOLD = 0.01
VELOCITY_THRESHOLD = 0.001
ACCELERATION_THRESHOLD = 0.01
JERK_THRESHOLD = 0.01
def pvaj_to_pppp(p0, v0, a0, j0, delta_t):
"""
Convert position, velocity, acceleration, and jerk at one timestep to positions for
the next 4 timesteps.
"""
a_arr = [a0, a0 + j0 * delta_t]
v_arr = [v0]
for a in a_arr:
v_arr.append(v_arr[-1] + delta_t * a)
p_arr = [p0]
for v in v_arr:
p_arr.append(p_arr[-1] + delta_t * v)
return p_arr
def pppp_to_pvaj(pppp):
pass
def integrate(p, v, a, j, delta_t):
"""
Propagate the trajectory forward one timestep.
"""
return p + v * delta_t, v + a * delta_t, a + j * delta_t
def smooth_stop_fine_adjustment(trajectory, is_valid, j_max, delta_t, increments=10):
"""
"""
smoothed_trajectory = np.tile(np.nan, (MAX_TIME_STEPS, 4))
smoothed_trajectory[:len(trajectory)] = trajectory
if len(trajectory) == 0:
return trajectory
for j in np.linspace(-j_max, ):
for time_i in range(len(trajectory)):
pass
def smooth_stop(trajectory, is_valid, j_max, delta_t):
"""
We start with a trajectory which reaches zero velocity if use the most negative valid jerk
at every timestep. Unfortunately we probably reach zero velocity with a large negative
acceleration. We need to reach zero velocity and zero acceleration at the same time,
and so need to switch from max negative jerk to max positive jerk at some timestep.
"""
if trajectory[-1, 2] > -ACCELERATION_THRESHOLD:
# No Need to add a positive jerk section.
return trajectory
smoothed_trajectory = np.tile(np.nan, (MAX_TIME_STEPS, 4))
smoothed_trajectory[:len(trajectory)] = trajectory
# We need to add a positive jerk section.
for positive_jerk_start_time_i in range(len(trajectory) - 1, -1, -1):
for time_i in range(positive_jerk_start_time_i, len(smoothed_trajectory) - 1):
if not is_valid(*smoothed_trajectory[time_i, :3], j_max):
return None
if smoothed_trajectory[time_i, 1] < -VELOCITY_THRESHOLD:
# We weren't reduce acceleration magnitude to zero before velocity hit zero.
break
if smoothed_trajectory[time_i, 2] > -ACCELERATION_THRESHOLD:
return smoothed_trajectory[:time_i + 1]
smoothed_trajectory[time_i, 3] = j_max
smoothed_trajectory[time_i + 1, :3] = integrate(*smoothed_trajectory[time_i], delta_t)
# We were unable to decelerate.
return None
def compute_stopping_trajectory(p_start, v_start, a_start, is_valid, j_max, delta_t):
"""
The jerk returned by this function during the final timestep is meaningless, since we've
already stopped at that point.
"""
trajectory = np.tile(np.nan, (MAX_TIME_STEPS, 4))
trajectory[0, :3] = p_start, v_start, a_start
# Decelerate until our velocity drops to zero.
for time_i in range(MAX_TIME_STEPS):
# Invariant: positions, velocities, and accelerations up to and including index time_i have
# been defined. Jerks up to and including index time_i - 1 have been defined. positions,
# velocities, accelerations, and jerks up to and including index time_i - 1 are set to
# valid values. No guarantees for the validity of position, velocity, and acceleration
# values at index time_i.
found_valid_jerk = False
for j in (-j_max, 0.0):
if not is_valid(*trajectory[time_i, :3], j):
continue
trajectory[time_i, 3] = j
trajectory[time_i + 1, :3] = integrate(*trajectory[time_i], delta_t)
# Position, velocity, and acceleration in the next timestep depend on the jerk
# from this timestep, so we have to look one timestep into the future. In particular,
# if we don't do this check, the acceleration might become too negative.
if not is_valid(*trajectory[time_i + 1, :3], 0.0):
continue
if trajectory[time_i + 1, 1] < VELOCITY_THRESHOLD:
return smooth_stop(trajectory[:time_i + 2], is_valid, j_max, delta_t)
# We try the most desirable jerk (the one that will slow us down the fastest) first.
# Because of this, we can stop as soon as we find a valid jerk - it is definitely
# the best one.
found_valid_jerk = True
break
if not found_valid_jerk:
return None
raise RuntimeError('Failed to find a solution after {} trajectory points'.format(
MAX_TIME_STEPS))
def parameterize_path_discrete(p_start, p_end, is_valid, j_max, delta_t):
# Each row of the trajectory array is one timestep. The columns contain values for position,
# velocity, acceleration, and jerk, in that order. We start off with all values as NaN so
# that we can tell if we accidentally use an uninitialized value.
trajectory = np.tile(np.nan, (MAX_TIME_STEPS, 4))
trajectory[0][:3] = p_start, 0.0, 0.0
stopping_trajectory = None
for time_i in range(MAX_TIME_STEPS):
next_stopping_trajectory = None
if is_valid(*trajectory[time_i, :3], j_max):
# Integrate trajectory forward to the next timestep using this jerk.
p_next, v_next, a_next = integrate(*trajectory[time_i, :3], j_max, delta_t)
next_stopping_trajectory = compute_stopping_trajectory(
p_next, v_next, a_next, is_valid, j_max, delta_t)
if next_stopping_trajectory is None:
if stopping_trajectory is None:
raise RuntimeError("No valid trajectory at start")
else:
# Use the jerk from last timestep's stopping trajectory.
trajectory[time_i, 3] = stopping_trajectory[0, 3]
trajectory[time_i + 1, :3] = integrate(*trajectory[time_i], delta_t)
stopping_trajectory = stopping_trajectory[1:]
else:
trajectory[time_i, 3] = j_max
stopping_trajectory = next_stopping_trajectory
trajectory[time_i + 1, :3] = p_next, v_next, a_next
if stopping_trajectory[-1][0] >= p_end - POSITION_THRESHOLD:
# Reached our goal.
return np.vstack((trajectory[:time_i], stopping_trajectory))
raise RuntimeError(
'Failed to find a solution after {} trajectory points'.format(MAX_TIME_STEPS))
```
#### File: src/traj/fanuc_limits.py
```python
import numpy as np
from scipy.interpolate import interp2d
def gen_limit_interpolation_func(no_load_thresh, max_load_thresh, payload_max,
cart_vmax=4.0, num_steps=20,
interp_func='linear'):
"""Generates a function which can be used to lookup the scaled (positive)
limit for a joint based on the NO and MAX load threshold tables, payload
and Cartesian velocity of the TCP of a Fanuc with J519.
The returned function object wraps the function returned by a call to
scipy.interp2d(..), taking in current Cartesian velocity of the TCP
(in m/s) and current weight of the payload (in Kg) and returns the 2D
interpolated velocity, acceleration or jerk limit based on the information
in the provided threshold tables (see below).
The payload argument is optional and will default to 'payload_max', as
provided in the call to 'gen_limit_interpolation_func(..)' (ie: this
function).
This will result in the slowest limit being returned, which would result
in a conservative estimate of the capabilities of the robot for the given
Cartesian velocity (but should not result in motion execution errors due
to violating a joint limit).
Depending on whether threshold tables for velocity, acceleration or jerk
are passed in, the limits returned are velocity, acceleration or jerk
limits.
The threshold tables are expected to conform to the format as returned by
a controller with J519 upon receipt of a 'Type 3' packet (ie: Request/Ack).
Assumptions are: 20 elements per table, elements of type float, sorted in
descending order (ie: max limit -> fully scaled down limit).
Args:
no_load_thresh: threshold table for NO load configuration
list(float)
max_load_thresh: threshold table for MAX load configuration
list(float)
payload_max: maximum payload supported by the robot (Kg)
float
cart_vmax: maximum Cartesian velocity supported by the robot (m/s)
default: 4.0
float
num_steps: number of entries in a single threshold table
default: 20
int
interp_func: order of interpolation used. Passed on to interp2d(..)
default: 'linear'
str
Returns:
Function wrapping the return value of scipy.interp2d(..).
Args:
cart_vel: the Cartesian velocity of the TCP (m/s)
float
payload: the weight of the current payload of the robot (Kg)
default: payload_max
float
Returns:
2D interpolated joint limit for the given Cartesian velocity and
payload.
Example:
# create interpolation function for the acceleration limits of J1, with
# a maximum payload of 25 Kg, and the default maximum Cartesian velocity
# (of 4.0 m/s), default number of elements in the threshold tables (20)
# and the default interpolation strategy (linear).
j1_acc_limit_func = gen_limit_interpolation_func(
no_load_thresh=[2050.00, 2050.00, ..],
max_load_thresh=[1601.56, 1601.56, ..],
payload_max=25.0
)
# determine acceleration limit for J1 with TCP moving at 1.5 m/s and
# with a current payload of 6.3 Kg
j1_curr_acc_limit = j1_acc_limit_func(cart_vel=1.5, payload=6.3)[0]
# determine acceleration limits for J1 with TCP moving at 1.1, 1.35 and
# 1.47 m/s and the default (ie: max) payload
j1_acc_limits = j1_acc_limit_func(cart_vel=[1.1, 1.35, 1.47])
"""
len_nlt = len(no_load_thresh)
len_mlt = len(max_load_thresh)
if len_nlt != num_steps or len_mlt != num_steps:
raise ValueError(
"Threshold table should contain {} elements (got: {} and {} "
"elements for NO and MAX load respectively)"
.format(num_steps, len_nlt, len_mlt))
# TODO: check for negative max payloads
# TODO: check for negative max cart vel
# TODO: check for negative num steps
# TODO: this sets up a full 2D interpolation. Not sure that is what
# we want. Perhaps we do need to consider the 'binning' on the X-axis
# (ie: percentage of max cart velocity)
x = np.linspace(cart_vmax/num_steps, cart_vmax, num_steps)
y = [0.0, payload_max]
z = [no_load_thresh, max_load_thresh]
limit_interp2d = interp2d(x, y, z, kind=interp_func)
# create function object for caller to use for lookups
# note: similar to the robot controller, we assume maximum payload
# if nothing else has been provided
# TODO: check whether we should optimise for single lookups (ie: instead
# of mesh/multiple lookups at once): https://stackoverflow.com/a/47233198
def func(cart_vel, payload=payload_max):
# TODO: check for negative payload
# TODO: check for negative cart vel
return limit_interp2d(cart_vel, payload)
return func
```
#### File: src/traj/traj_segment.py
```python
from sympy import integrate, Symbol
from sympy.core.numbers import Float
from .piecewise_function import PiecewiseFunction
import traj
import math
import rospy
# Function to assign jerk sign for each phase based on the motion (+ve/-ve): it is determined by start/end vel, and pos_diff
def assign_jerk_sign_According_to_motion_type(p_start, p_end, v_start, v_end, p_max, v_max, a_max, j_max):
'''
This function assigns jerk sign for each phase of the segment based on the motion type (+ve/-ve)
'''
abs_v_start = abs(v_start)
abs_v_end = abs(v_end)
if v_start == v_end:
j_max_to_vf=0
j_max = math.copysign(j_max, (p_end-p_start))
else:# v_end != v_start:
if v_start*v_end < 0: #won't be need it in complex motion case
rospy.logdebug("this a complex motion, stop point will be calculated to join the +ve/-ve motion part " )
elif abs_v_start < abs_v_end : #acc motion
if(v_start >= 0 and v_end >= 0): # positive motion
j_max_to_vf = j_max #math.copysign(j_max, v_end)
j_max = math.copysign(j_max, v_end)
elif (v_start <= 0 and v_end <= 0): # negative motion
j_max_to_vf = -j_max #math.copysign(j_max, v_end)
j_max = math.copysign(j_max, v_end)
else:# v_start > v_end : #dec motion
if(v_start >= 0 and v_end >= 0): # positive motion
j_max_to_vf = -j_max #math.copysign(j_max, v_end)
j_max = math.copysign(j_max, v_end)
elif (v_start <= 0 and v_end <= 0): # negative motion
j_max_to_vf = j_max #math.copysign(j_max, v_end)
j_max = math.copysign(j_max, v_end)
return j_max_to_vf, j_max
def calculate_jerk_sign_and_duration(p_start, p_end, v_start, v_end, p_max, v_max, a_max, j_max, independent_variable=Symbol('t')):
'''
this function calculates the jerk_value && the duration associated with each phase of the segment
'''
assert(a_max > 0.0)
assert(j_max > 0.0)
assert(v_max > 0.0)
# Step_1: check limits for given start/end velocities/positions
# if absolute values v_start/v_end/p_end is greater than v_max/p_max, we replace the values with max one
# another option is to raise error and exit
# for p_start: it depends on direction of v_start, as we can not put p_start as p_max if v_start is in +ve direction
if(abs(v_start) > v_max):
v_start = math.copysign(v_max, v_start)
rospy.logdebug("\nWarning: \n>>> these values are not feasible: v_start should be within the limit v_max !")
rospy.logdebug(">>> v_start: {}, v_max: {}".format(v_start, v_max) )
#if abs(v_start) - v_max >1e-15:
raise ValueError("non feasible case: violate v_max, v_start: {}, v_max: {}".format(v_start, v_max) )
if(abs(v_end) > v_max):
v_end = math.copysign(v_max, v_end)
rospy.logdebug("\nWarning: \n>>> these values are not feasible, v_end should be within the limit v_max !")
rospy.logdebug(">>> v_end: {}, v_max: {}".format(v_end, v_max) )
raise ValueError("non feasible case: violate v_max, v_end: {}, v_max: {}".format(v_end, v_max) )
if(abs(p_end) > p_max):
rospy.logdebug("\nWarning: \n>>> these values are not feasible, p_end should be within the limit p_max !")
p_end = math.copysign(p_max, p_end)
if(abs(p_start) > p_max):
p_start = math.copysign(p_max, p_start)
if (p_start*v_start>0.0) or (v_start==0 and p_start*v_end>0.0): #direction of motion
rospy.debug("\nWarning: \n>>> these values are not feasible, p_start = p_max, and motion in the direction of v_start will violate p_max!")
raise ValueError("non feasible case: violate p_max" )
# reject unfeasible/iillogical cases
if (v_start>0 and v_end>0 and (p_end-p_start)<0): # +ve motion vs -ve pos_diff
raise ValueError("non feasible case: vel_motion opposite to pos_motion" )
elif (v_start<0 and v_end<0 and (p_end-p_start)>0): # -ve motion vs +ve pos_diff
raise ValueError("non feasible case: vel_motion opposite to pos_motion" )
# absolute value of the velocities
abs_v_start = abs(v_start)
abs_v_end = abs(v_end)
# Step_2: check motion type: complex or simple motion
# 1) complex motion: positive and negative velocities, v_start*v_end<0 ####
if (v_start * v_end) < 0.0 : #complex motion: positive and negative velocity, check min distance to change diraction of the motion
minPos_to_zero, acc_to_zero, t_jrk_to_zero, t_acc_to_zero = traj.calculate_min_pos_reached_acc_jrk_time_acc_time_to_reach_final_vel(v_start, 0.0, v_max, a_max, j_max)
minPos_to_vf, acc_to_vf, t_jrk_to_vf, t_acc_to_vf = traj.calculate_min_pos_reached_acc_jrk_time_acc_time_to_reach_final_vel( 0.0, v_end, v_max, a_max, j_max)
pos_diff = p_end - p_start
pos_dominant = pos_diff - minPos_to_zero - minPos_to_vf
# A) complex positive motion case
if pos_dominant > 0.0: # positive dominant case, main part of the motion is in the +ve direction
if v_start < 0.0 and v_end > 0.0: # from negative to positive
if abs(p_start+minPos_to_zero) > p_max or abs(p_start+minPos_to_zero+minPos_to_vf) > p_max or abs(p_start+minPos_to_zero+minPos_to_vf+pos_dominant) > p_max:
raise ValueError("non feasible case: violate p_max")
rospy.logdebug("\n\n>>>positive dominant case: negative to positive: {}, {}, {}, {}".format(p_start, p_end, v_start, v_end) )
t_jrk_not_used, t_acc_not_used, t_jrk_dominant, t_acc_dominant, t_vel_dominant = traj.traj_segment_planning(p_start, p_end - minPos_to_zero - minPos_to_vf, abs_v_end, abs_v_end, v_max, a_max, j_max)
segment_jerks_and_durations = [( j_max, t_jrk_to_zero), (0.0, t_acc_to_zero), (-j_max, t_jrk_to_zero ),
( j_max, t_jrk_to_vf), (0.0, t_acc_to_vf), (-j_max, t_jrk_to_vf ),
( j_max, t_jrk_dominant), (0.0, t_acc_dominant), (-j_max, t_jrk_dominant), (0, t_vel_dominant),(-j_max, t_jrk_dominant), (0.0, t_acc_dominant), (j_max, t_jrk_dominant) ]
elif v_start > 0.0 and v_end < 0.0: #from positive to negative
if abs(p_start+pos_dominant) > p_max or abs(p_start+pos_dominant+minPos_to_zero) > p_max or abs(p_start+pos_dominant+minPos_to_zero+minPos_to_vf) > p_max:
raise ValueError("non feasible case: violate p_max")
rospy.logdebug("\n\n>>>positive dominant case: positive to negative: {}, {}, {}, {}".format(p_start, p_end, v_start, v_end))
t_jrk_not_used, t_acc_not_used, t_jrk_dominant, t_acc_dominant, t_vel_dominant = traj.traj_segment_planning(p_start, p_end-minPos_to_zero-minPos_to_vf, abs_v_start, abs_v_start, v_max, a_max, j_max)
segment_jerks_and_durations = [( j_max, t_jrk_dominant), (0.0, t_acc_dominant), (-j_max, t_jrk_dominant), (0, t_vel_dominant), (-j_max, t_jrk_dominant), (0.0, t_acc_dominant), (j_max, t_jrk_dominant),
(-j_max, t_jrk_to_zero), (0.0, t_acc_to_zero), ( j_max, t_jrk_to_zero ),
(-j_max, t_jrk_to_vf), (0.0, t_acc_to_vf), (j_max, t_jrk_to_vf ) ]
else:
raise ValueError("\n>> should be simple motion instead of complex motion case!")
# B) complex negative motion case
if pos_dominant < 0.0: # negative dominant case, main part of the motion is in the -ve direction
if v_start < 0.0 and v_end > 0.0: # from negative to positive
if abs(p_start+pos_dominant) > p_max or abs(p_start+pos_dominant+minPos_to_zero) > p_max or abs(p_start+pos_dominant+minPos_to_zero+minPos_to_vf) > p_max:
raise ValueError("non feasible case: violate p_max")
rospy.logdebug("\n\n>>>negative dominant case: negative to positive: {}, {}, {}, {}".format(p_start, p_end, v_start, v_end))
t_jrk_not_used, t_acc_not_used, t_jrk_dominant, t_acc_dominant, t_vel_dominant = traj.traj_segment_planning(p_start, p_end-minPos_to_zero-minPos_to_vf, abs_v_start, abs_v_start, v_max, a_max, j_max)
segment_jerks_and_durations = [(-j_max, t_jrk_dominant), (0.0, t_acc_dominant), ( j_max, t_jrk_dominant), (0, t_vel_dominant),(j_max, t_jrk_dominant), (0.0, t_acc_dominant), (-j_max, t_jrk_dominant),
( j_max, t_jrk_to_zero), (0.0, t_acc_to_zero), (-j_max, t_jrk_to_zero ),
( j_max, t_jrk_to_vf), (0.0, t_acc_to_vf), (-j_max, t_jrk_to_vf ) ]
elif v_start > 0.0 and v_end < 0.0: #from positive to negative
if abs(p_start+minPos_to_zero) > p_max or abs(p_start+minPos_to_zero+minPos_to_vf) > p_max or abs(p_start+minPos_to_zero+minPos_to_vf+pos_dominant) > p_max:
raise ValueError("non feasible case: violate p_max")
rospy.logdebug("\n\n>>>negative dominant case: positive to negative: {}, {}, {}, {}".format(p_start, p_end, v_start, v_end) )
t_jrk_not_used, t_acc_not_used, t_jrk_dominant, t_acc_dominant, t_vel_dominant = traj.traj_segment_planning(p_start+ minPos_to_zero + minPos_to_vf, p_end , abs_v_end, abs_v_end, v_max, a_max, j_max)
segment_jerks_and_durations = [(-j_max, t_jrk_to_zero), (0.0, t_acc_to_zero), ( j_max, t_jrk_to_zero ),
(-j_max, t_jrk_to_vf), (0.0, t_acc_to_vf), ( j_max, t_jrk_to_vf ),
(-j_max, t_jrk_dominant), (0.0, t_acc_dominant), ( j_max, t_jrk_dominant), (0, t_vel_dominant), ( j_max, t_jrk_dominant), (0.0, t_acc_dominant), (-j_max, t_jrk_dominant) ]
else:
raise ValueError("\n>> should be simple motion instead of complex motion case!")
# check if final_velocity value gives optimal motion to change from +ve/-ve to -ve/+ve
# this part can be used later to assign velocity vf in the parameterizarion part
minPos_v02vf = minPos_to_zero + minPos_to_vf
if v_start < 0 and v_end > 0: #from -ve to +ve
if pos_diff < minPos_v02vf:
rospy.logdebug(">>>>>> non optimal case <<<<<<< ")
else:
if pos_diff > minPos_v02vf:
rospy.logdebug(">>>>>> non optimal case <<<<<<< ")
# 2)simple motion: positive or negative velocity, v0 and vf have same sign
else:
# same action will be performed in both simple +ve or simple -ve motion, this part can be used later
# A) simple positive motion
if(v_start >= 0 and v_end >= 0): # case one: both are positive
rospy.logdebug("\n\n>>>simple postive motion: {}, {}, {}, {} ".format(p_start, p_end, v_start, v_end))
# B) simple negative motion
elif (v_start <= 0 and v_end <= 0): # case two: both are negative
rospy.logdebug("\n\n>>>simple negative motion: {}, {}, {}, {} ".format(p_start, p_end, v_start, v_end))
t_jrk_to_vf, t_acc_to_vf, t_jrk, t_acc, t_vel = traj.traj_segment_planning(p_start, p_end, abs_v_start, abs_v_end, v_max, a_max, j_max)
j_max_to_vf, j_max = assign_jerk_sign_According_to_motion_type(p_start, p_end, v_start, v_end, p_max, v_max, a_max, j_max)
if abs_v_end > abs_v_start:
segment_jerks_and_durations = [(j_max_to_vf, t_jrk_to_vf), (0.0, t_acc_to_vf), (-j_max_to_vf, t_jrk_to_vf), (j_max, t_jrk), (0.0, t_acc), (-j_max, t_jrk), (0.0, t_vel), (-j_max,t_jrk), (0.0, t_acc), (j_max, t_jrk)]
else:
segment_jerks_and_durations = [(j_max, t_jrk), (0.0, t_acc), (-j_max, t_jrk), (0.0, t_vel), (-j_max,t_jrk), (0.0, t_acc), (j_max, t_jrk), (j_max_to_vf, t_jrk_to_vf), (0.0, t_acc_to_vf), (-j_max_to_vf, t_jrk_to_vf)]
# one option to retun segment_jerks_and_durations and send it to JTC and then use it for interpolation on the JTC side
return segment_jerks_and_durations
# the main function to fit traj segment with generic start/end velocities
def fit_traj_segment(p_start, p_end, v_start, v_end, p_max, v_max, a_max, j_max, independent_variable=Symbol('t')):
'''
This function selects a motion profile for a general trajectory segment with a given start/end velocities/positions
considering the start and end accelerations/jerks are zeros
'''
# Step_1. calculate jerk_sign_and_duration
segment_jerks_and_durations = calculate_jerk_sign_and_duration(p_start, p_end, v_start, v_end, p_max, v_max, a_max, j_max, independent_variable=Symbol('t'))
# Step_2: generate pos, vel, acc, jrk using the calculated "segment_jerks_and_durations"
p0 = p_start
v0 = v_start
a0 = 0.0
times = [0.0]
jerk_functions = []
acceleration_functions = []
velocity_functions = []
position_functions = []
# Integrate jerk starting from the start of the trajectory and going all the way through the end.
for j0, T in segment_jerks_and_durations:
times.append(times[-1] + T)
j = Float(j0)
a = integrate(j, independent_variable) + a0
v = integrate(a, independent_variable) + v0
p = integrate(v, independent_variable) + p0
jerk_functions.append(j)
acceleration_functions.append(a)
velocity_functions.append(v)
position_functions.append(p)
a0 = a.subs({independent_variable: T})
v0 = v.subs({independent_variable: T})
p0 = p.subs({independent_variable: T})
position = PiecewiseFunction(times, position_functions, independent_variable)
velocity = PiecewiseFunction(times, velocity_functions, independent_variable)
acceleration = PiecewiseFunction(times, acceleration_functions, independent_variable)
jerk = PiecewiseFunction(times, jerk_functions, independent_variable)
return position, velocity, acceleration, jerk
``` |
{
"source": "jonbirge/honda-box",
"score": 2
} |
#### File: jonbirge/honda-box/app.py
```python
import os
import redis
from flask import Flask, flash, request, redirect
from flask import send_from_directory, render_template, session
from flask_autoindex import AutoIndex
from werkzeug.utils import secure_filename
from random import randint
from PIL import Image
from hondabox import RES_LIST, auto_scale, solid_color
# Constants & parameters
UPLOAD_BASE = '/data/boxes/'
CONTENT_LENGTH = 10 * 1024 * 1024
ALLOWED_EXTENSIONS = set(['jpg', 'png', 'jpeg', 'gif'])
SECRET_KEY = 'viadelamesatemeculaca'
PIN_DIGITS = 10
MIN_PIN_LEN = 6
AUTO_INDEX_OPTIONS = '?sort_by=modified&order=desc'
HONDA_RES = {
"Accord (2018-Present)": "720p",
"Accord (Pre-2018)": "WVGA",
"Civic": "WVGA",
"Clarity": "WVGA",
"CR-V": "WVGA",
"Fit": "WVGA",
"HR-V": "WVGA",
"Insight": "WVGA",
"Passport": "720p",
"Odyssey (2019-Present)": "720p",
"Odyssey (Pre-2019)": "WVGA",
"Pilot (2019-Present)": "720p",
"Pilot (Pre-2019)": "WVGA",
"Ridgeline": "WVGA",
}
for thekey in RES_LIST:
HONDA_RES[thekey] = thekey
# Configuration
app = Flask(__name__)
cache = redis.Redis(host='redis', port=6379)
app.config['UPLOAD_BASE'] = UPLOAD_BASE
app.config['MAX_CONTENT_LENGTH'] = CONTENT_LENGTH
app.secret_key = SECRET_KEY
# AutoIndex configuration
files_index = AutoIndex(app, '/data', add_url_rules=False)
# Utility functions
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def random_pin():
return str(randint(10**(PIN_DIGITS - 1), 10**PIN_DIGITS - 1))
def redisint(key, cache=cache):
getkey = cache.get(key)
if getkey is None:
return 0
else:
return int(getkey)
# Endpoints
@app.route('/')
def index():
cache.incr('main_gets')
if 'pin' in session:
default_pin = session['pin']
else:
default_pin = None
return render_template('index.html', pin = default_pin)
@app.route('/makecolor', methods=['GET', 'POST'])
def make_color():
if request.method == 'POST': # POST method handler
### check for errors...
car = request.form['model']
session['car'] = car
color = request.form['color']
session['color'] = color
userpin = request.form['pin']
rawfilename = request.form['filename']
problems = 0
if len(userpin) < MIN_PIN_LEN:
flash('PIN is too short')
problems += 1
else:
session['pin'] = userpin
if len(rawfilename) < 3:
flash('Filename is too short')
problems += 1
### handle request if ok
if problems == 0:
filename = secure_filename(rawfilename) + '.jpeg'
fullpath = os.path.join(app.config['UPLOAD_BASE'], userpin)
try:
os.mkdir(fullpath)
except:
pass # we don't care!
finalfile = os.path.join(fullpath, filename)
colorimage = solid_color(color, HONDA_RES[car])
colorimage.save(finalfile, 'JPEG')
flash('Background file created: ' + filename)
return redirect(request.url)
else: # GET method handler
if not 'pin' in session:
session['pin'] = random_pin()
if not 'car' in session:
session['car'] = next(iter(HONDA_RES.keys()))
carlist = list(HONDA_RES.keys())
if not 'color' in session:
default_color = 'rgb(1,0,0)'
else:
default_color = session['color']
return render_template('makecolor.html',
cars=carlist, thecar=session['car'], pin=session['pin'],
defname=random_pin(), startcolor=default_color)
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST': # POST method handler
cache.incr('upload_tries')
### check for errors...
car = request.form['model']
session['car'] = car
problems = 0
if 'file' not in request.files:
flash('No file selected')
problems += 1
else:
file = request.files['file']
if not allowed_file(file.filename):
flash('Bad file type')
problems += 1
userpin = request.form['pin']
if len(userpin) < MIN_PIN_LEN:
flash('PIN is too short')
problems += 1
else:
session['pin'] = userpin
if problems > 0:
return redirect(request.url)
### handle request
filename = secure_filename(file.filename)
fullpath = os.path.join(app.config['UPLOAD_BASE'], userpin)
tempopath = os.path.join('/tmp/', userpin)
try:
os.mkdir(fullpath)
except:
pass # we don't care!
try:
os.mkdir(tempopath)
except:
pass
### process file
tmpfile = os.path.join(tempopath, filename)
finalfile = os.path.join(fullpath, filename)
file.save(tmpfile)
origimage = Image.open(tmpfile)
scaledimage = auto_scale(origimage, HONDA_RES[car])
scaledimage.save(finalfile, 'JPEG')
os.remove(tmpfile)
cache.incr('upload_goods')
boxurl = request.url_root + 'data/boxes/' + userpin
return render_template('success.html',
filename=filename, car=car, url=boxurl)
else: # GET method handler
cache.incr('upload_gets')
if not 'pin' in session:
session['pin'] = random_pin()
if not 'car' in session:
session['car'] = next(iter(HONDA_RES.keys()))
carlist = list(HONDA_RES.keys())
return render_template('upload.html',
cars=carlist, thecar=session['car'], pin=session['pin'])
@app.route('/box/', methods=['GET', 'POST'])
@app.route('/box', methods=['GET', 'POST'])
def goto_box():
cache.incr('download_gets')
if request.method == 'POST': # POST method handler
userpin = request.form['pin']
if len(userpin) < 6:
flash('PIN is too short')
return redirect(request.url)
boxpath = '/data/boxes/' + userpin + AUTO_INDEX_OPTIONS
session['pin'] = userpin
return redirect(boxpath)
else: # GET method handler
if 'pin' in session:
default_pin = session['pin']
else:
default_pin =''
return render_template('download.html', pin=default_pin)
@app.route('/data/<path:path>')
def autoindex(path='.'):
try:
cache.incr('download_tries')
cache.incr('download_goods') # assume we succeed...
return files_index.render_autoindex(path)
except:
cache.decr('download_goods') # ...until we don't
thebox = 'data/' + path
return render_template('missing.html', box=thebox)
@app.route('/data')
@app.route('/data/')
@app.route('/data/boxes')
@app.route('/data/boxes/')
def static_files():
return redirect('/box')
@app.route('/stats')
def stats():
mains = redisint('main_gets')
upload_goods = redisint('upload_goods')
upload_gets = redisint('upload_gets')
upload_tries = redisint('upload_tries')
download_goods = redisint('download_goods')
download_gets = redisint('download_gets')
download_tries = redisint('download_tries')
return render_template('stats.html',
statreads=cache.incr('stat_gets'), mainloads=mains,
upload_goods=upload_goods, upload_tries=upload_tries, upload_gets=upload_gets,
download_goods=download_goods, download_tries=download_tries, download_gets=download_gets)
# running debug server
if __name__ == "__main__":
app.run("0.0.0.0", port = 5000, debug = True)
``` |
{
"source": "jonblack/cmpgpx",
"score": 3
} |
#### File: jonblack/cmpgpx/cmpgpx.py
```python
import argparse
import logging
import math
import os
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(message)s', level=logging.INFO)
_log = logging.getLogger(__name__)
logging.getLogger('geotiler').setLevel(logging.INFO)
logging.getLogger('geotiler.map').setLevel(logging.INFO)
logging.getLogger('geotiler.tilenet').setLevel(logging.INFO)
import cairocffi as cairo
import geotiler
import gpxpy
import numpy
import geo
import gfx
def align_tracks(track1, track2, gap_penalty):
""" Needleman-Wunsch algorithm adapted for gps tracks. """
_log.info("Aligning tracks")
def similarity(p1, p2):
d = gpxpy.geo.distance(p1.latitude, p1.longitude, p1.elevation,
p2.latitude, p2.longitude, p2.elevation)
return -d
# construct f-matrix
f = numpy.zeros((len(track1), len(track2)))
for i in range(0, len(track1)):
f[i][0] = gap_penalty * i
for j in range(0, len(track2)):
f[0][j] = gap_penalty * j
for i in range(1, len(track1)):
t1 = track1[i]
for j in range(1, len(track2)):
t2 = track2[j]
match = f[i-1][j-1] + similarity(t1, t2)
delete = f[i-1][j] + gap_penalty
insert = f[i][j-1] + gap_penalty
f[i, j] = max(match, max(delete, insert))
# backtrack to create alignment
a1 = []
a2 = []
i = len(track1) - 1
j = len(track2) - 1
while i > 0 or j > 0:
if i > 0 and j > 0 and \
f[i, j] == f[i-1][j-1] + similarity(track1[i], track2[j]):
a1.insert(0, track1[i])
a2.insert(0, track2[j])
i -= 1
j -= 1
elif i > 0 and f[i][j] == f[i-1][j] + gap_penalty:
a1.insert(0, track1[i])
a2.insert(0, None)
i -= 1
elif j > 0 and f[i][j] == f[i][j-1] + gap_penalty:
a1.insert(0, None)
a2.insert(0, track2[j])
j -= 1
return a1, a2
def draw_alignment(track1, track2, bounds):
""" Draws the aligned tracks with the given bounds onto a cairo surface. """
_log.info("Drawing alignment")
mm = geotiler.Map(extent=bounds, zoom=14)
width, height = mm.size
image = geotiler.render_map(mm)
# create cairo surface
buff = bytearray(image.convert('RGBA').tobytes('raw', 'BGRA'))
surface = cairo.ImageSurface.create_for_data(
buff, cairo.FORMAT_ARGB32, width, height)
cr = cairo.Context(surface)
a1_l = len(track1)
a2_l = len(track2)
assert a1_l == a2_l
p_radius = 2
for i in range(0, a1_l):
if a1[i] is not None and a2[i] is not None:
cr.set_source_rgba(0.2, 0.7, 1.0, 1.0)
a1_x, a1_y = mm.rev_geocode((a1[i].longitude, a1[i].latitude))
cr.arc(a1_x, a1_y, p_radius, 0, 2 * math.pi)
cr.fill()
cr.set_source_rgba(0.0, 0.0, 1.0, 1.0)
a2_x, a2_y = mm.rev_geocode((a2[i].longitude, a2[i].latitude))
cr.arc(a2_x, a2_y, p_radius, 0, 2 * math.pi)
cr.fill()
elif a1[i] is not None and a2[i] is None:
cr.set_source_rgba(1.0, 0.0, 0.0, 1.0)
a1_x, a1_y = mm.rev_geocode((a1[i].longitude, a1[i].latitude))
cr.arc(a1_x, a1_y, p_radius, 0, 2 * math.pi)
cr.fill()
elif a1[i] is None and a2[i] is not None:
cr.set_source_rgba(1.0, 0.5, 0.0, 1.0)
a2_x, a2_y = mm.rev_geocode((a2[i].longitude, a2[i].latitude))
cr.arc(a2_x, a2_y, p_radius, 0, 2 * math.pi)
cr.fill()
return surface
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('gpx_file1', type=argparse.FileType('r'))
parser.add_argument('gpx_file2', type=argparse.FileType('r'))
parser.add_argument('-c', '--cutoff', type=int, default=10,
help="cutoff distance in meters for similar points")
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-e', '--even', type=int,
help="evenly distribute points in meters")
parser.add_argument('-o', '--output-file', default="alignment.png",
help="output filename")
parser.add_argument('-s', '--separate_tracks', action='store_true',
help="output original tracks to separate images")
args = parser.parse_args()
if args.debug:
_log.setLevel(logging.DEBUG)
logging.getLogger('geotiler.tilenet').setLevel(logging.DEBUG)
gpx1 = gpxpy.parse(args.gpx_file1)
gpx2 = gpxpy.parse(args.gpx_file2)
gap_penalty = -args.cutoff
# Join all the points from all segments for the track into a single list
gpx1_points = [p for s in gpx1.tracks[0].segments for p in s.points]
gpx2_points = [p for s in gpx2.tracks[0].segments for p in s.points]
# Evenly distribute the points
if args.even:
gpx1_points = geo.interpolate_distance(gpx1_points, args.even)
gpx2_points = geo.interpolate_distance(gpx2_points, args.even)
# Run the alignment
a1, a2 = align_tracks(gpx1_points, gpx2_points, gap_penalty)
# Calculate map bounding box with padding
padding_pct = 10
bounds1 = gpx1.get_bounds()
bounds2 = gpx2.get_bounds()
bbox1 = gfx.add_padding((bounds1.min_longitude, bounds1.min_latitude,
bounds1.max_longitude, bounds1.max_latitude), 10)
bbox2 = gfx.add_padding((bounds2.min_longitude, bounds2.min_latitude,
bounds2.max_longitude, bounds2.max_latitude), 10)
bbox = (min(bbox1[0], bbox2[0]), min(bbox1[1], bbox2[1]),
max(bbox1[2], bbox2[2]), max(bbox1[3], bbox2[3]))
# Draw tracks and alignment
if args.separate_tracks:
gpx1_surface = gfx.draw_track(gpx1_points, bbox1)
gpx1_img_filename = "{}.png".format(
os.path.basename(os.path.splitext(args.gpx_file1.name)[0]))
_log.info("Saving original track to '{}'".format(gpx1_img_filename))
gpx1_surface.write_to_png(gpx1_img_filename)
gpx2_surface = gfx.draw_track(gpx2_points, bbox2)
gpx2_img_filename = "{}.png".format(
os.path.basename(os.path.splitext(args.gpx_file2.name)[0]))
_log.info("Saving original track to '{}'".format(gpx2_img_filename))
gpx2_surface.write_to_png(gpx2_img_filename)
surface = draw_alignment(a1, a2, bbox)
_log.info("Saving alignment to '{}'".format(args.output_file))
surface.write_to_png(args.output_file)
# Output the difference in the tracks as a percentage
match = 0
for i in range(0, len(a1)):
if a1[i] is not None and a2[i] is not None:
match += 1
total_similar = match / len(a1)
_log.info("Track Similarity: {:.2%}".format(total_similar))
```
#### File: jonblack/cmpgpx/gfx.py
```python
import logging
import math
_log = logging.getLogger(__name__)
import cairocffi as cairo
import geotiler
def draw_track(track, bounds):
""" Draws the given tracks with the given bounds onto a cairo surface. """
_log.info("Drawing track")
mm = geotiler.Map(extent=bounds, zoom=14)
width, height = mm.size
image = geotiler.render_map(mm)
# create cairo surface
buff = bytearray(image.convert('RGBA').tobytes('raw', 'BGRA'))
surface = cairo.ImageSurface.create_for_data(
buff, cairo.FORMAT_ARGB32, width, height)
cr = cairo.Context(surface)
p_radius = 2
for p in track:
cr.set_source_rgba(0.0, 0.0, 1.0, 1.0)
a1_x, a1_y = mm.rev_geocode((p.longitude, p.latitude))
cr.arc(a1_x, a1_y, p_radius, 0, 2 * math.pi)
cr.fill()
return surface
def add_padding(bbox, padding_pct):
""" Add the given percentage padding to the given bounding box. """
min_lat = bbox[1]
max_lat = bbox[3]
min_lon = bbox[0]
max_lon = bbox[2]
lat_pad = ((max_lat - min_lat) / 100) * padding_pct
lon_pad = ((max_lon - min_lon) / 100) * padding_pct
bbox = (min_lon - lon_pad, min_lat - lat_pad,
max_lon + lon_pad, max_lat + lat_pad)
return bbox
``` |
{
"source": "jonblatho/covid-19",
"score": 3
} |
#### File: jonblatho/covid-19/add-vaccine-data.py
```python
import sys, os
import json
import csv
from argparse import ArgumentParser
from utilities import utilities
# Argument parsing setup
parser = ArgumentParser()
parser.add_argument('path', type=str, help='The path to the vaccination CSV data file to process.')
parser.add_argument('--type', type=str, choices=['doses', 'initiated', 'completed'], help='The vaccination data file type. This argument is optional if the path is provided and the data file names are present and unchanged from what was received from the DHSS server.')
parser.add_argument('--quiet', action='store_true', help='Suppress non-error output.')
args = parser.parse_args()
# Pull arguments from arg parser
data_path = args.path
data_type = args.type
# If type isn't provided, attempt to guess the type
if data_type is None and data_path is not None:
if "doses" in data_path.lower():
data_type = "doses"
elif "initiated" in data_path.lower():
data_type = "initiated"
elif "completed" in data_path.lower():
data_type = "completed"
else:
print("Unable to guess the type of vaccine data file provided.")
print('Please retry and explicitly provide the type as "--type doses", "--type initiated", or "--type completed" following the file path. Exiting.')
exit(1)
with open(data_path, 'r', encoding='utf-8') as csv_file:
dates = [d["date"] for d in utilities.data.all]
def __reformatted_date__(d: str):
components = [int(component) for component in d.split('/')]
for k, component in enumerate(components):
if component < 10:
components[k] = f"0{component}"
else:
components[k] = str(component)
return f"{components[2]}-{components[0]}-{components[1]}"
reader = csv.reader(csv_file, delimiter=',')
# Filter to Howell County rows only
howell_rows = [row for row in reader if row[1] == "Howell"]
for row in howell_rows:
# Reformat the date to our expected YYYY-MM-DD format
row[0] = __reformatted_date__(row[0])
# Reorder rows list by date in ascending order
rows = sorted(howell_rows, key = lambda i : i[0])
# Set up cumulative total variable for initiated/completed vaccinations
cumulative_total = 0
for day in utilities.data.all:
if utilities.date.date_is_before(day["date"], '2020-12-15'):
# No doses were administered before 2020-12-15
if not args.quiet:
print(day["date"], "is before 2020-12-14. Skipping.")
day["vaccinations"] = None
continue
# Filter to rows for the current date
date_rows = [row for row in rows if row[0] == day["date"]]
if data_type == 'doses':
value_index = 2
elif data_type == 'initiated' or data_type =='completed':
value_index = 3
# Sum the filtered values
value = sum([int(v[value_index].replace(',','')) for v in date_rows])
cumulative_total += value
# Store data
if "vaccinations" not in day or day["vaccinations"] is None:
day["vaccinations"] = {"doses": None, "initiated": None, "completed": None}
day["vaccinations"][data_type] = value
# Save monthly data
months = utilities.unique([d["date"][:7] for d in utilities.data.all])
for month in months:
utilities.save_json(utilities.data.data_for_month(month), f'daily-data/{month}.json', quiet=args.quiet)
```
#### File: covid-19/utilities/utilities.py
```python
import json
from . import geo, date, data, calc
geo = geo
date = date
data = data
calc = calc
# Outputs JSON for the given dictionary or list to the given path.
def save_json(x, path, quiet=False): # pragma: no cover
with open(path, 'w+') as output_file:
output_file.write(json.dumps(x, separators=(',', ':')))
if not quiet:
print(f'Saved {path}')
# Returns only the unique elements in a list
def unique(l):
unique_list = []
for item in l:
if item not in unique_list:
unique_list.append(item)
return unique_list
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.