metadata
dict | text
stringlengths 60
3.49M
|
---|---|
{
"source": "JoseEstevan/GenderClass",
"score": 3
} |
#### File: JoseEstevan/GenderClass/app.py
```python
import streamlit as st
from sklearn.externals import joblib
import time
from PIL import Image
from sklearn.feature_extraction.text import CountVectorizer
import hyperlink
# Load the fitted CountVectorizer and Naive Bayes gender model from disk.
# joblib.load() accepts a filename directly, so there is no need to keep
# (and leak) module-level open file handles as the original code did.
gender_cv = joblib.load("gender_vectorizer.pkl")
gender_clf = joblib.load("naivebayesgendermodel.pkl")
def predict_gender(data):
    """Vectorize *data* (an iterable of name strings) and return the model's
    class predictions (main() maps 0 -> 'Mulher', 1 -> 'Homem')."""
    features = gender_cv.transform(data).toarray()
    return gender_clf.predict(features)
def load_css(file_name):
    """Read a local CSS file and inject its content into the Streamlit page."""
    with open(file_name) as css_file:
        styles = css_file.read()
    st.markdown('<style>{}</style>'.format(styles), unsafe_allow_html=True)
def load_icon(icon_name):
    """Render a Material Design icon by name through an <i> tag."""
    markup = '<i class="material-icons">{}</i>'.format(icon_name)
    st.markdown(markup, unsafe_allow_html=True)
def load_images(file_name):
    """Open an image file from disk and display it at a fixed 300px width."""
    return st.image(Image.open(file_name), width=300)
def main():
    """Streamlit entry point: render the UI, classify the typed name and
    show the result plus an "about" section."""
    st.title("Classificação de Gênero com ML")
    html_temp = """
<div style="background-color:tomato;padding:10px">
<h2 style="color:white;text-align:center;">GenderClass App</h2>
</div>
"""
    st.markdown(html_temp, unsafe_allow_html=True)
    load_css('icone.css')
    load_icon('people')

    name = st.text_input("Digite o nome")
    if st.button("Classificar"):
        result = predict_gender([name])
        # Map the numeric class to a label and an illustration.
        if result[0] == 0:
            prediction = 'Mulher'
            c_img = 'mulher.png'
        else:
            # The original used a bare `elif result[0] == 1:` with no else,
            # which left `prediction`/`c_img` undefined (NameError) for any
            # unexpected class value; treat every non-zero class as male.
            prediction = 'Homem'
            c_img = 'homem.png'
        st.success('{} foi classificado como {}'.format(name.title(), prediction))
        load_images(c_img)

    # "About" section; rendered only while the checkbox is ticked.
    if st.checkbox("Sobre"):
        st.text("GenderClass é um app de classificação de gênero com base no nome\nFaz uso da biblioteca scikit-learn e da linguagem Python")
        st.subheader('Redes Sociais')
        medium = hyperlink.parse(u'https://medium.com/@joseestevan')
        st.markdown(medium.to_text())
        link = hyperlink.parse(u'https://www.linkedin.com/in/joseestevan/')
        st.markdown(link.to_text())
        git = hyperlink.parse(u'https://github.com/JoseEstevan')
        st.markdown(git.to_text())
        st.markdown('')
        st.markdown('Obs: Se a classificação estiver errada tente com o sobrenome, o modelo está sendo revisado por conta de alguns erros.')
        st.subheader('By: <NAME>')


if __name__ == '__main__':
    main()
``` |
{
"source": "JoseETeixeira/vulkan-renderer",
"score": 3
} |
#### File: vulkan-renderer/helper/req_check.py
```python
from argparse import ArgumentParser
from pathlib import Path
import pkg_resources
import sys
def cmd_parser() -> ArgumentParser:
    """Build the command-line parser for the requirements checker."""
    p = ArgumentParser(
        prog="req_check",
        description="Check if a python installation matches a requirements.txt file",
    )
    p.add_argument(dest='req_file', type=str, help='path to requirements.txt')
    p.add_argument('--quiet', dest='quiet', action='store_true', default=False,
                   help='output nothing')
    return p
def main():
    """Check every requirement in the given file against the current
    environment; exit 0 on success, 1 on any missing/conflicting package."""
    args = cmd_parser().parse_args(sys.argv[1:])
    with Path(args.req_file).open(mode='r', encoding='UTF-8') as reader:
        dependencies = reader.read()
    success = True
    for pkg in dependencies.split('\n'):
        pkg = pkg.strip()
        # Skip blank lines and comment lines, which are legal in a
        # requirements.txt but would confuse pkg_resources.require().
        if not pkg or pkg.startswith('#'):
            continue
        try:
            pkg_resources.require(pkg)
        except pkg_resources.DistributionNotFound:
            success = False
            if not args.quiet:
                # (grammar fix: was "Did not found '...', but is required.")
                print(f"Did not find '{pkg}', but it is required.")
        except pkg_resources.VersionConflict as ex:
            success = False
            if not args.quiet:
                print(f"Found: '{ex.dist}', but '{ex.req}' is required.")
        else:
            if not args.quiet:
                print(f"Found: '{pkg}'.")
    # sys.exit instead of the interactive-only exit() builtin.
    sys.exit(0 if success else 1)


if __name__ == '__main__':
    main()
``` |
{
"source": "Josef212/CoquoBot",
"score": 3
} |
#### File: CoquoBot/CoquoBot/command.py
```python
import abc
import json
from telegram import Update, CallbackQuery, Message
from telegram.ext import CommandHandler
class Command(metaclass=abc.ABCMeta):
    """Base class for bot commands.

    Subclasses set ``self.name`` (a command name or list of aliases) and
    implement :meth:`execute`.  The helper methods wrap common telegram
    operations (replying, sending, editing inline keyboards) around the
    update/context captured by :meth:`do_execute` or :meth:`_set_cmd_data`.
    """

    def __init__(self, app):
        self.app = app
        self.name = "unasigned"  # overridden by subclasses (sic)
        self.cmd = None  # lazily-built CommandHandler

    def get_command(self) -> CommandHandler:
        """Return (building lazily) the CommandHandler for this command."""
        if self.cmd is None:  # `is None` instead of `== None` (PEP 8)
            self.cmd = CommandHandler(self.name, lambda u, c: self.do_execute(u, c))
        return self.cmd

    def do_execute(self, update: Update, ctx) -> None:
        """Capture the update/context, then dispatch to execute()."""
        self.__update = update
        self.__ctx = ctx
        self.execute(update, ctx)

    @abc.abstractmethod
    def execute(self, update: Update, ctx) -> None:
        pass

    def get_help(self):
        """One-line help text; subclasses should override with a real description."""
        return f'{self.name}: {self.name}'

    def _set_cmd_data(self, update: Update, ctx) -> None:
        """Capture update/context when invoked outside do_execute (callbacks)."""
        self.__update = update
        self.__ctx = ctx

    def _get_effective_message(self) -> Message:
        # TODO: Could use effective_message. This is none for inline_cbk and
        # some more cases but would do the trick as this one
        query = self.__update.callback_query
        return self.__update.message if query is None else query.message

    def _get_username(self) -> str:
        """Username of the acting user, falling back to 'first last' names."""
        query = self.__update.callback_query
        user = self.__update.message.from_user if query is None else query.from_user
        return user.username if user.username is not None else f'{user.first_name} {user.last_name}'

    def _get_user_lang(self) -> str:
        """Language code of the user behind the current update or callback."""
        query = self.__update.callback_query
        return self.__update.message.from_user.language_code if query is None else query.from_user.language_code

    def _get_chat_id(self) -> int:
        return self._get_effective_message().chat.id

    def _send_message(self, msg: str, markup=None) -> Message:
        """Send *msg* to the current chat (optionally with an inline keyboard)."""
        chat_id = self._get_chat_id()
        return self.app.bot.send_message(chat_id, msg, reply_markup=markup)

    def _reply_message(self, msg: str, markup=None) -> None:
        """Reply to the effective message (optionally with an inline keyboard)."""
        self._get_effective_message().reply_text(msg, reply_markup=markup)

    def _query_edit_message(self, query: CallbackQuery, msg: str, markup=None) -> None:
        query.edit_message_text(msg, reply_markup=markup)

    def _get_inline_btn_args_from_query(self, query: CallbackQuery) -> list:
        # Inline button payloads are '#'-separated fields (see _inline_btn callers).
        return query.data.split('#')

    def _inline_btn(self, text: str, cbk_data: str = ''):
        """Inline keyboard button descriptor understood by _build_keyboard."""
        return {'text': text, 'callback_data': cbk_data}

    def _build_keyboard(self, keyboard: list):
        """Serialize rows of _inline_btn dicts into telegram reply markup JSON."""
        return json.dumps({'inline_keyboard': keyboard})

    def _execute_command(self, cmd_name: str) -> None:
        """Run another registered command in the context of the current update."""
        cmd = self.app.get_command(cmd_name)
        if cmd is None:  # `is None` instead of `== None` (PEP 8)
            self.app.error(f'Could not find command named: {cmd_name}')
            return
        cmd.do_execute(self.__update, self.__ctx)
# Command implementation example
class TestCmd(Command):
    """Minimal example command: replies with a fixed text."""

    def __init__(self, app):
        super().__init__(app)
        self.name = ["TestCmd", "proves"]

    def execute(self, update, ctx):
        self._reply_message('Executing [{}] command'.format(self.name))

    def get_help(self):
        return '{}: A simple test command that displays a basic text.'.format(self.name)
```
#### File: CoquoBot/commands/cmd_edit_order.py
```python
from threading import Lock
from command import Command
from loc_keys import LocKeys
from telegram import Update
from telegram.ext import CallbackQueryHandler
ADD_KEY = 'ADD_KEY'
REMOVE_KEY = 'REMOVE_KEY'


class CmdEditOrder(Command):
    """Inline-keyboard editor for a user's order.

    Shows one row per cart item ('-' / label / '+') plus a finish button;
    presses are routed back through the three CallbackQueryHandler
    patterns registered in __init__.
    """

    def __init__(self, app):
        super().__init__(app)
        self.name = ["edit_order"]
        # Callback-data prefixes used to route inline button presses.
        self.__key = 'edit_order#'
        self.__finish = 'edit_finish#'
        self.__ignore = 'edit_ignore#'
        app.dispatcher.add_handler(CallbackQueryHandler(lambda u, c: self.__item_cbk(u, c), pattern=f'^{self.__key}'))
        app.dispatcher.add_handler(CallbackQueryHandler(lambda u, c: self.__finish_cbk(u, c), pattern=f'^{self.__finish}'))
        app.dispatcher.add_handler(CallbackQueryHandler(lambda u, c: self.__ignore_cbk(u, c), pattern=f'^{self.__ignore}'))

    def execute(self, update: Update, ctx) -> None:
        """Show the edit keyboard for the given (or current) user."""
        user = self.__try_get_user_from_args(update)
        user = user if user is not None else self._get_username()
        lang = self._get_user_lang()
        chat_id = self._get_chat_id()
        msg = self.get_cmd_msg(chat_id, lang, user)
        markup = self.build_edit_order_keyboard(chat_id, user, lang)
        self._reply_message(msg, markup)

    def get_cmd_msg(self, chat_id: int, lang: str, user: str) -> str:
        """Localized header for the edit message (mentions an empty order)."""
        has_order = self.app.user_has_any_order(chat_id, user)
        loc = self.app.localization
        msg = loc.get_text_format(lang, LocKeys.EDIT_ORDER_USER_ORDER, user)
        if not has_order:
            text = loc.get_text(lang, LocKeys.EDIT_ORDER_EMPTY)
            msg += f'\n {text}'
        return msg

    def build_edit_order_keyboard(self, chat_id: int, user: str, lang: str):
        """Build the inline keyboard for the user's cart, or None when empty."""
        loc = self.app.localization
        order = self.app.get_user_order(chat_id, user)
        cart = order['cart']
        keyboard = list()
        for item in cart:
            amount = cart[item]
            msg = f'{amount}x {item}'
            add_cbk_data = f'{self.__key}{user}#{ADD_KEY}#{item}'
            # BUGFIX: the label button previously used the `edit_order#` prefix,
            # which routed its press to __item_cbk and crashed on args[2];
            # it must use the `edit_ignore#` prefix handled by __ignore_cbk.
            ignore_cbk_data = f'{self.__ignore}{user}'  # TODO: Maybe instead of an add button simply using item btn as add
            remove_cbk_data = f'{self.__key}{user}#{REMOVE_KEY}#{item}'
            keyboard.append([
                self._inline_btn('-', remove_cbk_data),
                self._inline_btn(msg, ignore_cbk_data),
                self._inline_btn('+', add_cbk_data),
            ])
        if len(keyboard) == 0:
            return None
        finish = loc.get_text(lang, LocKeys.BTN_FINISH)
        keyboard.append([self._inline_btn(finish, f'{self.__finish}{user}')])
        return self._build_keyboard(keyboard)

    def __ignore_cbk(self, update: Update, ctx):
        # Label buttons do nothing; just ack so Telegram stops the spinner.
        self._set_cmd_data(update, ctx)
        update.callback_query.answer()

    def __item_cbk(self, update: Update, ctx) -> None:
        """+/- button press: adjust the item amount, redraw message + keyboard."""
        self._set_cmd_data(update, ctx)
        query = update.callback_query
        query.answer()
        args = self._get_inline_btn_args_from_query(query)
        user = args[1]  # TODO: If we actually can get user that clicked we should get it and remove the user encoded on the cbk_data
        mode = args[2]
        item = args[3]
        modifier = 1 if mode == ADD_KEY else -1
        chat_id = self._get_chat_id()
        lang = self._get_user_lang()
        self.app.add_to_order(chat_id, user, item, modifier)
        msg = self.get_cmd_msg(chat_id, lang, user)
        markup = self.build_edit_order_keyboard(chat_id, user, lang)
        self._query_edit_message(query, msg, markup)

    def __finish_cbk(self, update: Update, ctx) -> None:
        """Finish button: replace the keyboard with the final formatted order."""
        self._set_cmd_data(update, ctx)
        query = update.callback_query
        query.answer()
        args = self._get_inline_btn_args_from_query(query)
        user = args[1]  # TODO: If we actually can get user that clicked we should get it and remove the user encoded on the cbk_data
        chat_id = self._get_chat_id()
        lang = self._get_user_lang()
        get_order_cmd = self.app.get_command('get_order_for')
        full_order = self.app.get_user_order(chat_id, user)
        title = self.app.localization.get_text_format(lang, LocKeys.EDIT_ORDER_TITLE_UPDATED, user)
        msg = get_order_cmd.format_order(full_order, title, lang)
        self._query_edit_message(query, msg, None)

    def __try_get_user_from_args(self, update: Update) -> str:
        """Extract an optional '@user' / 'user' argument from the command text."""
        if update.callback_query is not None:  # callbacks carry no text args
            return None
        args = update.message.text.split()
        if len(args) >= 2:
            # Will assume name is the first argument and ignore if more arguments are sent
            user = args[1]
            if len(user) == 0:
                return None
            if user[0] == '@':
                user = user[1:]
            # TODO: Check if the user is on the chat
            return user
        return None
```
#### File: CoquoBot/commands/cmd_menu.py
```python
from command import Command
from telegram import Update
class CmdWeb(Command):
    """Sends the menu web page URL to the chat."""

    def __init__(self, app):
        super().__init__(app)
        self.name = ["web"]

    def execute(self, update: Update, ctx) -> None:
        self._send_message(self.app.menu.get_menu_web())
class CmdMenu(Command):
    """Sends the restaurant menu (item + price listing) to the chat."""

    def __init__(self, app):
        super().__init__(app)
        self.name = ["menu"]

    def execute(self, update: Update, ctx) -> None:
        self._send_message(self.get_menu_text())

    def get_menu_text(self) -> str:
        # TODO: Could localize the menu
        menu = self.app.menu
        lines = ['COQUO menu:\n']
        for item in menu.get_menu_list():
            lines.append(' - {}: {}€\n'.format(item, menu.get_item_price(item)))
        return ''.join(lines)
```
#### File: CoquoBot/commands/cmd_reset_order.py
```python
from command import Command
from loc_keys import LocKeys
from telegram import Update
class CmdResetOrder(Command):
    """Clears the current chat's order and confirms in the user's language."""

    def __init__(self, app):
        super().__init__(app)
        self.name = ["reset"]

    def execute(self, update: Update, ctx) -> None:
        self.app.reset_order(self._get_chat_id())
        lang = self._get_user_lang()
        confirmation = self.app.localization.get_text(lang, LocKeys.ORDER_RESET_DONE)
        self._reply_message(confirmation)
```
#### File: CoquoBot/commands/cmd_start.py
```python
from command import Command
from loc_keys import LocKeys
from telegram import Update
class CmdStart(Command):
    """Replies to /start with the localized welcome message."""

    def __init__(self, app):
        super().__init__(app)
        self.name = ["start"]

    def execute(self, update: Update, ctx) -> None:
        lang = self._get_user_lang()
        welcome = self.app.localization.get_text(lang, LocKeys.START_MSG)
        self._reply_message(welcome)
```
#### File: CoquoBot/CoquoBot/order_manager.py
```python
from order import Order
class OrderManager:
    """Keeps one Order per chat id, creating empty orders on first access."""

    def __init__(self):
        self.orders = {}

    def user_has_any_order(self, chat_id: int, user: str) -> bool:
        """True when *user* has at least one item in the chat's order."""
        return self.get_order(chat_id).user_has_any_order(user)

    def get_order(self, id: int) -> Order:
        """Return the chat's Order, lazily creating an empty one."""
        try:
            return self.orders[id]
        except KeyError:
            order = self.orders[id] = Order()
            return order

    def reset_order(self, id: int) -> None:
        """Empty the chat's order."""
        self.get_order(id).reset()
``` |
{
"source": "josefdlange/slackborg",
"score": 2
} |
#### File: slackborg/slackborg/borg.py
```python
import time
from slackclient import SlackClient as _SlackClient
from conversations import ConversationManager
from commands import CommandManager
class SlackClient(_SlackClient):
    # I'm not sure if it's my local environment, but the Slack Client
    # swallows an error where the websocket client beneath can't find
    # the root CA file.
    # NOTE(review): Python 2 code (print statements). The nested function
    # below is monkey-patched onto the underlying server class so every
    # later websocket connect passes the system CA bundle explicitly.
    def __init__(self, *args, **kwargs):
        super(SlackClient, self).__init__(*args, **kwargs)

        def patched_connect_slack_websocket(self, ws_url):
            # Replacement connect: hand the system default CA bundle to
            # websocket.create_connection so SSL verification can succeed.
            try:
                import websocket
                import ssl
                sslopt_ca_certs = {}
                ssl_defaults = ssl.get_default_verify_paths()
                if ssl_defaults is not None:
                    sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
                self.websocket = websocket.create_connection(ws_url, sslopt=sslopt_ca_certs)
            except Exception as e:
                # Best-effort: report and continue without a websocket.
                print e
                print 'Failed WebSocket Connection.'

        # Patch the *class*, so the replacement applies to future connects too.
        self.server.__class__.connect_slack_websocket = patched_connect_slack_websocket
class SlackBorg(object):
    """RTM bot loop: polls Slack, tracks conversations, dispatches commands.

    NOTE(review): Python 2 code (print statements); relies on the legacy
    slackclient RTM API.
    """

    def __init__(self, bot_id, bot_token, **kwargs):
        self.bot_id = bot_id
        self.bot_token = bot_token
        self.client = SlackClient(bot_token)
        # Seconds to sleep between RTM reads (default 1).
        self.read_delay = kwargs.get('read_delay', 1)
        self.conversation_manager = ConversationManager(self.client)
        self.command_manager = CommandManager()
        # A message triggers the bot when it contains any configured trigger
        # string or an explicit "<@bot_id>:" mention.
        self.triggers = kwargs.pop('triggers', []) + ["<@{}>:".format(self.bot_id)]

    def run(self):
        # Connect to the RTM websocket and poll forever.
        if self.client.rtm_connect():
            while True:
                self.handle_messages(self.client.rtm_read())
                time.sleep(self.read_delay)
        else:
            print "Error connecting to Slack RTM API!"

    def handle_messages(self, messages):
        # Only 'message'-type events that carry both text and a user are handled.
        for message in messages:
            print message
            if 'message' in message.get('type', '') and 'text' in message and 'user' in message:
                conversation = self.conversation_manager.process_message(message)
                if conversation.user_id == self.bot_id:
                    # Never react to the bot's own messages.
                    print "Message from myself. Ignoring!"
                    conversation.close()
                elif conversation._command or ((self.does_trigger(message['text']) or self.is_dm(message['channel']))):
                    # In-flight command, trigger word, or direct message.
                    conversation.load_data_if_necessary()
                    self.command_manager.handle_conversation(conversation)
                else:
                    print "I don't care about this conversation. Ignoring!"
                    conversation.close()

    def is_dm(self, channel_id):
        # True when channel_id is a direct-message channel.
        # NOTE(review): calls im.list on every message — could be cached.
        channels = self.client.api_call('im.list').get('ims', [])
        return any([c['id'] == channel_id for c in channels])

    def does_trigger(self, message_text):
        # True when any configured trigger substring appears in the text.
        return any([t in message_text for t in self.triggers])

# End
``` |
{
"source": "josefdlange/validict",
"score": 3
} |
#### File: validict/validict/__init__.py
```python
from __future__ import unicode_literals

# Normalize python2 and python3 vocabulary so the rest of the module can use
# `unicode`, `bytes` and `basestring` uniformly.
# http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3/
try:
    is_python2 = str != unicode
except NameError:
    # 'unicode' is undefined, must be Python 3
    is_python2 = False
    unicode = str
    basestring = (str, bytes)
else:
    # 'unicode' exists, must be Python 2
    bytes = str
def validate(template, unvalidated, quiet=False, **kwargs):
    """Recursively validate *unvalidated* against *template*.

    Template semantics:
      * tuple -- any one option must match;
      * dict  -- every template key must validate against the corresponding
                 value (missing keys are fetched as None);
      * list  -- template[0] describes every element of the list;
      * type  -- isinstance check (str/unicode loosened to basestring when
                 ``fuzzy_string_typing`` is passed in kwargs);
      * None  -- matches anything;
      * other -- compared with ==.

    Returns True on success.  On failure raises FailedValidationError, or
    returns False when *quiet* is true.
    """
    try:
        if isinstance(template, tuple):
            # Multiple template options at this level: first match wins.
            valid = False
            for template_option in template:
                try:
                    valid = validate(template_option, unvalidated, **kwargs)
                    if valid:
                        break
                except FailedValidationError:
                    pass
            if valid:
                return True
            raise FailedValidationError("None of {0} in template match topmost level of {1}".format(template, unvalidated))
        elif isinstance(template, dict) and isinstance(unvalidated, dict):
            # Two dictionaries. Compare key-by-key (generator keeps all() lazy).
            if all(validate(template[key], unvalidated.get(key), **kwargs) for key in template):
                return True
            raise FailedValidationError("{0} in template did not match topmost level of {1}".format(template, unvalidated))
        elif isinstance(template, list) and isinstance(unvalidated, list):
            # Two lists. The template list should have one element to
            # demonstrate its members' structure. This can be a tuple.
            if all(validate(template[0], item, **kwargs) for item in unvalidated):
                return True
            raise FailedValidationError("Not all list items in {0} matched template {1}".format(unvalidated, template))
        elif isinstance(template, type):
            # Template declared a type. Time to compare values.
            if template in (str, unicode) and kwargs.get('fuzzy_string_typing'):
                template = basestring
            if isinstance(unvalidated, template):
                return True
            raise FailedValidationError("{0} is not of type {1}".format(unvalidated, template))
        else:
            # Literal match: identical value, or a None template wildcard.
            if template == unvalidated or template is None:
                return True
            raise FailedValidationError("{0} is not equal to {1}".format(unvalidated, template))
    except FailedValidationError:
        if quiet:
            return False
        raise  # bare raise preserves the original traceback (was `raise e`)
class FailedValidationError(Exception):
    """Raised when a value does not conform to its validation template."""
def deep_merge(base, incoming):
    """Recursively merge *incoming* into *base*, mutating and returning *base*.

    Nested dicts are merged key-wise; any non-dict value from *incoming*
    replaces the corresponding entry (and if either argument is not a dict,
    *incoming* is returned as-is).
    """
    if not (isinstance(base, dict) and isinstance(incoming, dict)):
        return incoming
    for key, value in incoming.items():
        base[key] = deep_merge(base[key], value) if key in base else value
    return base
``` |
{
"source": "josefdolezal/fit-cvut",
"score": 3
} |
#### File: libs/cinemas/mvMovieSchedules.py
```python
from DataSource import CinemaCity
from bs4 import BeautifulSoup
from datetime import datetime
import re
class MovieSchedule:
    """Scrapes a Cinema City schedule page and parses it into Movie objects."""

    def __init__( self, url, date ):
        # NOTE(review): `date` is accepted but never stored or used — verify
        # whether date filtering was intended here.
        self.url = url
        self._load_schedule()

    def closest_movies( self ):
        # NOTE(review): prints instead of returning; looks unfinished.
        print( self.movies )

    def _load_schedule( self ):
        # Download the schedule HTML for self.url and parse it.
        cc = CinemaCity( self. url )
        self._parse_schedule( cc.movie_schedule() )

    def _parse_schedule( self, html ):
        # One <tr> per movie; rows without any <td> (headers/separators) are skipped.
        soup = BeautifulSoup( html, 'html.parser' )
        movies = soup.find_all( 'tr' )
        self.movies = [ Movie( movie ) for movie in movies if movie.td ]
class Movie:
    """One schedule table row: movie metadata plus its showtimes."""

    def __init__( self, soup ):
        # `soup` is a <tr> element; its cells are consumed destructively, in order.
        self._remove_empty_elements( soup )
        self._parse_input( soup )

    def __str__( self ):
        return self.to_json()

    def __repr__( self ):
        return self.to_json()

    def to_json( self ):
        # Hand-built JSON string.
        # NOTE(review): values are not escaped; json.dumps would be safer,
        # but the exact output format is preserved here.
        js = '{'
        js += '"name":"{}", "pg":"{}", "type":"{}", '.format(
            self.name, self.pg, self.mtype )
        js += '"language":"{}", "duration":"{}", '.format(
            self.language, self.duration )
        js += '"showtimes":{}'.format( self._showtimes_to_json() )
        js += '}'
        return js

    def _parse_input( self, soup ):
        # Each extract() pops the next remaining <td>, so order matters:
        # name (link text), rating, type, language, duration, then showtimes.
        self.name = soup.td.extract().a.string
        self.pg = soup.td.extract().string
        self.mtype = soup.td.extract().string
        self.language = soup.td.extract().string
        self.duration = soup.td.extract().string
        self._setup_showtimes( soup )

    def _remove_empty_elements( self, soup ):
        # Drop cells with no content so the positional parsing stays aligned.
        for el in soup.find_all( 'td' ):
            if not el.contents:
                el.extract()

    def _setup_showtimes( self, soup ):
        # The remaining cells each hold an HH:MM showtime link.
        self.showtimes = [ datetime.strptime( e.a.string.strip(), '%H:%M' ).time() for e in soup.find_all( 'td' ) ]

    def _showtimes_to_json( self ):
        # Render showtimes as a JSON array of "HH:MM" strings; the identity
        # check skips the comma only for the first element object.
        js = '['
        for show in self.showtimes:
            if show is not self.showtimes[0]:
                js += ', '
            js += '"{}"'.format( show.strftime( '%H:%M' ) )
        js += ']'
        return js
```
#### File: beecon/campaigns/models.py
```python
from django.contrib.auth.models import User
from django.db import models
class App( models.Model ):
    """A pluggable beacon application; api_class names the handler class
    loaded dynamically by the campaigns views."""
    name = models.CharField( max_length=60 )
    api_class = models.CharField( max_length=60 )

    def __str__( self ):
        return self.name
class AppConfig( models.Model ):
    """A configuration key expected by an App (values live in ServiceParams)."""
    name = models.CharField( max_length=60 )
    app = models.ForeignKey( App, on_delete=models.PROTECT )
    required = models.BooleanField( default=True )

    def __str__( self ):
        return self.name
class Campaign( models.Model ):
    """A beacon campaign: unique code, beacon UUID, owning App and manager."""
    name = models.CharField( max_length=200 )
    code = models.CharField( max_length=50, unique=True )
    beacon_uuid = models.CharField( 'Beacon UUID', max_length=200 )
    app = models.ForeignKey( App, on_delete=models.PROTECT )
    manager = models.ForeignKey( User, on_delete=models.CASCADE )

    def __str__( self ):
        return self.name
class AccessToken( models.Model ):
    """An API access token bound to one Campaign."""
    description = models.CharField( max_length=50 )
    token = models.CharField( max_length=65 )
    date_added = models.DateTimeField()
    campaign = models.ForeignKey( Campaign, on_delete=models.CASCADE )
class Location( models.Model ):
    """A physical site within a Campaign, identified by the beacon major id."""
    name = models.CharField( max_length=200 )
    beacon_major = models.CharField( max_length=10 )
    campaign = models.ForeignKey( Campaign, on_delete=models.CASCADE )

    def __str__( self ):
        return self.name
class Service( models.Model ):
    """A service offered at a Location, identified by the beacon minor id."""
    name = models.CharField( max_length=60 )
    beacon_minor = models.CharField( max_length=10 )
    location = models.ForeignKey( Location, on_delete=models.CASCADE )

    def __str__( self ):
        return self.name
class ServiceParams( models.Model ):
    """Key/value configuration entry for a Service (key is an AppConfig)."""
    key = models.ForeignKey( AppConfig, on_delete=models.PROTECT )
    value = models.CharField( max_length=60 )
    service = models.ForeignKey( Service, on_delete=models.PROTECT )

    def __str__( self ):
        return '{}'.format( self.key )
```
#### File: beecon/campaigns/views.py
```python
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from django.conf import settings
from importlib import import_module
import imp, json
from os import path
import sys
from .models import App, Campaign, ServiceParams, Service
def api( request ):
    """Render the test page for the campaign identified by ?app_code=..."""
    campaign = get_object_or_404( Campaign, code=request.GET['app_code'] )
    return render( request, 'campaigns/test.html', { 'app': campaign } )
class JsonView:
    """Builds the JSON API response bodies for beacon service lookups."""

    @classmethod
    def response( cls, request ):
        """Resolve ?app_code/?location/?service into a service payload.

        Returns the generic error body on any failure (missing GET keys,
        unknown campaign/service, broken api module import).
        """
        app_code = request.GET['app_code']
        location = request.GET['location']
        service = request.GET['service']
        response = {}
        try:
            campaign = get_object_or_404( Campaign, code=app_code )
            library = campaign.app.api_class
            service = Service.objects.get(
                beacon_minor=service,
                location__beacon_major=location,
                location__campaign__code=app_code
            )
            config = ServiceParams.objects.filter( service__pk=service.pk )
            response['app'] = campaign.name
            response['uuid'] = campaign.beacon_uuid
            response['service'] = service.name
            response['location'] = service.location.name
            api = cls.import_api( library )
            response["data"] = api.entry_point( cls.campaing_configuration( config ) )
        except Exception:
            # Was a bare `except:` (which also swallowed SystemExit /
            # KeyboardInterrupt); narrowed but still intentionally best-effort.
            return JsonResponse( cls.empty_response() )
        return JsonResponse( cls.base_response( response ) )

    @classmethod
    def empty_response( cls ):
        """Generic 500 body used whenever a request cannot be served."""
        return cls.base_response( { 'status': 500, 'status_text': 'Bad request format.' } )

    @classmethod
    def base_response( cls, args=None ):
        """Return the response skeleton with recognised keys of *args* applied.

        The mutable default ``args={}`` was replaced by None (shared-default
        pitfall); unknown keys in *args* are ignored.
        """
        # 'Unknow' / 'prefered_view' typos are kept verbatim: clients may
        # already depend on these exact payload values/keys.
        body = {
            'status': 200,
            'status_text': 'OK',
            'app': 'Unknow',
            'location': 'Unknown',
            'service': 'Unknow',
            'data': {},
            'uuid': 'Unknown',
            'prefered_view': 'app.default',
        }
        for key, val in (args or {}).items():
            if key in body:
                body[key] = val
        return body

    @classmethod
    def import_api( cls, mod ):
        """Import 'module.Class' from the sibling libs package, return the attribute."""
        mod = mod.split('.')
        m = import_module( '..libs.' + mod[0], __name__ )
        return getattr( m, mod[1] )

    @classmethod
    def campaing_configuration( cls, configs ):
        # (sic: "campaing") Build a {param_key: value} dict from ServiceParams rows.
        configuration = {}
        for c in configs:
            configuration[ '%s' % c.key ] = c.value
        return configuration
def info( request, app_code ):
    """JSON endpoint: basic campaign info (name + beacon UUID) by app code."""
    try:
        campaign = Campaign.objects.get( code=app_code )
    except Exception:
        # Narrowed from a bare `except:`; Campaign.DoesNotExist is the
        # expected failure here.
        return JsonResponse( JsonView.empty_response() )
    return JsonResponse( JsonView.base_response( { 'app': campaign.name, 'uuid': campaign.beacon_uuid } ) )
def services( request, app_code ):
    """JSON endpoint: list every service (with beacon major/minor) of a campaign."""
    data = {}
    try:
        services = Service.objects.filter( location__campaign__code=app_code )
        campaign = Campaign.objects.get( code=app_code )
        data['app'] = campaign.name
        data['uuid'] = campaign.beacon_uuid
    except Exception:
        # Narrowed from a bare `except:`.
        return JsonResponse( JsonView.empty_response() )
    # The original called base_response twice (inside the try and again here);
    # the second call was redundant — once is enough.
    data = JsonView.base_response( data )
    data['services'] = [ {
        "name": service.name,
        "major": service.location.beacon_major,
        "minor": service.beacon_minor,
    } for service in services ]
    return JsonResponse( data )
```
#### File: beecon/polls/models.py
```python
import datetime
from django.db import models
from django.utils import timezone
class Question( models.Model):
    """Poll question with its publication timestamp."""
    question_text = models.CharField( max_length=200 )
    pub_date = models.DateTimeField( 'date published' )

    def __str__( self ):
        return self.question_text

    def was_published_recently( self ):
        """True only for questions published within the last day.

        The original `pub_date >= now - 1 day` also returned True for
        questions dated in the future; the upper bound fixes that.
        """
        now = timezone.now()
        return now - datetime.timedelta( days=1 ) <= self.pub_date <= now
    # Django admin column configuration for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer for a Question, with its vote tally."""
    question = models.ForeignKey( Question, on_delete=models.CASCADE )
    choice_text = models.CharField( max_length=200 )
    votes = models.IntegerField( default=0 )

    def __str__( self ):
        return self.choice_text
``` |
{
"source": "JosefDoun/Covid_19_Government_Measures_Assessment_Dataset",
"score": 3
} |
#### File: JosefDoun/Covid_19_Government_Measures_Assessment_Dataset/Dataset.py
```python
import os
import pandas as pd
__author__ = '<NAME>'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
class Dataset:
    """Wraps one raw data file from ./data and cleans it into per-country
    structures (dicts keyed by harmonized country name; see preprocess())."""

    def __init__(self, filename):
        self.name = filename
        # File extension, located by searching the last 7 characters for '.'.
        self.format = filename[filename.index('.', -7):]
        self.path = os.path.join(os.getcwd(), 'data', self.name)
        if self.format == '.csv':
            if 'GHCN_avg' in self.name:
                # GHCN temperature dump: no header row.
                self.df = pd.read_csv(self.path, header=None)
            elif '.POP.' in self.name:
                # World Bank population file; real header starts on the 3rd row.
                # NOTE(review): hard-coded backslash relative path ignores
                # self.path — Windows-only and fragile; verify.
                self.df = pd.read_csv('data\API_SP.POP.TOTL_DS2_en_csv_v2_1217749.csv', header=2)
            else:
                self.df = pd.read_csv(self.path)
        elif self.format == '.xlsx':
            # ACAPS measures workbook: data lives on the 'Database' sheet.
            self.df = pd.read_excel(self.path, sheet_name='Database')

    def preprocess(self):
        """ Current method cleans source data and organizes them into dictionaries with countries as keys """
        if 'acaps' in self.name:
            # --- ACAPS government measures ------------------------------
            # Encode measure introductions as +1 and phase-outs as -1.
            self.df.LOG_TYPE = self.df.LOG_TYPE.map({'Introduction / extension of measures' : 1,
                                                     'Phase-out measure' : -1})
            self.df = self.df[['COUNTRY', 'DATE_IMPLEMENTED', 'MEASURE', 'NON_COMPLIANCE', 'LOG_TYPE', 'COMMENTS']]
            self.df.rename(columns={'COUNTRY' : 'country',
                                    'DATE_IMPLEMENTED' : 'date',
                                    'MEASURE' : 'measures',
                                    'NON_COMPLIANCE' : 'non_compliance',
                                    'LOG_TYPE' : 'log_type'}, inplace=True)
            # Harmonize country names with the other datasets.
            self.df.country = self.df.country.apply(lambda x: x.capitalize())\
                .apply(lambda x: 'Us' if x == 'United states of america'
                       else 'Russia' if x == 'Russian federation' else 'Moldova'
                       if x == 'Moldova republic of' else 'China' if 'China' in x
                       else 'North macedonia' if 'macedonia' in x else 'Korea, south'
                       if 'Korea republic' in x else x)
            self.df.measures = self.df.measures.apply(lambda x: x.capitalize().strip())
            self.df['date'] = self.df['date'].apply(lambda x: x.strip() if type(x) == str else x)
            self.df['date'] = pd.to_datetime(self.df['date'], errors='raise')
            self.df.dropna(subset=['date'], inplace=True)
            self.df.set_index('country', inplace=True)
            self.df = self.df[['measures', 'date', 'log_type', ]].sort_values('date')
            unique_measures = self.df.measures.unique()
            # One date-indexed frame per country.
            measures = {country : self.df.loc[country].set_index('date') for country in self.df.index.unique()}
            for country in measures.keys():
                # One-hot each measure, signed by log_type (+1 introduce / -1 phase out).
                for measure in unique_measures:
                    measures[country][measure] = measures[country].measures.apply(lambda x: int(measure == x))
                    measures[country][measure] = measures[country][measure] * measures[country].log_type
                measures[country].drop(columns=['measures', 'log_type'], inplace=True)
                # Daily cumulative activity per measure, min-max normalized to [0, 1].
                measures[country] = measures[country].groupby('date').sum()
                measures[country] = measures[country].resample('D').sum()
                measures[country] = measures[country].cumsum(axis=0, skipna=True)
                measures[country] = (measures[country] - measures[country].min(axis=0)) / \
                                    (measures[country].max(axis=0) - measures[country].min(axis=0))
                measures[country].fillna(0, inplace=True)
            return measures
        elif 'time_series' in self.name:
            # --- Johns Hopkins time series (confirmed/recovered/deaths) --
            self.df.rename(columns={'Country/Region' : 'country'}, inplace=True)
            self.df['country'] = self.df['country'].apply(lambda x: x.capitalize())\
                .apply(lambda x: 'Czech republic' if
                       'Czech' in x else 'Congo' if
                       'brazzaville' in x else 'Congo dr'
                       if 'kinshasa' in x else "Côte d'ivoire"
                       if 'Cote' in x else x)
            # Collapse province rows into country totals.
            self.df = self.df.drop(columns=['Province/State', 'Lat', 'Long'], inplace=False)
            self.df.set_index('country', inplace=True)
            self.df = self.df.groupby('country').sum()
            if 'confirmed' in self.name:
                # Keep only countries that reached > 500 cumulative cases.
                confirmed_cases = {i : row for i, row in self.df.iterrows() if row[-1] > 500}
                for country, df in confirmed_cases.items():
                    df.index = pd.to_datetime(df.index, errors='raise')
                    # Drop leading zero-case days.
                    confirmed_cases[country] = confirmed_cases[country][confirmed_cases[country] > 0]
                return confirmed_cases
            elif 'recovered' in self.name:
                recoveries = {i : row for i, row in self.df.iterrows()}
                for country, df in recoveries.items():
                    df.index = pd.to_datetime(df.index, errors='raise')
                    recoveries[country].sort_index(inplace=True)
                return recoveries
            elif 'deaths' in self.name:
                deaths = {i : row for i, row in self.df.iterrows()}
                for country, df in deaths.items():
                    df.index = pd.to_datetime(df.index, errors='raise')
                    deaths[country].sort_index(inplace=True)
                    # Drop leading zero-death days.
                    deaths[country] = deaths[country][deaths[country] > 0]
                return deaths
        elif 'GHCN_avg' in self.name:
            # --- GHCN average temperatures ------------------------------
            # Map the 2-letter station prefix to a country name.
            with open('data/GHCN_country_codes.txt', 'r') as f:
                lines = f.readlines()
                country_codes = {line[:2]: line[2:-1].strip() for line in lines}
                f.close()  # redundant: the with-statement already closes the file
            self.df[0] = self.df[0].apply(lambda x: x[:2]).map(country_codes).apply(lambda x: x.capitalize())
            self.df[0] = self.df[0].apply(lambda x: 'Us' if 'United states' in x else 'Czech republic' if
                                          'Czech' in x else 'Congo' if
                                          'brazzaville' in x else 'Congo dr'
                                          if 'kinshasa' in x else "Côte d'ivoire"
                                          if 'Cote' in x else 'North macedonia'
                                          if 'Macedonia' in x else 'Bahamas'
                                          if 'Bahamas' in x else x)
            # Column 1 is a YYYYMMDD value; rebuild it as an ISO date string.
            self.df[1] = self.df[1].apply(str).apply(lambda x: "{}-{}-{}".format(x[:4], x[4:6], x[6:]))
            self.df[1] = pd.to_datetime(self.df[1], errors='raise')
            self.df = self.df[[0, 1, 3]].rename(columns={0 : 'country', 1 : 'date', 3 : 'avg_temp'})
            self.df.set_index('country', inplace=True)
            temperatures = {country : self.df.loc[country] for country in self.df.index.unique()}
            for country in temperatures.keys():
                # Country-wide daily mean, gap-filled by linear interpolation.
                temperatures[country] = temperatures[country].groupby('date').avg_temp.mean()
                temperatures[country] = temperatures[country].resample('D').mean().interpolate('linear')\
                    .apply(int)
            return temperatures
        elif 'BUILT_UP' in self.name:
            # --- Built-up area in km² for year 2014 ---------------------
            self.df = self.df[(self.df['MEAS'] == 'SQKM') & (self.df.Year == 2014)]
            self.df = self.df[['Country', 'Value']].rename(columns={'Value' : 'Built_up_sqkm'})
            self.df['Country'] = self.df['Country'].apply(lambda x: x.capitalize())\
                .apply(lambda x: 'China' if 'China' in x or 'china' in x
                       else 'Korea, south' if 'Korea' in x else 'Congo dr'
                       if 'Democratic republic of the congo' in x else 'Us'
                       if x == 'United states' else 'Slovakia'
                       if 'Slovak' in x else x)
            self.df.set_index('Country', inplace=True)
            built_up = {i : row.values[0] for i, row in self.df.iterrows()}
            return built_up
        elif '.POP.' in self.name:
            # --- World Bank population (2019) ---------------------------
            self.df = self.df[['Country Name', '2019']]
            self.df['Country Name'] = self.df['Country Name'].apply(lambda x: x.capitalize())\
                .apply(lambda x: 'Us' if x == 'United states'
                       else 'Congo dr' if x == 'Congo, dem. rep.'
                       else 'Congo' if x == 'Congo, rep.' else 'Russia'
                       if 'Russian' in x else "Côte d'ivoire"
                       if 'Cote' in x else 'Korea, south'
                       if x == 'Korea, rep.' else 'China' if 'China' in x
                       or 'china' in x else 'Iran' if 'Iran' in x else
                       'Kyrgyzstan' if 'Kyrgyz' in x else 'Egypt'
                       if 'Egypt' in x else 'Venezuela' if 'Venezuela'
                       in x else 'Yemen' if 'Yemen' in x else
                       'Dominican republic' if x == 'Dominica' else
                       'Slovakia' if 'Slovak' in x else x)
            self.df.set_index('Country Name', inplace=True)
            population = {i : row.values[0] for i, row in self.df.iterrows()}
            return population
def sync(temperatures, measures, confirmed_cases, deaths,
         built_up, population, recoveries):
    """Synchronize the per-country datasets in place.

    Keeps only the countries present in *every* input mapping, then clips
    each country's measures series to the (exclusive) date range covered
    by its confirmed-cases series. All mappings are mutated in place;
    nothing is returned.

    Parameters
    ----------
    temperatures, measures, confirmed_cases, deaths, built_up,
    population, recoveries : dict
        Mappings from country name to that country's data.
    """
    datasets = (temperatures, measures, confirmed_cases, deaths,
                built_up, population, recoveries)
    # Countries present in every dataset vs. present in at least one.
    countries_intersection = set.intersection(*(set(d) for d in datasets))
    countries_union = set.union(*(set(d) for d in datasets))
    # Drop any country missing from at least one dataset so that all
    # mappings end up with an identical key set.
    for country in countries_union - countries_intersection:
        for dataset in datasets:
            dataset.pop(country, None)  # no-op when the key is absent
    for country in measures:
        # Clip the measures series to the confirmed-cases date range so
        # the two series line up (both endpoints excluded, as before).
        index = measures[country].index
        cases_index = confirmed_cases[country].index
        measures[country] = measures[country][(index > cases_index[0]) &
                                              (index < cases_index[-1])]
        # Sanity checks: after pruning, every dataset must cover `country`.
        assert country in confirmed_cases, country
        assert country in temperatures, country
        assert country in population, country
        assert country in built_up, country
``` |
{
"source": "JosefDoun/Ikonos-2-Building-Segmentation-U-Net",
"score": 2
} |
#### File: JosefDoun/Ikonos-2-Building-Segmentation-U-Net/evaluation.py
```python
from osgeo import gdal_array, gdal
from model_architecture import BuildingsModel
from data_loader import to_tiles
from glob import glob
from tqdm import tqdm
from sys import argv
import matplotlib.pyplot as plt
import torch
import h5py
import os
import argparse
plt.rcParams.update({'font.size': 18})
parser = argparse.ArgumentParser("Model Evaluation on Test set")
parser.add_argument(
"--model",
type=str,
default="Models/ikonos-2-buildings-net-spring.pt"
)
def write_hdf5(tiles: int):
    """Tile the evaluation rasters and store them in an HDF5 test set.

    For every x*.tif / y*.tif pair under Evaluation/, the image is scaled
    to [0, 1] (11-bit Ikonos data), cut into square tiles of side `tiles`
    and written as gzip-compressed datasets under Test/X and Test/Y in
    Evaluation/test_data.hdf5.

    Parameters
    ----------
    tiles : int
        Side length (in pixels) of the square tiles.
    """
    for x, y in zip(sorted(glob("Evaluation/x*.tif")), sorted(glob("Evaluation/y*.tif"))):
        # Ikonos-2 pixels are 11-bit, hence the 2**11 normalization.
        X = gdal_array.LoadFile(x) / (2**11)
        Y = gdal_array.LoadFile(y)
        # BUG FIX: the tile size was hard-coded to 512, silently ignoring
        # the `tiles` argument and contradicting the maxshape below.
        X, Y = to_tiles(X, Y, tiles)
        # Dataset names: file stem without directory or extension.
        x, y = x.split('/')[-1].split('.')[-2], y.split('/')[-1].split('.')[-2]
        with h5py.File("Evaluation/test_data.hdf5", 'a') as f:
            test_group_x = f.require_group('Test/X')
            test_group_y = f.require_group('Test/Y')
            test_group_x.create_dataset(
                x,
                shape=X.shape,
                dtype='f',
                data=X,
                compression='gzip',
                compression_opts=8,
                # Resizable along axis 0 (number of tiles); 4 input bands.
                maxshape=(
                    None,
                    4,
                    tiles,
                    tiles
                )
            )
            test_group_y.create_dataset(
                y,
                shape=Y.shape,
                dtype='i1',
                data=Y,
                compression='gzip',
                compression_opts=8,
                maxshape=(
                    None,
                    tiles,
                    tiles
                )
            )
# def expand(dataset, data, label: bool):
# """
# dataset: h5py dataset instance for expansion
# data: data to fit in dataset
# label: flag to assume dataset & data dimensions
# <false> NOT a label: <X>
# <true> a label: <Y>
# """
# if not label:
# dataset_shape = dataset.shape
# dataset.resize(
# (
# dataset_shape[0] + data.shape[0],
# dataset_shape[1],
# dataset_shape[2],
# dataset_shape[3]
# )
# )
# dataset[dataset_shape[0]:] = data
# elif label:
# dataset_shape = dataset.shape
# dataset.resize(
# (
# dataset_shape[0] + data.shape[0],
# dataset_shape[1],
# dataset_shape[2]
# )
# )
# dataset[dataset_shape[0]:] = data
# return
class Evaluate:
    """Evaluate the trained model on the three held-out test areas.

    Reads tiled test data from Evaluation/test_data.hdf5, runs the model
    over every tile, accumulates a 2x2 pixel confusion matrix per area
    and writes sample-prediction images plus precision/recall figures.
    """
    # Names of the evaluation areas; each maps to one x/y HDF5 dataset pair.
    AREAS = ['URBAN', 'INDUSTRIAL', 'BACKGROUND']
    # (row, col) position of each confusion-matrix cell in the 2x2 tensor.
    C_MAT = {
        "TP": (0, 0),
        "FP": (0, 1),
        "FN": (1, 0),
        "TN": (1, 1)
    }
    def __init__(self) -> None:
        """Open the test data, load the trained model and prepare figures."""
        # NOTE(review): the h5py file handle stays open for the object's
        # lifetime and is never closed explicitly.
        f = h5py.File("Evaluation/test_data.hdf5", "r")
        self.X = {
            "URBAN": f['Test/X/x1'],
            "INDUSTRIAL": f['Test/X/x2'],
            "BACKGROUND": f['Test/X/x3']
        }
        self.Y = {
            "URBAN": f['Test/Y/y1'],
            "INDUSTRIAL": f['Test/Y/y2'],
            "BACKGROUND": f['Test/Y/y3']
        }
        self.cli_args = parser.parse_args(argv[1:])
        # 4 input channels (Ikonos bands), 16 base feature maps.
        self.model = BuildingsModel(4, 16)
        self.model.load_state_dict(
            torch.load(self.cli_args.model)
        )
        self.model.eval()
        # One reusable figure for sample predictions, one for the matrices.
        self.sample_fig, self.sample_axes = plt.subplots(1, 3, figsize=(15, 10))
        self.c_fig, self.c_ax = plt.subplots(figsize=(15, 10))
        self.c_ax.xaxis.tick_top()
    def main(self):
        """Run the evaluation for every test area."""
        for AREA in tqdm(self.AREAS):
            self.evaluate(AREA)
    def evaluate(self, AREA):
        """Predict every tile of AREA, saving images and confusion figures."""
        c_matrix = torch.zeros(2, 2)
        for i, (image, label) in tqdm(enumerate(zip(self.X[AREA], self.Y[AREA]))):
            image, label, y_hat = self.predict(image, label)
            self.add_metrics(label, y_hat, c_matrix)
            self.write_prediction(image, label, y_hat, AREA, i)
        # Write both normalizations of the matrix: i=1 precision, i=2 recall.
        for i in range(1, 3):
            self.write_matrix(c_matrix, AREA, i)
    def predict(self, image, label):
        """Return the batched image, its label tensor and the class map."""
        image = torch.from_numpy(image).unsqueeze_(0)  # add batch dimension
        label = torch.from_numpy(label)
        # argmax over the channel axis of the model's last output head.
        _, y_hat = self.model(image)[-1].max(-3)
        return image, label, y_hat
    def add_metrics(self, y, y_hat, c_matrix):
        """Accumulate TP/FP/FN/TN pixel counts into c_matrix (in place)."""
        c_matrix[self.C_MAT['TP']] += ((y_hat == 1) & (y == 1)).sum()
        c_matrix[self.C_MAT['FP']] += ((y_hat == 1) & (y == 0)).sum()
        c_matrix[self.C_MAT['FN']] += ((y_hat == 0) & (y == 1)).sum()
        c_matrix[self.C_MAT['TN']] += ((y_hat == 0) & (y == 0)).sum()
    def write_matrix(self, c_matrix: torch.Tensor, AREA, i):
        """Save the confusion matrix normalized along axis -i.

        i == 1 yields the precision view, i == 2 the recall view.
        """
        titles = {
            1: 'Precision',
            2: 'Recall'
        }
        self.c_ax.imshow(c_matrix / c_matrix.sum(-i, keepdim=True))
        self.c_ax.set_title(titles[i])
        self.c_ax.set_xticks([0, 1]), self.c_ax.set_xticklabels(['Buildings', 'Background'])
        self.c_ax.set_xlabel('Actual')
        self.c_ax.set_yticks([0, 1]), self.c_ax.set_yticklabels(['Buildings', 'Background'])
        self.c_ax.set_ylabel('Predictions')
        self.c_ax.set_label("Producer's Accuracy: % Correct Actual")
        self.annotate(c_matrix, self.c_ax, i)
        self.c_fig.tight_layout()
        self.c_fig.savefig(f"Evaluation/Results/{AREA}_{titles[i]}.png")
        self.c_ax.clear()  # reuse the same axes for the next figure
    def annotate(self, c_matrix, ax, i):
        """Annotate every matrix cell with its normalized percentage."""
        pa = c_matrix / c_matrix.sum(-i, keepdim=True)
        for key in self.C_MAT:
            ax.annotate(text=f"{round(pa[self.C_MAT[key]].item()*100, 2)}%",
                        xy=(self.C_MAT[key][1]-0.1, self.C_MAT[key][0]),)
    def write_prediction(self, img, label, y_hat, AREA, i):
        """Save a side-by-side figure of input, ground truth and prediction."""
        # Bands [2, 1, 0] reorder the channels into an RGB-like display.
        self.sample_axes[0].imshow(img.squeeze(0).moveaxis(0, -1)[:, :, [2, 1, 0]])
        self.sample_axes[0].set_label("X")
        self.sample_axes[1].imshow(label.squeeze(0))
        self.sample_axes[1].set_label('Y')
        self.sample_axes[2].imshow(y_hat.squeeze(0))
        self.sample_axes[2].set_label('y_hat')
        self.sample_fig.tight_layout()
        self.sample_fig.savefig(f"Evaluation/Predictions/{AREA}{i}.png")
        for ax in self.sample_axes:
            ax.clear()
if __name__ == '__main__':
if not os.path.exists("Evaluation/test_data.hdf5"):
write_hdf5(512)
Evaluate().main()
```
#### File: JosefDoun/Ikonos-2-Building-Segmentation-U-Net/test_classes.py
```python
import pytest
import random
import torch
import h5py
from torch.tensor import Tensor
from torch.utils.data import DataLoader
# with pytest.warns(DeprecationWarning):
from data_loader import Buildings
from model_architecture import BuildingsModel, DownSamplingBlock, UpSamplingBlock
from model_training import Training
class TestDataset:
    """Tests for the Buildings dataset (training and validation splits)."""
    # Shared dataset instances, built once for the whole test class.
    training = Buildings()
    validation = Buildings(validation=True)
    def test_hdf5_datasets(self):
        """Every positive tile in the HDF5 file must contain labeled pixels."""
        with h5py.File("Training/training_dataset.hdf5", mode='r') as f:
            for image in f['training/Y/pos']:
                assert image.mean() > 0
            for image in f['training/X/pos']:
                assert image.mean() > 0
    def test_training_dataset_instantiation(self):
        """The training split must be non-empty (truthy)."""
        assert self.training
    def test_validation_dataset_instantiation(self):
        """The validation split must be non-empty (truthy)."""
        assert self.validation
    def test_training_dataset_indexing(self):
        """First, second and last index work; one-past-the-end raises."""
        assert self.training[0]
        assert self.training[1]
        assert self.training[len(self.training)-1]
        with pytest.raises(Exception):
            self.training[len(self.training)]
    def test_validation_dataset_indexing(self):
        """First, second and last index work; one-past-the-end raises."""
        assert self.validation[0]
        assert self.validation[1]
        assert self.validation[len(self.validation)-1]
        with pytest.raises(Exception):
            self.validation[len(self.validation)]
    def test_training_dataset_output(self):
        """A random sample is an (image, mask) tuple of matching square sizes."""
        idx = int(random.random() * len(self.training))
        output = self.training[idx]
        assert isinstance(output, tuple)
        assert isinstance(output[0], Tensor)
        assert isinstance(output[1], Tensor)
        assert output[0].dim() == 3
        assert output[1].dim() == 2
        assert output[0].size(-1) == output[1].size(-2)
        assert output[1].size(-1) == output[1].size(-2)
        # data = [self.training[i] for i in range(len(self.training))]
        # s = set(data)
        # assert len(data) == len(s), "The Dataset outputs duplicates"
    def test_validation_dataset_output(self):
        """A random sample is an (image, mask) tuple of matching square sizes."""
        idx = int(random.random() * len(self.validation))
        output = self.validation[idx]
        assert isinstance(output, tuple)
        assert isinstance(output[0], Tensor)
        assert isinstance(output[1], Tensor)
        assert output[0].dim() == 3
        assert output[1].dim() == 2
        assert output[0].size(-1) == output[1].size(-2)
        assert output[1].size(-1) == output[1].size(-2)
        # data = [self.validation[i] for i in range(len(self.validation))]
        # s = set(data)
        # assert len(data) == len(s), "The Dataset outputs duplicates"
    def test_augmentation(self):
        """Validation samples are deterministic; training samples are
        randomly augmented (at least one of 10 re-reads must differ)."""
        idx_t = int(random.random() * len(self.training))
        idx_v = int(random.random() * len(self.validation))
        successes = 0
        for i in range(10):
            assert torch.equal(self.validation[idx_v][0],
                               self.validation[idx_v][0]), f"{i}"
            if not torch.equal(self.training[idx_t][0],
                               self.training[idx_t][0]):
                successes += 1
        assert 10 >= successes > 0, "Data Augmentation Test Failed"
class TestDataloader:
    """Smoke tests for the training/validation DataLoader wiring."""
    # Loaders built once; mirror the settings used during real training.
    training_loader = DataLoader(Buildings(),
                                 batch_size=256,
                                 shuffle=True,
                                 num_workers=4,
                                 pin_memory=True)
    validation_loader = DataLoader(Buildings(validation=True),
                                   batch_size=256,
                                   shuffle=True,
                                   num_workers=4,
                                   pin_memory=True)
    def test_loader_instantiations(self):
        """Both loaders must construct successfully (truthy)."""
        assert self.training_loader
        assert self.validation_loader
    def test_loader_throughput(self):
        """Placeholder: iterate the loaders and check batch shapes."""
        # TODO
        ...
class TestModel:
    """Shape tests for the U-Net building blocks and the full model."""
    def test_downsampling(self):
        """Downsampling halves H/W and multiplies channels by the factor."""
        x = torch.randn(4, 2, 512, 512)
        factor = 2
        model = DownSamplingBlock(x.size(-3),
                                  channel_up_factor=factor)
        # The block returns the downsampled tensor plus a skip connection.
        y, skip = model(x)
        assert y.size(-1) == x.size(-1) // 2
        assert y.size(-2) == x.size(-2) // 2
        assert y.size(-3) == x.size(-3) * factor
    def test_upsampling(self):
        """Upsampling doubles H/W and divides channels by the factor."""
        x = torch.randn(4, 2, 512, 512)
        # Skip tensor from the matching encoder level (twice the resolution).
        x_skip = torch.randn(4, 2, 1024, 1024)
        factor = 2
        model = UpSamplingBlock(x.size(-3),
                                channel_down_factor=factor,
                                skip_channels=x_skip.size(-3))
        y = model(x, x_skip)
        assert y.size(-1) == x.size(-1) * 2
        assert y.size(-2) == x.size(-2) * 2
        assert y.size(-3) == x.size(-3) // factor
    def test_model_output(self):
        """Full model preserves batch and spatial dims, outputs 2 classes."""
        x = torch.randn(3, 4, 512, 512)
        model = BuildingsModel(x.size(1), 3)
        # The model returns logits `z` and per-class probabilities `p`.
        z, p = model(x)
        assert p.size(-1) == x.size(-1)
        assert p.size(-2) == x.size(-2)
        assert p.size(-3) == 2
        assert p.size(-4) == x.size(-4)
# class TestTraining:
# training = Training()
``` |
{
"source": "josefeg/cloudacademy-dl",
"score": 2
} |
#### File: josefeg/cloudacademy-dl/setup.py
```python
from setuptools import setup
import os
import subprocess
from cloudacademy import __version__
def read_file(filename):
    """Return the full contents of *filename*, or '' when it cannot be read."""
    contents = ''
    try:
        with open(filename) as source:
            contents = source.read()
    except IOError:
        # Missing/unreadable file: fall back to the empty string.
        pass
    return contents
def generate_long_description():
    """Convert README.md to reStructuredText via pandoc and return it.

    Falls back to a short notice when pandoc is unavailable or the
    temporary output file cannot be produced or removed.
    """
    rst_output = 'temp.rst'
    pandoc_cmd = [
        'pandoc',
        '--from=markdown',
        '--to=rst',
        '--output=' + rst_output,
        'README.md'
    ]
    try:
        subprocess.call(pandoc_cmd)
        description = read_file(rst_output)
        os.remove(rst_output)
    except (IOError, OSError):
        # pandoc missing (OSError from subprocess) or temp-file trouble.
        description = 'Could not generate docs. Refer to README.md'
    return description
# Package metadata and distribution configuration (consumed by pip/PyPI).
setup(
    name='cloudacademy-dl',
    version=__version__,
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    url='https://github.com/josefeg/cloudacademy-dl',
    # Runtime dependencies: HTML parsing, CLI argument parsing and HTTP.
    install_requires=[
        'beautifulsoup4>=4.5.3',
        'docopt>=0.6.2',
        'lxml>=3.6.0',
        'requests>=2.9.1'
    ],
    description='A utility for downloading CloudAcademy.com lecture videos.',
    # README.md converted to reST at build time (requires pandoc).
    long_description=generate_long_description(),
    keywords=[
        'cloudacademy-dl',
        'cloudacademy',
        'download',
        'education',
        'MOOCs',
        'video'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Python',
        'Topic :: Education',
    ],
    packages=['cloudacademy'],
    # Console entry point: `cloudacademy-dl` runs cloudacademy_dl.main().
    entry_points=dict(
        console_scripts={
            'cloudacademy-dl=cloudacademy.cloudacademy_dl:main'
        }
    ),
    platforms=['any']
)
``` |
{
"source": "JoseFernandez16/Laboratorio_6_TPL",
"score": 2
} |
#### File: JoseFernandez16/Laboratorio_6_TPL/ejemplo1.py
```python
from kivy.app import App
from kivy.uix.popup import Popup
from kivy.uix.relativelayout import RelativeLayout
class Pestaña(Popup):
    """Popup window; its content is expected to come from a kv rule."""
    pass
class MyRelativeLayout(RelativeLayout):
    """Root layout exposing a helper that opens the Pestaña popup."""
    def open_popup(self):
        """Create a new Pestaña instance and display it."""
        pops = Pestaña()
        pops.open()
class PopUpWindow(App):
    """Kivy application whose root widget is MyRelativeLayout."""
    def build(self):
        """Return the root widget of the app."""
        return MyRelativeLayout()
if __name__ == "__main__":
window = PopUpWindow()
window.run()
``` |
{
"source": "JoseFernandez16/mecatronicaUNT_Prog2_SMTCPCSOS",
"score": 3
} |
#### File: mecatronicaUNT_Prog2_SMTCPCSOS/Paciente_App/informacion_screen.py
```python
from kivy.lang.builder import Builder
from kivymd.uix.screen import MDScreen
from kivy.lang import Builder
#from login_screen import LoginScreen
import webbrowser
kv="""
<InformacionScreen>:
name:'information_screen'
Screen:
FitImage:
source:'recursos/imagenes/fondo_2.jpg'
BoxLayout:
id:box
orientation: 'vertical'
size_hint: None,None
width:500
height:500
pos_hint: {"center_x": .5,"center_y": .5 }
padding:"30dp","15dp","30dp","30dp"
spacing:'10dp'
canvas:
Color:
rgba: 1,1,1, .5
RoundedRectangle:
pos:self.pos
size:self.size
radius: 10,10,10,10
MDLabel:
text:"Funcionamiento de la aplicacion"
font_style:"H6"
size_hint:1,.1
MDLabel:
id:indicaciones
font_style:"Body1"
MDRaisedButton:
pos_hint: {"center_x": .5,"center_y": .5 }
text: "Regístrese aquí"
text_color: 0,0,.4,1
md_bg_color: 1, 1, 1, 1
on_release:root.enviar_al_enlace()
"""
class InformacionScreen(MDScreen):
    """Screen describing how the app works, with a registration link."""
    # Attach the kv layout defined above to this screen class.
    Builder.load_string(kv)
    def on_pre_enter(self, *args):
        """Fill the info label (Spanish copy) just before the screen shows."""
        self.ids["indicaciones"].text="""La siguiente app se conecta a una placa arduino la cual por medio de sensores permite que el paciente sea capaz de obtener los valores de tres de sus variables fisiológicas: Saturación de Oxígeno, Temperatura y Frecuencia Cardiaca.\n
        De esta manera, el paciente envía los valores de las variables en diversos instantes del día durante el periodo de tiempo que sea determinado por el especialista, para asi tener un registro y monitoreo del estado médico del paciente.\n
        Así mismo, el paciente es capaz de verificar el historial de sus registros, ordenados por fecha y hora en la que fueron agregados a la base de datos."""
    def enviar_al_enlace(self):
        """Open the patient-registration web page in the default browser."""
        webbrowser.open('https://stark-spire-16180.herokuapp.com/reg_paciente#')
```
#### File: mecatronicaUNT_Prog2_SMTCPCSOS/Paciente_App/login_screen.py
```python
from kivymd.uix.screen import MDScreen
from kivy.lang import Builder
from conexion_BD import Conexion_BD
from kivymd.app import MDApp
# importado para ir al enlace
import webbrowser
#importando la pantalla de navegacion
from navigation_screen import NavigationScreen
kv="""
<LoginScreen>:
name:'login_screen'
Screen:
FitImage:
source:'recursos/imagenes/fondo_1.jpg'
FloatLayout:
id:box
size_hint:None,None
width:500
height:500
pos_hint: {"center_x": .5,"center_y": .5 }
padding:"30dp","15dp","30dp","30dp"
spacing:'10dp'
canvas:
Color:
rgba: 1,1,1, .5
RoundedRectangle:
pos:self.pos
size:self.size
radius: 10,10,10,10
MDLabel:
pos_hint: {"center_x": .5,"center_y": .9 }
text:'INICIO DE SESIÓN'
bold:True
theme_text_color:'Custom'
text_color:0,0,0,1
halign:'center'
font_style:"H5"
MDLabel:
pos_hint: {"center_x": .5,"center_y": .8 }
text:'Bienvenido'
size_hint_x:.8
bold:True
font_style:"Subtitle1"
MDLabel:
pos_hint: {"center_x": .5,"center_y": .7 }
size_hint_x:.8
text:'Necesitamos que nos brindes algunos datos para poder hacer uso de este servicio'
font_style:'Body1'
MDTextFieldRound:
pos_hint: {"center_x": .5,"center_y": .55 }
size_hint_x:.8
id:correo
icon_left:'mail'
normal_color: 1,1,1,.5
icon_left_color: 0,0,0
MDTextFieldRound:
id:password
icon_left:'lock'
size_hint_x:.8
pos_hint: {"center_x": .5,"center_y": .4 }
normal_color: 1,1,1,.5
icon_left_color: 0,0,0
MDRaisedButton:
text:'Iniciar Sesión'
font_size: "15sp"
pos_hint: {"center_x": .5,"center_y": .25 }
on_release:root.iniciar_sesion()
MDLabel:
text:'Si no posee una cuenta,regístrese en el siguiente'
pos_hint: {"center_x": .5,"center_y": .125 }
halign:'center'
size_hint_x:.8
font_style:'Body1'
pos_hint_x:.5
MDFlatButton:
pos_hint: {"center_x": .5,"center_y": .075 }
text:'enlace'
size_hint_y:.2
text_color: 0, 0, .4, 1
elevation: 0
#md_bg_color:1,1,1,0
font_size: "16sp"
on_release: root.ir_enlace()
"""
class LoginScreen(MDScreen):
    """Login screen: validates credentials against the remote database."""
    # Attach the kv layout defined above to this screen class.
    Builder.load_string(kv)
    def __init__(self,**kw):
        """Create the DB connection and pre-register the navigation screen."""
        super().__init__(**kw)
        self.app=MDApp.get_running_app()  # running app, used to switch screens
        self.mi_conexion=Conexion_BD()
        # NOTE(review): a NavigationScreen is added to the root manager on
        # every LoginScreen construction - presumably instantiated only once.
        pantalla_navegacion=NavigationScreen()
        self.app.root.add_widget(pantalla_navegacion)
    def iniciar_sesion(self):
        """Try to log in with the entered credentials.

        On failure the input fields are cleared; on success the app
        switches to the navigation screen.
        """
        if self.mi_conexion.inicio_sesion(self.ids.correo.text,self.ids.password.text) ==False:
            print('a ocurrido un error')
            self.ids.correo.text=''
            self.ids.password.text=''
        else:
            self.app.root.current='navigation_screen'
        # print(self.ids.correo.text)
        # print(self.ids.password.text)
    def ir_enlace(self):
        """Open the patient-registration web page in the default browser."""
        webbrowser.open('https://stark-spire-16180.herokuapp.com/reg_paciente#')
```
#### File: mecatronicaUNT_Prog2_SMTCPCSOS/Paciente_App/main.py
```python
from kivymd.app import MDApp
from kivy.lang import Builder
from kivy.uix.screenmanager import Screen, ScreenManager
from splash_screen import SplashScreen
class PacienteApp(MDApp):
    """Main KivyMD application for the patient-side monitor."""
    def build(self):
        """Create the (initially empty) screen manager as the root widget."""
        sm=ScreenManager()
        self.title='Pacient-App'
        self.theme_cls.primary_palette='Blue'
        return sm
    def on_start(self):
        """Show the splash screen as soon as the app starts."""
        ss=SplashScreen()
        self.root.add_widget(ss)
        self.root.current='splash_screen'
PacienteApp().run()
```
#### File: mecatronicaUNT_Prog2_SMTCPCSOS/Paciente_App/navigation_screen.py
```python
from kivymd.uix.screen import MDScreen
from kivy.lang import Builder
from kivymd.uix.list import OneLineIconListItem,IconLeftWidget
from kivymd.app import MDApp
from functools import partial
import sys
class ListIcon(OneLineIconListItem):
    """List row with a left icon, built from text/icon/on_release kwargs."""
    def __init__(self,**kw):
        # NOTE(review): **kw is not forwarded to super().__init__();
        # text/icon/on_release are applied manually below instead.
        super().__init__()
        self.text=kw['text']
        self.icon=IconLeftWidget(icon=kw['icon'])
        self.add_widget(self.icon)
        self.on_release=kw['on_release']
kv="""
<NavigationScreen>
name:'navigation_screen'
NavigationLayout:
id:nav_layout
ScreenManager:
MDScreen:
MDBoxLayout:
orientation:'vertical'
MDToolbar:
id:tool_bar
title:'Pacient-App'
left_action_items:[["menu",lambda x: nav_drawer.set_state()]]
ScreenManager:
id:screen_manager
MDNavigationDrawer:
id:nav_drawer
MDBoxLayout:
orientation:'vertical'
padding: "8dp"
spacing: "8dp"
Image:
size_hint_y: .3
source:'recursos/imagenes/logo1.jpg'
ScrollView:
MDList:
id:nav_list
OneLineIconListItem:
text:'Cerrar Sesión'
on_release:root.cerrar_sesion()
IconLeftWidget:
icon:"close-circle"
"""
class NavigationScreen(MDScreen):
    """Main screen with a navigation drawer that hosts the sub-screens."""
    Builder.load_string(kv)
    def __init__(self, **kwargs):
        """Register the available sub-screens (id, drawer title, icon)."""
        super().__init__(**kwargs)
        self.app=MDApp.get_running_app()
        # list of screens (id, title[text], icon)
        # Imported here (not at module level), presumably to avoid
        # circular imports between the screen modules.
        from pacientes_screen import PacientesScreen
        from informacion_screen import InformacionScreen
        from historial_screen import HistorialScreen
        self.list_screen = {
            PacientesScreen:('pacientes_screen','Enviar datos','file-send'),  # only the name was changed due to time constraints
            InformacionScreen:('information_screen','Información','information'),
            HistorialScreen:('historial_screen','Historial','history')
        }
    def on_enter(self, *args):
        """Build the screen-manager entries and drawer items on entry."""
        for screen,details in self.list_screen.items():
            identification,text,icon=details
            self.ids.screen_manager.add_widget(screen(name=identification))
            self.ids.nav_list.add_widget(ListIcon(text=text,icon=icon,on_release=partial(self.button_list_actions,text,identification)))
    def button_list_actions(self,title,identification):
        """Switch to the chosen sub-screen and close the drawer."""
        self.ids.tool_bar.title=title
        self.ids.screen_manager.current=identification
        self.ids.nav_drawer.set_state()
    def cerrar_sesion(self):
        """Log out: wipe the cached patient-info file and exit the app."""
        archivo_texto=open('info_paciente.txt','w')
        archivo_texto.write('')
        archivo_texto.close()
        sys.exit()
``` |
{
"source": "joseferncruz/udacity-ds-project2",
"score": 4
} |
#### File: udacity-ds-project2/wrangling_scripts/gen_fig_data.py
```python
from collections import namedtuple
import calendar
# Constants
MONTHS = [month for month in calendar.month_name if month]
DAY_NAMES = list(calendar.day_name)
# FIGURE 1
def get_fig_one_data(data):
    """Return a named tuple with monthly complaint counts per year.

    Parameters
    ----------
    data : pd.DataFrame
        Cleaned dataset with at least `year`, `month` (1-12) and
        `complaint_type` columns.

    Returns
    -------
    fig1 : namedtuple
        Fields: x (month names) and y_2019/y_2020/y_2021 (counts).
    """
    fig1_data = namedtuple('fig1', 'x y_2019 y_2020 y_2021')
    # Drop May 2021 (incomplete month), then count complaints per (year, month).
    complete_months = data[~(data.year.isin([2021]) & data.month.isin([5]))]
    monthly = complete_months.groupby(['year', 'month'])['complaint_type']\
        .count()\
        .reset_index()\
        .rename({'complaint_type': 'n_complains'}, axis=1)
    # Replace the numeric month with its calendar name (previously done via
    # a nested helper wrapped again in a lambda - redundant indirection).
    monthly['month'] = monthly.month.apply(lambda idx: calendar.month_name[idx])
    # Per-year complaint counts, in chronological (groupby) order.
    yearly_counts = {
        year: monthly[monthly.year.isin([year])].n_complains.tolist()
        for year in (2019, 2020, 2021)
    }
    # Self-contained month-name axis (same values as the module's MONTHS).
    month_names = [month for month in calendar.month_name if month]
    return fig1_data(month_names, yearly_counts[2019],
                     yearly_counts[2020], yearly_counts[2021])
# FIGURE 2
def get_fig_two_data(data):
    """Return a named tuple with per-borough complaint counts (2019/2020)."""
    fig2_data = namedtuple('fig2', 'x y_2019 y_2020')
    # Count complaints per (year, borough) for the two years of interest.
    by_borough = data[data.year.isin([2019, 2020])]\
        .groupby(['year', 'borough'])['complaint_type']\
        .count()\
        .reset_index()\
        .rename({'complaint_type': 'n_complains'}, axis=1)
    # Capitalized borough names, in groupby (alphabetical) order.
    borough_names = [name.capitalize() for name in by_borough.borough.unique()]
    counts_2019 = by_borough[by_borough.year.isin([2019])].n_complains
    counts_2020 = by_borough[by_borough.year.isin([2020])].n_complains
    return fig2_data(borough_names, counts_2019, counts_2020)
# FIGURE 3
def get_fig_three_data(data):
    """Return 2019/2020 counts for the three top noise types of 2020."""
    fig3_data = namedtuple('fig3', 'x y_2019 y_2020')
    # Count complaints per (complaint_type, year) for 2019 and 2020.
    counts = data[data.year.isin([2019, 2020])]\
        .groupby(['complaint_type', 'year'])['agency']\
        .count()\
        .reset_index()\
        .rename({'agency': 'n_complains'}, axis=1)
    # The three most frequent complaint types of 2020.
    top3_2020 = counts[counts.year.isin([2020])]\
        .sort_values('n_complains', ascending=False)\
        .complaint_type[:3]
    # Keep both years' rows for those top-3 types only.
    top_counts = counts[counts.complaint_type.isin(top3_2020)]
    return fig3_data(top_counts.complaint_type.unique(),
                     top_counts[top_counts.year.isin([2019])].n_complains,
                     top_counts[top_counts.year.isin([2020])].n_complains)
# FIGURE 4
def get_fig_four_data(data):
    """Return, per noise type, its share of daily complaints across weekdays.

    For every 2020 complaint type (excluding the negligible 'Collection
    Truck Noise'), builds a named tuple whose `y` holds weekday names and
    whose `x` holds the type's percentage of all complaints on that day.
    """
    weekday_tuple = namedtuple('weekday', ['y', 'x'])
    # Complaints per (weekday, complaint_type) during 2020.
    daily = data[data.year.isin([2020])]\
        .groupby(['weekday', 'complaint_type'])['agency']\
        .count()\
        .reset_index()\
        .rename({'agency': 'n_complaints'}, axis=1)
    # Collection Truck Noise is negligible (n <= 2) - drop it.
    daily = daily[~daily.complaint_type.isin(['Collection Truck Noise'])]
    # Per-weekday totals used to normalize each type's counts.
    totals = daily.groupby(['weekday'])['n_complaints'].sum().to_list()
    result = dict()
    for noise_type in daily.complaint_type.unique():
        rows = daily[daily.complaint_type.isin([noise_type])]
        # Translate the weekday index into its calendar name.
        day_names = [calendar.day_name[i] for i in rows.weekday.to_list()]
        # Positional division by the per-weekday totals, as rounded percent.
        percentages = (rows.n_complaints / totals * 100)\
            .apply(lambda x: round(x, 2)).to_list()
        result[noise_type] = weekday_tuple(day_names, percentages)
    return result
``` |
{
"source": "joseferncruz/udacity-ds-project3",
"score": 3
} |
#### File: udacity-ds-project3/models/train_classifier.py
```python
import sys
import pickle
from sqlalchemy import create_engine
import pandas as pd
import re
# for npl
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
STOP_WORDS_ENG = stopwords.words('english')
# for statistical learning
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split,\
GridSearchCV
from sklearn.pipeline import Pipeline,\
FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer,\
TfidfTransformer
from sklearn.ensemble import AdaBoostClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.base import BaseEstimator,\
TransformerMixin
random_seed = 10
# Classes to extract new features
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
    """Transformer class to extract the starting verb in text.

    Produces one boolean feature per document: whether any sentence
    starts with a verb (or the retweet marker 'RT'). Depends on the
    module-level `tokenize` helper and the NLTK taggers.
    """
    def starting_verb(self, text):
        """Return True if the first word is a Verb.

        Returns False immediately when a sentence yields no POS tags
        (e.g. it contained only stop words), True when a sentence's
        first tag is VB/VBP or its first word is 'RT'.
        """
        # Extract a list of tokenized sentences
        sentence_list = nltk.sent_tokenize(text)
        for sentence in sentence_list:
            # POS-tag the tokens of this sentence.
            pos_tags = nltk.pos_tag(tokenize(sentence))
            if not pos_tags:
                return False
            first_word, first_tag = pos_tags[0]
            if first_tag in ['VB', 'VBP'] or first_word == 'RT':
                return True
        return False
    def fit(self, X, y=None):
        """Fit the data.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
        y : optional, array-like of shape (n_samples,) or (n_samples, n_targets)
        Returns
        -------
        self : returns an instance of self.
        """
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X):
        """Transform the data.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data
        Returns
        -------
        X_tagged : pd.DataFrame
            Transformed data.
        """
        # One boolean per document, wrapped as a single-column DataFrame.
        X_tagged = pd.Series(X).apply(self.starting_verb)
        return pd.DataFrame(X_tagged)
###############################################################################
class GetNumberTokens(BaseEstimator, TransformerMixin):
    """Transformer class to count the number of tokens in text.

    Produces one integer feature per document: the token count after
    cleaning/lemmatization by the module-level `tokenize` helper.
    """
    def get_number_tokens(self, text):
        """Return the number of tokens in a sentence.
        Parameters
        ----------
        text : str
        Returns
        -------
        n_tokens : int
            The number of tokens in text.
        """
        # Extract a list of tokenized sentences
        n_tokens = len(tokenize(text))
        return n_tokens
    def fit(self, X, y=None):
        """Fit the data.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
        y : optional, array-like of shape (n_samples,) or (n_samples, n_targets)
        Returns
        -------
        self : returns an instance of self.
        """
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X):
        """Transform the data.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data
        Returns
        -------
        X_tagged : pd.DataFrame
            Transformed data.
        """
        # One token count per document, wrapped as a single-column DataFrame.
        X_tagged = pd.Series(X).apply(self.get_number_tokens)
        return pd.DataFrame(X_tagged)
###############################################################################
def load_data(database_filepath):
    """Load the disaster-response table from a SQLite database.

    Parameters
    ----------
    database_filepath : str
        Filepath to the SQLite database.

    Returns
    -------
    X : np.ndarray
        Message texts (model input).
    y : pd.DataFrame
        Binary target categories, one column per category.
    category_names : pd.Index
        Names of the target-category columns.
    """
    engine = create_engine(f'sqlite:///{database_filepath}')
    frame = pd.read_sql_table('disaster_response', engine)
    # Messages are the model input; columns 4 onward are the binary targets.
    messages = frame.loc[:, 'message'].values
    targets = frame.iloc[:, 4:].astype('int')
    return messages, targets, targets.columns
###############################################################################
def tokenize(text):
    """Case normalize, clean, tokenize and lemmatize text.

    Parameters
    ----------
    text : str

    Returns
    -------
    tokens_lem : list
        List of clean, normalized and lemmatized tokens.
    """
    # Replace non-alphanumeric characters with spaces.
    text = re.sub(r'[^0-9a-zA-Z]', ' ', text)
    # Tokenize, then lower-case BEFORE the stop-word check: the NLTK stop
    # word list is lower-case, so filtering the raw token previously let
    # capitalized stop words ("The", "And", ...) slip through.
    tokens = [token.strip().lower() for token in word_tokenize(text)]
    # lemmatization
    lemmatizer = WordNetLemmatizer()
    tokens_lem = [lemmatizer.lemmatize(token) for token in tokens
                  if token not in STOP_WORDS_ENG]
    return tokens_lem
###############################################################################
def build_model():
    """Build a multi-output AdaBoost text classifier tuned via grid search.

    Returns
    -------
    model_cv : sklearn.model_selection.GridSearchCV
        Unfitted grid-search wrapper around the text-processing /
        classification pipeline (refits the best estimator after search).
    """
    # Bag-of-words counts followed by TF-IDF weighting.
    text_pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
    ])
    # Combine TF-IDF features with the two hand-crafted text features.
    features = FeatureUnion([
        ('text_pipeline', text_pipeline),
        ('verb_extract', StartingVerbExtractor()),
        ('n_tokens', GetNumberTokens()),
    ])
    pipeline = Pipeline([
        ('text_processing', features),
        ('clf', MultiOutputClassifier(AdaBoostClassifier(random_state=random_seed))),
    ])
    # Hyper-parameter grid for the AdaBoost base estimator.
    search_space = {
        'clf__estimator__n_estimators': [100, 200, 300],
        'clf__estimator__learning_rate': [0.05, 0.35]
    }
    return GridSearchCV(pipeline,
                        search_space,
                        cv=3,
                        refit=True,
                        verbose=2,
                        n_jobs=-1,
                        return_train_score=True)
###############################################################################
def evaluate_model(model, X_test, y_test, category_names):
    """Print a per-category classification report for *model*.

    Parameters
    ----------
    model : fitted sklearn estimator
    X_test : array-like
        Test messages.
    y_test : pd.DataFrame
        True binary labels, one column per category.
    category_names : list
        Category names used to label the report rows.
    """
    predictions = model.predict(X_test)
    report = classification_report(y_test, predictions,
                                   target_names=category_names)
    print(report)
def save_model(model, model_filepath):
    """Persist the grid search's best estimator as a pickle file.

    Parameters
    ----------
    model : fitted sklearn GridSearchCV
        Must expose `best_estimator_` (i.e. fitted with refit=True).
    model_filepath : str
        Destination path of the pickle file.
    """
    # Use a context manager so the file handle is closed (and the data
    # flushed) even if pickling fails; it was previously left dangling.
    with open(model_filepath, 'wb') as f:
        pickle.dump(model.best_estimator_, f)
def main():
    """Train, evaluate and persist the disaster-response classifier.

    Expects two command-line arguments: the SQLite database path and the
    output pickle path. Prints usage information otherwise.
    """
    if len(sys.argv) != 3:
        # Wrong argument count: explain the expected invocation and bail out.
        print('Please provide the filepath of the disaster messages database '
              'as the first argument and the filepath of the pickle file to '
              'save the model to as the second argument. \n\nExample: python '
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return
    database_filepath, model_filepath = sys.argv[1:]
    print(f'Loading data...\n    DATABASE: {database_filepath}')
    X, y, category_names = load_data(database_filepath)
    # Hold out 15% of the data for the final evaluation.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15)
    print('Building model...')
    model = build_model()
    print('Training model...')
    model.fit(X_train, y_train)
    print('Evaluating model...')
    evaluate_model(model, X_test, y_test, category_names)
    print(f'Saving model...\n    MODEL: {model_filepath}')
    save_model(model, model_filepath)
    print('Trained model saved!')
if __name__ == '__main__':
main()
``` |
{
"source": "joseffallman/ffpp",
"score": 3
} |
#### File: ffpp/example/example.py
```python
import asyncio
import logging
from ffpp.Printer import Printer
from ffpp.Discovery import getPrinters
# Activate module logger to output.
LOG = logging.getLogger("ffpp")
LOG.setLevel(logging.DEBUG)
out_handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(levelname)s : %(name)s : %(message)s'
)
out_handler.setFormatter(formatter)
LOG.addHandler(out_handler)
# NOTE(review): module-level annotation only — main() binds its own local
# ``myPrinter`` below, so this global name is never actually assigned.
myPrinter: Printer


async def main():
    """Discover (or ask for) a printer IP, connect and poll it forever."""
    # Run until CTRL+C is pressed.
    print("FlashForge Printer Protocol demo.")
    print("press ctrl+c to exit.")
    print("")

    ip = None
    loop = asyncio.get_running_loop()
    # Try to auto-discover a printer on the local network first.
    printers = await getPrinters(loop, limit=1)
    for name, host in printers:
        ip = host
        break

    if ip is None:
        # Discovery found nothing; fall back to manual entry.
        print("Enter your printer ip:")
        ip = input()

    myPrinter = Printer(ip)
    try:
        await myPrinter.connect()
    except TimeoutError:
        # Fixed typo in the user-facing message ("Could'nt").
        print("Couldn't connect")
        return

    while True:
        try:
            await update_and_print(myPrinter)
            await asyncio.sleep(10)
        except asyncio.CancelledError:
            return
        except RuntimeError:
            # Transient communication problem: keep polling.
            pass
async def update_and_print(myPrinter: Printer):
    """Refresh the printer state and dump a short status summary to stdout."""
    # Connect and update the Printer object; bail out on transport errors.
    try:
        await myPrinter.update()
    except ConnectionError:
        print("Write or read error.")
        return
    except TimeoutError:
        print("Connection Timeout.")
        return

    # First available extruder and bed tool.
    extruder = myPrinter.extruder_tools.get()
    bed = myPrinter.bed_tools.get()

    # Assemble the report first, then emit it line by line.
    status_lines = (
        "==================================================",
        f"{myPrinter.machine_type} is {myPrinter.machine_status}",
        f"Printer name {myPrinter.machine_name} is now {myPrinter.move_mode}",
        f"Status {myPrinter.status} print percent {myPrinter.print_percent}",
        f"Extruder {extruder.name} is now {extruder.now}",
        f"Bed {bed.name} is now {bed.now}",
    )
    for line in status_lines:
        print(line)
if __name__ == "__main__":
    try:
        # WindowsSelectorEventLoopPolicy exists only on Windows builds of
        # asyncio; the original unconditional call raised AttributeError on
        # Linux/macOS. Guard it so the demo starts everywhere.
        if hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
            asyncio.set_event_loop_policy(
                asyncio.WindowsSelectorEventLoopPolicy())
        asyncio.run(main())
    except KeyboardInterrupt:
        pass
```
#### File: src/ffpp/Discovery.py
```python
import asyncio
import socket
import struct
import logging
# Module-level logger for the discovery helpers.
LOG = logging.getLogger(__name__)
class ffDiscoveryDatagramProtocol(asyncio.DatagramProtocol):
    """UDP protocol that broadcasts a discovery probe and collects replies.

    Every answering printer is recorded in ``self.data`` as a
    ``(name, ip)`` tuple. The transport is closed — which resolves
    ``on_con_lost`` — once ``limit`` replies arrived; with ``limit=None``
    the caller must close the transport itself (e.g. after a timeout).
    """

    def __init__(
        self,
        message: str,
        on_con_lost: asyncio.Future,
        interface_addr: str,
        limit: int
    ):
        # Payload sent as the discovery probe.
        self.message = message
        # Future resolved when the connection is closed.
        self.on_con_lost = on_con_lost
        self.transport = None
        # Local interface address used for the outgoing packet.
        self.interface_addr = interface_addr
        # Stop after this many replies; None means no limit.
        self.limit = limit
        # Collected (name, ip) tuples.
        self.data = []
        # Number of datagrams received so far.
        self.received = 0

    def connection_made(self, transport):
        self.transport = transport
        sock = self.transport.get_extra_info('socket')
        # Allow the probe to cross up to 4 router hops.
        sock.setsockopt(
            socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("B", 4)
        )
        # Send multicast traffic out of the chosen local interface.
        sock.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_MULTICAST_IF,
            socket.inet_aton(self.interface_addr)
        )
        # NOTE(review): multicast socket options are configured above, but
        # the destination below is a unicast-looking address — confirm it
        # matches the FlashForge discovery group/port.
        self.transport.sendto(self.message.encode(), ("192.168.127.12", 19000))

    def datagram_received(self, data: bytes, addr):
        self.received += 1
        try:
            # The printer name is the NUL-terminated prefix of the payload.
            end = data.find(b'\x00')
            name = data[:end].decode('utf-8', "ignore")
            ip = addr[0]
        except Exception:
            # Ignore malformed replies.
            return
        self.data.append((name, ip))
        if self.received == self.limit:
            # Enough printers found; closing resolves on_con_lost.
            self.transport.close()

    def error_received(self, exc):
        # Discovery is best effort: errors are deliberately ignored.
        pass

    def connection_lost(self, exc):
        try:
            self.on_con_lost.set_result(True)
        except asyncio.exceptions.InvalidStateError:
            # Future already resolved (e.g. cancelled by a timeout).
            pass
def find_host_ip():
    """Return the IP address of the host's default outbound interface.

    Uses the connected-UDP-socket trick: connecting a datagram socket does
    not send any packet, it only makes the kernel pick a route; the local
    address of that route is then read back. Falls back to the loopback
    address when no route is available.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # The target does not even have to be reachable.
        probe.connect(('10.255.255.255', 1))
        return probe.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        probe.close()
async def getPrinters(loop: asyncio.BaseEventLoop, limit: int = None,
                      host_ip: str = None) -> list:
    """Search the local network for connected FlashForge printers.

    Args:
        loop (asyncio.BaseEventLoop): Event loop used for the UDP endpoint
            and the completion future.
        limit (int, optional): Stop the search once this many printers
            answered. ``None`` collects replies until the 15 s timeout.
        host_ip (str, optional): Local interface address to bind;
            autodetected via :func:`find_host_ip` when omitted.

    Returns:
        list: ``(name, ip)`` tuples of the printers that answered.
    """
    # The original return annotation was ``-> tuple()`` — an *expression*
    # that evaluated to an empty tuple; the function actually returns a
    # list of tuples.
    on_con_lost = loop.create_future()
    message = "Hello World!"

    if not host_ip:
        host_ip = find_host_ip()

    transport, ffDiscovery = await loop.create_datagram_endpoint(
        lambda: ffDiscoveryDatagramProtocol(
            message, on_con_lost, host_ip, limit),
        local_addr=(host_ip, 8002)
    )

    try:
        # Wait until the reply limit or the timeout is reached.
        await asyncio.wait_for(
            on_con_lost,
            timeout=15
        )
    except Exception:  # CancelledError and TimeoutError
        LOG.debug("FlashForge printer search timeout.")
    finally:
        transport.close()

    printers = ffDiscovery.data
    for printer in printers:
        LOG.debug("Printer online: %s - %s", printer[0], printer[1])
    return printers
```
#### File: ffpp/tests/test_Network.py
```python
import socket
import typing
import unittest
from unittest import mock
import asyncio
from src.ffpp.Network import Network
# Address of the physical FlashForge printer the integration tests talk to.
PRINTER_IP = "192.168.50.64"
# TCP port used to reach that printer.
PRINTER_PORT = 8899
class TestNetworkClass(unittest.IsolatedAsyncioTestCase):
    """Test the behavior of the Network class against a mocked connection."""

    def setUp(self):
        self.network = Network(PRINTER_IP, 8899)
        # Replace the asyncio module inside ffpp.Network so that no real
        # sockets are opened by the code under test.
        self.patch_con = mock.patch(
            'src.ffpp.Network.asyncio',
            new_callable=mock.AsyncMock
        )
        self.mock_con = self.patch_con.start()
        self.mockReturnValue()

    def tearDown(self):
        self.patch_con.stop()

    def mockReturnValue(
        self,
        returnMessage: typing.Union[typing.List[str], str, None] = None
    ):
        """Configure the mocked stream reader to yield *returnMessage*.

        Accepts a single message or a list of messages (one per expected
        ``read()`` call). Uses ``None`` as the default sentinel instead of
        the original mutable default argument ``[]``.
        """
        if returnMessage is None:
            returnMessage = []
        if not isinstance(returnMessage, list):
            returnMessage = [returnMessage]
        reader = mock.AsyncMock()
        # One encoded payload per expected read() call.
        reader.read.side_effect = [msg.encode('utf-8') for msg in returnMessage]
        self.mock_con.wait_for.return_value = (
            reader,  # reader
            mock.AsyncMock(),  # writer
        )

    async def test_sendMessageUnconnectedConnectionTimeout_exceptionTimeoutError(self):  # noqa
        # Arrange
        net = Network("")
        self.mockReturnValue(["Hej", "Två"])
        self.mock_con.wait_for.side_effect = TimeoutError
        # Act & Assert: the timeout must propagate to the caller.
        with self.assertRaises(TimeoutError):
            await net.sendMessage("msg_to_send")
class TestNetworkCommunicateWithPrinter(unittest.IsolatedAsyncioTestCase):
    """Integration tests against a real FlashForge printer.

    The whole TestCase is skipped when nothing answers on
    ``PRINTER_IP:PRINTER_PORT``.
    """

    @classmethod
    def setUpClass(cls):
        # Probe the printer once; skip every test when it is unreachable.
        try:
            with socket.socket() as s:
                s.settimeout(5)
                s.connect((PRINTER_IP, PRINTER_PORT))
                s.close()
        except Exception:
            cls.skipTest(
                cls,
                f"There is no printer at this ip: {PRINTER_IP}:{PRINTER_PORT}"
            )

    async def test_printerConnect_noException(self):
        # Arrange: an address where no printer is listening.
        net = Network("192.168.0.32")
        # Act & Assert: the connect attempt must time out.
        with self.assertRaises(TimeoutError):
            await net.connect()

    async def test_getInfofromPrinter_expectedResult(self):
        # Arrange
        net = Network(PRINTER_IP)
        # Act
        response = await net.sendInfoRequest()
        # Assert
        self.assertIsNotNone(response, "responseData is None")
        self.assertIn("CMD M115 Received", response,
                      "Wrong message from printer.")

    async def test_getProgress_expectedResult(self):
        net = Network(PRINTER_IP)
        response = await net.sendProgressRequest()
        self.assertIsNotNone(response, "responseData is None")
        self.assertIn("CMD M27 Received", response,
                      "Wrong message from printer.")

    async def test_getTemperature_expectedResult(self):
        net = Network(PRINTER_IP)
        response = await net.sendTempRequest()
        self.assertIsNotNone(response, "responseData is None")
        self.assertIn("CMD M105 Received", response,
                      "Wrong message from printer.")

    async def test_getPosiotion_expectedResult(self):
        # (sic) method name kept so existing test ids stay stable.
        net = Network(PRINTER_IP)
        response = await net.sendPositionRequest()
        self.assertIsNotNone(response, "responseData is None")
        self.assertIn("CMD M114 Received", response,
                      "Wrong message from printer.")

    async def test_getStatus_expectedResult(self):
        net = Network(PRINTER_IP)
        response = await net.sendStatusRequest()
        self.assertIsNotNone(response, "responseData is None")
        self.assertIn("CMD M119 Received", response,
                      "Wrong message from printer.")

    async def test_getFileList_expectedResult(self):
        net = Network(PRINTER_IP)
        files = await net.sendGetFileNames()
        self.assertIsNotNone(net.responseData, "responseData is None")
        self.assertIn("CMD M661 Received", net.responseData,
                      "Wrong message from printer.")
        self.assertTrue(len(files) > 0, "There is no files on printer?")

    async def test_setTemperature_expectedResult(self):
        # Arrange
        net = Network(PRINTER_IP)
        # Bug fix: sendControlRequest() is a coroutine and was never
        # awaited, so control mode was silently never requested.
        await net.sendControlRequest()
        # Act
        response = await net.sendSetTemperature(40)
        response2 = await net.sendTempRequest()
        await asyncio.sleep(5)
        await net.sendSetTemperature(0)  # Restore temperature.
        # Assert
        self.assertIsNotNone(response, "responseData is None")
        self.assertIn("CMD M104 Received", response,
                      "Wrong message from printer.")
        self.assertIsNotNone(response2, "responseData is None")

    async def test_pausePrint_expectedResult(self):
        net = Network(PRINTER_IP)
        response = await net.sendPauseRequest()
        self.assertIsNotNone(response, "responseData is None")
        self.assertIn("CMD M25 Received", response,
                      "Wrong message from printer.")

    async def test_continuePrint_expectedResult(self):
        net = Network(PRINTER_IP)
        response = await net.sendContinueRequest()
        self.assertIsNotNone(response, "responseData is None")
        self.assertIn("CMD M24 Received", response,
                      "Wrong message from printer.")

    async def test_abortPrint_expectedResult(self):
        net = Network(PRINTER_IP)
        response = await net.sendAbortRequest()
        self.assertIsNotNone(response, "responseData is None")
        self.assertIn("CMD M26 Received", response,
                      "Wrong message from printer.")

    @unittest.skip("Only run this test manually")
    async def test_testNewCommand_expectedResult(self):
        net = Network(PRINTER_IP)
        success = await net.sendMessage('~M129\r\n')
        response = net.responseData
        self.assertTrue(success)
        self.assertIsNotNone(response, "responseData is None")
``` |
{
"source": "Josef-Friedrich/audiorename.py",
"score": 2
} |
#### File: audiorename.py/audiorename/__init__.py
```python
import sys
from .args import parse_args, fields # noqa: F401
from .batch import Batch
from .job import Job
from .message import stats, job_info
from ._version import get_versions
# Expose the versioneer-generated version string, then drop the helper so
# it does not leak into the public module namespace.
__version__ = get_versions()['version']
del get_versions
def execute(*argv):
    """Run the renaming job described by the command line arguments.

    :param list argv: The command line arguments specified as a list: e. g
        :code:`['--dry-run', '.']`
    """
    def _finish(current_job):
        # Stop the timer and print statistics when requested — used on the
        # normal exit path as well as after a keyboard interrupt.
        current_job.stats.timer.stop()
        if current_job.output.stats:
            stats(current_job)

    job = None
    try:
        args = parse_args(argv)
        job = Job(args)
        job.stats.counter.reset()
        job.stats.timer.start()
        if job.output.job_info:
            job_info(job)
        if job.dry_run:
            job.msg.output('Dry run')
        Batch(job).execute()
        _finish(job)
    except KeyboardInterrupt:
        if job:
            _finish(job)
        sys.exit(0)
``` |
{
"source": "Josef-Friedrich/audiorename",
"score": 3
} |
#### File: audiorename/test/test_args.py
```python
import unittest
import re
import audiorename
import helper
class TestCommandlineInterface(unittest.TestCase):
    """Check exit codes and error output of the command line interface."""

    def _assert_exit(self, capture, argv, expected_code):
        """Run ``audiorename.execute`` inside *capture* and assert the exit code.

        :param capture: A ``helper.Capturing`` context manager instance.
        :param argv: Argument list passed through to ``execute``.
        :param expected_code: Expected ``SystemExit`` code as a string.
        :return: The captured output lines.
        """
        with self.assertRaises(SystemExit) as cm:
            with capture as output:
                audiorename.execute(*argv)
        self.assertEqual(str(cm.exception), expected_code)
        return output

    def test_help_short(self):
        self._assert_exit(helper.Capturing(), ['-h'], '0')

    def test_help_long(self):
        self._assert_exit(helper.Capturing(), ['--help'], '0')

    def test_without_arguments(self):
        self._assert_exit(helper.Capturing('stderr'), [], '2')

    def test_without_mutually_exclusive(self):
        output = self._assert_exit(
            helper.Capturing('stderr'), ['--copy', '--move', '.'], '2')
        # argparse reports the conflict on stderr.
        self.assertIn('not allowed with argument', ' '.join(output))
class TestVersion(unittest.TestCase):
    """``--version`` must print something of the form ``name version``."""

    def test_version(self):
        with self.assertRaises(SystemExit):
            with helper.Capturing() as output:
                audiorename.execute('--version')
        # Two space-separated tokens on the first output line.
        match = re.search('[^ ]* [^ ]*', output[0])
        self.assertTrue(match)
class TestHelp(unittest.TestCase):
    """The ``--help`` output must document all custom template fields.

    Uses ``assertIn`` instead of ``assertTrue(x in y)`` so a failing test
    reports the missing substring and the haystack.
    """

    def setUp(self):
        with self.assertRaises(SystemExit):
            with helper.Capturing() as output:
                audiorename.execute('--help')
        self.output = '\n'.join(output)

    def test_tmep(self):
        self.assertIn('%title{text}', self.output)

    def test_phrydy(self):
        self.assertIn('mb_releasegroupid', self.output)

    # album

    def test_field_ar_classical_album(self):
        self.assertIn('ar_classical_album', self.output)

    def test_field_ar_combined_album(self):
        self.assertIn('ar_combined_album', self.output)
        self.assertIn('“album” without', self.output)

    def test_field_ar_initial_album(self):
        self.assertIn('ar_initial_album', self.output)
        self.assertIn('First character', self.output)

    # artist

    def test_field_ar_initial_artist(self):
        self.assertIn('ar_initial_artist', self.output)
        self.assertIn('First character', self.output)

    def test_field_ar_combined_artist(self):
        self.assertIn('ar_combined_artist', self.output)
        self.assertIn('The first available', self.output)

    def test_field_ar_combined_artist_sort(self):
        self.assertIn('ar_combined_artist_sort', self.output)
        self.assertIn('The first available', self.output)

    # composer

    def test_field_ar_initial_composer(self):
        self.assertIn('ar_initial_composer', self.output)

    def test_field_ar_combined_composer(self):
        self.assertIn('ar_combined_composer', self.output)

    def test_field_ar_combined_disctrack(self):
        self.assertIn('ar_combined_disctrack', self.output)
        self.assertIn('Combination of', self.output)

    def test_field_ar_classical_performer(self):
        self.assertIn('ar_classical_performer', self.output)

    def test_field_ar_classical_title(self):
        self.assertIn('ar_classical_title', self.output)

    def test_field_ar_classical_track(self):
        self.assertIn('ar_classical_track', self.output)

    def test_field_ar_combined_year(self):
        self.assertIn('ar_combined_year', self.output)
        self.assertIn('First “original_year”', self.output)
class TestArgsDefault(unittest.TestCase):
    """Parsed arguments must match the documented ``ArgsDefault`` values.

    The repeated two-assertion pattern of the original is factored into
    :meth:`_check`; the individual test methods are kept so every field
    still shows up as its own test id.
    """

    def setUp(self):
        from audiorename.args import parse_args
        from audiorename.args import ArgsDefault
        self.default = ArgsDefault()
        self.default.source = 'lol'
        self.args = parse_args(['lol'])

    def _check(self, attribute, expected):
        """Assert the parsed value equals *expected* and the default value."""
        self.assertEqual(getattr(self.args, attribute), expected)
        self.assertEqual(getattr(self.args, attribute),
                         getattr(self.default, attribute))

    # positional arguments

    def test_source(self):
        self._check('source', 'lol')

    # optional arguments

    def test_album_complete(self):
        self._check('album_complete', False)

    def test_album_min(self):
        self._check('album_min', False)

    def test_backup(self):
        self._check('backup', False)

    def test_backup_folder(self):
        self._check('backup_folder', False)

    def test_best_format(self):
        self._check('best_format', False)

    def test_classical(self):
        self._check('classical', False)

    def test_color(self):
        self._check('color', False)

    def test_compilation(self):
        self._check('compilation', False)

    def test_copy(self):
        self._check('copy', False)

    def test_debug(self):
        self._check('debug', False)

    def test_delete(self):
        self._check('delete', False)

    def test_dry_run(self):
        self._check('dry_run', False)

    def test_enrich_metadata(self):
        self._check('enrich_metadata', False)

    def test_extension(self):
        self._check('extension', 'mp3,m4a,flac,wma')

    def test_field_skip(self):
        self._check('field_skip', False)

    def test_format(self):
        self._check('format', False)

    def test_job_info(self):
        self._check('job_info', False)

    def test_no_rename(self):
        self._check('no_rename', False)

    def test_mb_track_listing(self):
        self._check('mb_track_listing', False)

    def test_move(self):
        self._check('move', False)

    def test_one_line(self):
        self._check('one_line', False)

    def test_remap_classical(self):
        self._check('remap_classical', False)

    def test_shell_friendly(self):
        self._check('shell_friendly', False)

    def test_soundtrack(self):
        self._check('soundtrack', False)

    def test_source_as_target(self):
        self._check('source_as_target', False)

    def test_target(self):
        self._check('target', '')

    def test_stats(self):
        self._check('stats', False)

    def test_verbose(self):
        self._check('verbose', False)
# Allow running this test module directly: ``python test_args.py``.
if __name__ == '__main__':
    unittest.main()
``` |
{
"source": "Josef-Friedrich/dyndns",
"score": 2
} |
#### File: Josef-Friedrich/dyndns/setup.py
```python
import os
from setuptools import setup, find_packages
import versioneer
def read(file_name):
    """
    Read the contents of a text file and return its content.

    :param str file_name: The name of the file to read, relative to the
        directory containing this module.

    :return: The content of the text file.
    :rtype: str
    """
    path = os.path.join(os.path.dirname(__file__), file_name)
    # Use a context manager so the file handle is closed deterministically
    # (the original left the open() handle to be collected by the GC).
    with open(path, encoding='utf-8') as handle:
        return handle.read()
setup(
    name="dyndns",
    author="<NAME>",
    author_email="<EMAIL>",
    # Version and extra build commands are derived from git tags by
    # versioneer.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="A simple dynamic DNS HTTP based update API using Python and the Flask web framework.",
    long_description=read('README.rst'),
    url="https://github.com/Josef-Friedrich/dyndns",
    packages=find_packages(),
    # Runtime dependencies are pinned to exact versions.
    install_requires=[
        'dnspython==2.1.0',
        'docutils==0.16',
        'flask==1.1.2',
        'Pygments==2.7.4',
        'PyYAML==5.4.1',
    ],
    tests_require=[
        'beautifulsoup4',
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Console entry point used for debugging the service.
    entry_points={
        'console_scripts': [
            'dyndns-debug = dyndns.cli:main',
        ],
    },
    # Ship the non-Python assets (reST snippets, templates, static files).
    package_data={
        '': ['rst/*.rst', 'templates/*.html', 'static/*'],
    },
)
``` |
{
"source": "Josef-Friedrich/jflib",
"score": 4
} |
#### File: jflib/jflib/argparser_to_readme.py
```python
def argparser_to_readme(argparser, template='README-template.md',
                        destination='README.md', indentation=0,
                        placeholder='{{ argparse }}'):
    """Add the formatted help output of a command line utility using the
    Python module `argparse` to a README file.

    :param callable argparser: A callable (factory function or parser
        class) returning an ``argparse.ArgumentParser``. Note that this
        argument is *called*, so passing an already-built parser instance
        fails.
    :param str template: The path of a template text file containing the
        placeholder. Default: `README-template.md`
    :param str destination: The path of the destination file. Default:
        `README.md`
    :param int indentation: Indent the formatted help output by X spaces.
        Default: 0
    :param str placeholder: Placeholder string that gets replaced by the
        formatted help output. Default: `{{ argparse }}`
    """
    help_string = argparser().format_help()

    if indentation > 0:
        # Prefix every line of the help output with the requested indent.
        help_string = '\n'.join(
            ' ' * indentation + line for line in help_string.split('\n'))

    with open(template, 'r', encoding='utf-8') as template_file:
        template_string = template_file.read()

    readme = template_string.replace(placeholder, help_string)

    # Write through a context manager so the destination is closed even
    # when the write fails (the original used explicit open()/close()),
    # and with the same utf-8 encoding used for reading the template.
    with open(destination, 'w', encoding='utf-8') as readme_file:
        readme_file.write(readme)
#### File: jflib/test/test_utils.py
```python
import unittest
from jflib.utils import download, make_executable
import tempfile
import os
import stat
class TestUtils(unittest.TestCase):
    """Tests for the ``download`` and ``make_executable`` helpers."""

    def test_download(self):
        url = 'https://raw.githubusercontent.com/' \
              'Josef-Friedrich/jflib/master/README.md'
        # mkstemp() returns an *open* OS-level file descriptor; close it so
        # the descriptor does not leak (the original discarded it).
        fd, dest = tempfile.mkstemp()
        os.close(fd)
        download(url, dest)
        self.assertTrue(os.path.exists(dest))
        with open(dest, 'r') as dest_file:
            content = dest_file.read()
        self.assertIn('# jflib', content)

    def test_make_executable(self):
        fd, tmp_file = tempfile.mkstemp()
        os.close(fd)
        with open(tmp_file, 'w') as tmp_fd:
            tmp_fd.write('test')
        # The fresh temp file must not yet be user-executable.
        self.assertFalse(stat.S_IXUSR & os.stat(tmp_file)[stat.ST_MODE])
        make_executable(tmp_file)
        self.assertTrue(stat.S_IXUSR & os.stat(tmp_file)[stat.ST_MODE])
``` |
{
"source": "Josef-Friedrich/kodi-addons",
"score": 2
} |
#### File: kodi-addons/plugin.video.example/main.py
```python
import sys
from urllib import urlencode
from urlparse import parse_qsl
import xbmcgui
import xbmcplugin
# Get the plugin url in plugin:// notation.
_url = sys.argv[0]
# Get the plugin handle as an integer number.
_handle = int(sys.argv[1])
# Free sample videos are provided by www.vidsplay.com
# Here we use a fixed set of properties simply for demonstrating purposes
# In a "real life" plugin you will need to get info and links to video files/streams
# from some web-site or online service.
VIDEOS = {'Animals': [{'name': 'Crab',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/04/crab-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/04/crab.mp4',
'genre': 'Animals'},
{'name': 'Alligator',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/04/alligator-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/04/alligator.mp4',
'genre': 'Animals'},
{'name': 'Turtle',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/04/turtle-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/04/turtle.mp4',
'genre': 'Animals'}
],
'Cars': [{'name': 'Postal Truck',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/us_postal-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/us_postal.mp4',
'genre': 'Cars'},
{'name': 'Traffic',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/traffic1-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/traffic1.mp4',
'genre': 'Cars'},
{'name': 'Traffic Arrows',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/traffic_arrows-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/traffic_arrows.mp4',
'genre': 'Cars'}
],
'Food': [{'name': 'Chicken',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/bbq_chicken-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/bbqchicken.mp4',
'genre': 'Food'},
{'name': 'Hamburger',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/hamburger-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/hamburger.mp4',
'genre': 'Food'},
{'name': 'Pizza',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/pizza-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/pizza.mp4',
'genre': 'Food'}
]}
def get_url(**kwargs):
    """
    Create a URL for calling the plugin recursively from the given set of keyword arguments.

    :param kwargs: "argument=value" pairs
    :type kwargs: dict
    :return: plugin call URL
    :rtype: str
    """
    return _url + '?' + urlencode(kwargs)
def get_categories():
    """
    Get the list of video categories.

    Here you can insert some parsing code that retrieves the list of
    video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.)
    from some site or API.

    :return: iterator over the category names
    """
    # Iterating the dict yields its keys, same as the original iterkeys().
    return iter(VIDEOS)
def get_videos(category):
    """
    Get the list of videofiles/streams.

    Here you can insert some parsing code that retrieves
    the list of video streams in the given category from some site or API.

    .. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
        instead of returning lists.

    :param category: Category name
    :type category: str
    :return: the list of videos in the category
    :rtype: list
    """
    return VIDEOS[category]
def list_categories():
    """
    Create the list of video categories in the Kodi interface.
    """
    # Shown by some skins as the name of the current section.
    xbmcplugin.setPluginCategory(_handle, 'My Video Collection')
    # The content type lets Kodi pick appropriate views.
    xbmcplugin.setContent(_handle, 'videos')
    for category in get_categories():
        list_item = xbmcgui.ListItem(label=category)
        # Reuse the first video's thumbnail for every artwork slot; a real
        # plugin would set each image individually.
        artwork = VIDEOS[category][0]['thumb']
        list_item.setArt({'thumb': artwork,
                          'icon': artwork,
                          'fanart': artwork})
        # 'mediatype' is required for skins to display the info correctly;
        # see https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html
        list_item.setInfo('video', {'title': category,
                                    'genre': category,
                                    'mediatype': 'video'})
        # Recursive plugin call, e.g.
        # plugin://plugin.video.example/?action=listing&category=Animals
        url = get_url(action='listing', category=category)
        # True: this item opens a sub-list of lower level items.
        xbmcplugin.addDirectoryItem(_handle, url, list_item, True)
    # Sort alphabetically, ignoring leading articles.
    xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
    xbmcplugin.endOfDirectory(_handle)
def list_videos(category):
    """
    Create the list of playable videos in the Kodi interface.

    :param category: Category name
    :type category: str
    """
    # Shown by some skins as the name of the current section.
    xbmcplugin.setPluginCategory(_handle, category)
    # The content type lets Kodi pick appropriate views.
    xbmcplugin.setContent(_handle, 'videos')
    for video in get_videos(category):
        list_item = xbmcgui.ListItem(label=video['name'])
        # 'mediatype' is required for skins to display the info correctly.
        list_item.setInfo('video', {'title': video['name'],
                                    'genre': video['genre'],
                                    'mediatype': 'video'})
        # The same image is reused for all artwork slots for simplicity.
        list_item.setArt({'thumb': video['thumb'],
                          'icon': video['thumb'],
                          'fanart': video['thumb']})
        # Mandatory for playable items.
        list_item.setProperty('IsPlayable', 'true')
        # Recursive plugin call, e.g.
        # plugin://plugin.video.example/?action=play&video=<stream-url>
        url = get_url(action='play', video=video['video'])
        # False: playable leaf item that opens no sub-list.
        xbmcplugin.addDirectoryItem(_handle, url, list_item, False)
    # Sort alphabetically, ignoring leading articles.
    xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
    xbmcplugin.endOfDirectory(_handle)
def play_video(path):
    """
    Play a video by the provided path.

    :param path: Fully-qualified video URL
    :type path: str
    """
    # Hand a resolved, playable item back to the Kodi player.
    resolved_item = xbmcgui.ListItem(path=path)
    xbmcplugin.setResolvedUrl(_handle, True, listitem=resolved_item)
def router(paramstring):
    """
    Router function that calls other functions
    depending on the provided paramstring

    :param paramstring: URL encoded plugin paramstring
    :type paramstring: str
    :raises ValueError: if the paramstring contains no supported action
    """
    # Parse a URL-encoded paramstring to a {<parameter>: <value>} dict.
    params = dict(parse_qsl(paramstring))
    if params:
        # Robustness fix: a paramstring without an 'action' key previously
        # raised a bare KeyError; params.get() routes it into the intended
        # ValueError branch instead.
        action = params.get('action')
        if action == 'listing':
            # Display the list of videos in a provided category.
            list_videos(params['category'])
        elif action == 'play':
            # Play a video from a provided URL.
            play_video(params['video'])
        else:
            # Unsupported or missing action: fail loudly to surface coding
            # errors, e.g. typos in action names.
            raise ValueError('Invalid paramstring: {0}!'.format(paramstring))
    else:
        # Called from the Kodi UI without parameters: show the categories.
        list_categories()
if __name__ == '__main__':
    # Call the router function and pass the plugin call parameters to it.
    # String slicing trims the leading '?' from the plugin call
    # paramstring that Kodi passes as the third process argument.
    router(sys.argv[2][1:])
``` |
{
"source": "Josef-Friedrich/mscx-muggling",
"score": 3
} |
#### File: mscx-muggling/mscxyz/lyrics.py
```python
from mscxyz.score_file_classes import MscoreXmlTree
import lxml.etree as etree
import typing
class MscoreLyricsInterface(MscoreXmlTree):
    def __init__(self, relpath: str):
        """Parse the score file and pre-compute the lyrics metadata.

        :param relpath: Path of the MuseScore file to load.
        """
        super(MscoreLyricsInterface, self).__init__(relpath)
        # Normalized list of {'number': <1-based int>, 'element': <Lyrics>}.
        self.lyrics = self.normalize_lyrics()
        # Highest verse number found in the score.
        self.max = self.get_max()
def normalize_lyrics(self):
"""Normalize numbering of verses to natural numbering (1,2,3).
From
.. code-block:: xml
<Lyrics>
<text>1. la</text>
</Lyrics>
<Lyrics>
<no>1</no>
<style>Lyrics Even Lines</style>
<text>2. li</text>
</Lyrics>
<Lyrics>
<no>2</no>
<text>3. lo</text>
</Lyrics>
To
.. code-block:: python
[
{'number': 1, 'element': lyrics_tag},
{'number': 2, 'element': lyrics_tag},
{'number': 3, 'element': lyrics_tag},
]
"""
lyrics = []
for lyric in self.xml_tree.findall('.//Lyrics'):
safe = {}
safe['element'] = lyric
number = lyric.find('no')
if hasattr(number, 'text'):
no = int(number.text) + 1
else:
no = 1
safe['number'] = no
lyrics.append(safe)
return lyrics
def get_max(self):
"""Retrieve the number of verses.
From:
1. La
2. La
3. La
To:
3
"""
max_lyric = 0
for element in self.lyrics:
if element['number'] > max_lyric:
max_lyric = element['number']
return max_lyric
def remap(self, remap_string: str, mscore: bool = False):
for pair in remap_string.split(','):
old = pair.split(':')[0]
new = pair.split(':')[1]
for element in self.lyrics:
if element['number'] == int(old):
element['element'].find('no').text = str(int(new) - 1)
self.save(mscore)
def extract_one_lyrics_verse(self, number: int, mscore: bool = False):
"""Extract a lyric verse by verse number.
:param number: The number of the lyrics verse starting by 1
"""
score = MscoreLyricsInterface(self.relpath)
for element in score.lyrics:
tag = element['element']
if element['number'] != number:
tag.getparent().remove(tag)
elif number != 1:
tag.find('no').text = '0'
ext = '.' + self.extension
new_name = score.relpath.replace(ext, '_' + str(number) + ext)
score.save(new_name, mscore)
def extract_lyrics(self, number: typing.Union[int, str] = None,
mscore: bool = False):
"""Extract one lyric verse or all lyric verses.
:param mixed number: The lyric verse number or 'all'
"""
if number == 'all':
for n in range(1, self.max + 1):
self.extract_one_lyrics_verse(n)
else:
self.extract_one_lyrics_verse(int(number))
def fix_lyrics_verse(self, verse_number: int):
"""
from:
.. code-block:: xml
<Lyrics>
<text>la-</text>
</Lyrics>
<Lyrics>
<syllabic>end</syllabic>
<text>la-</text>
</Lyrics>
<Lyrics>
<text>la.</text>
</Lyrics>
to:
.. code-block:: xml
<Lyrics>
<syllabic>begin</syllabic>
<text>la</text>
</Lyrics>
<Lyrics>
<syllabic>middle</syllabic>
<text>la</text>
</Lyrics>
<Lyrics>
<syllabic>end</syllabic>
<text>la.</text>
</Lyrics>
"""
syllabic = False
for element in self.lyrics:
if element['number'] == verse_number:
tag = element['element']
tag_text = tag.find('text')
text = tag_text.text
tag_syl = etree.Element('syllabic')
if text.endswith('-'):
tag_text.text = text[:-1]
if not syllabic:
tag_syl.text = 'begin'
syllabic = True
else:
tag_syl.text = 'middle'
else:
if syllabic:
tag_syl.text = 'end'
syllabic = False
else:
tag_syl = False
if not isinstance(tag_syl, bool):
tag.append(tag_syl)
def fix_lyrics(self, mscore: bool = False):
for verse_number in range(1, self.max + 1):
self.fix_lyrics_verse(verse_number)
self.save(mscore=False)
```
#### File: mscx-muggling/mscxyz/meta.py
```python
from mscxyz.score_file_classes import MscoreXmlTree
from mscxyz.utils import get_args, color
import json
import lxml
import re
import tmep
import typing
class ReadOnlyFieldError(Exception):
    """Raised when an attempt is made to write a read-only field.

    :param field: The name of the read-only field.
    """

    def __init__(self, field: str):
        message = 'The field “{}” is read only!'.format(field)
        self.msg = message
        super().__init__(message)
class UnkownFieldError(Exception):
    """Raised when a field name is not in the list of valid field names.

    The class name keeps its historical misspelling for backward
    compatibility with callers that catch it by name.

    :param field: The offending field name.
    :param valid_fields: All accepted field names.
    """

    def __init__(self, field: str, valid_fields: typing.Sequence):
        # Bug fix: the user-facing message misspelled “Unknown”.
        self.msg = 'Unknown field of name “{}”! Valid field names are: {}' \
            .format(field, ', '.join(valid_fields))
        Exception.__init__(self, self.msg)
class UnmatchedFormatStringError(Exception):
    """Raised when a format string does not match an input string.

    :param format_string: The format string that failed to match.
    :param input_string: The input string it was matched against.
    """

    def __init__(self, format_string: str, input_string: str):
        message = ('Your format string “{}” doesn’t match on this '
                   'input string: “{}”'.format(format_string, input_string))
        self.msg = message
        super().__init__(message)
class FormatStringNoFieldError(Exception):
    """Raised when a format string contains no ``$field`` specifiers.

    :param format_string: The format string without fields.
    """

    def __init__(self, format_string: str):
        message = 'No fields found in your format string “{}”!'.format(
            format_string)
        self.msg = message
        super().__init__(message)
def distribute_field(source, format_string: str):
    """Distribute the parts of one input string onto several fields.

    The format string uses ``$``-prefixed field names, e. g.
    ``'$composer: $title'`` applied to ``'Beethoven: Sonata'`` yields
    ``{'composer': 'Beethoven', 'title': 'Sonata'}``.

    :param source: The input string to split up.
    :param format_string: A format string containing ``$field`` markers.

    :raises FormatStringNoFieldError: if the format string contains no
        ``$field`` markers.
    :raises UnmatchedFormatStringError: if the derived pattern does not
        match the input string.
    """
    field_names = re.findall(r'\$([a-z_]*)', format_string)
    if not field_names:
        raise FormatStringNoFieldError(format_string)
    # Turn every “$field” marker into a regex capture group.
    pattern = re.sub(r'\$[a-z_]*', '(.*)', format_string)
    match = re.search(pattern, source)
    if match is None:
        raise UnmatchedFormatStringError(format_string, source)
    return dict(zip(field_names, match.groups()))
def to_underscore(field):
    """Convert a camelCase field name to snake_case, e. g.
    ``workTitle`` -> ``work_title``."""
    underscored = re.sub('([A-Z]+)', r'_\1', field)
    return underscored.lower()
def export_to_dict(obj, fields):
    """Collect the given attributes of an object into a dictionary.

    Falsy attribute values (e. g. ``None``) are replaced by an empty
    string.

    :param obj: The object to read the attributes from.
    :param fields: The attribute names to export.
    """
    return {field: getattr(obj, field) or '' for field in fields}
class MetaTag(object):
    """Provide attribute access to the ``<metaTag>`` elements of a
    MuseScore file. Attribute names are accepted in snake_case and are
    translated to the camelCase spelling used inside the XML.

    The available metaTag fields are:

    * `arranger`
    * `composer`
    * `copyright`
    * `creationDate`
    * `lyricist`
    * `movementNumber`
    * `movementTitle`
    * `platform`
    * `poet`
    * `source`
    * `translator`
    * `workNumber`
    * `workTitle`

    :param xml_root: The root element of a parsed MuseScore XML tree.
    """

    # The valid field names, in the camelCase spelling of the XML.
    fields = (
        'arranger',
        'composer',
        'copyright',
        'creationDate',
        'lyricist',
        'movementNumber',
        'movementTitle',
        'platform',
        'poet',
        'source',
        'translator',
        'workNumber',
        'workTitle',
    )

    @staticmethod
    def _to_camel_case(field):
        """Convert a snake_case field name to camelCase."""
        return re.sub(r'(?!^)_([a-zA-Z])',
                      lambda match: match.group(1).upper(), field)

    def __init__(self, xml_root):
        self.xml_root = xml_root

    def _get_element(self, field: str):
        """Return the first ``<metaTag name="...">`` element, or
        ``None`` when the field is missing in the XML."""
        for element in self.xml_root.xpath('//metaTag[@name="' + field + '"]'):
            return element

    def _get_text(self, field: str) -> str:
        """Return the text of a metaTag element (``None`` if the
        element is missing or empty)."""
        element = self._get_element(field)
        # hasattr() guards against _get_element() returning None.
        if hasattr(element, 'text'):
            return element.text

    def __getattr__(self, field):
        # Only invoked for names not found the regular way, i. e. for
        # the meta fields themselves.
        field = self._to_camel_case(field)
        if field not in self.fields:
            raise UnkownFieldError(field, self.fields)
        else:
            return self._get_text(field)

    def __setattr__(self, field, value):
        # “xml_root” and “fields” are real instance attributes; every
        # other name is written into the matching <metaTag> element.
        if field == 'xml_root' or field == 'fields':
            self.__dict__[field] = value
        else:
            field = self._to_camel_case(field)
            # NOTE(review): _get_element() returns None when the field
            # is absent from the XML, which would make this raise
            # AttributeError — confirm all scores contain all metaTags.
            self._get_element(field).text = value

    def clean(self):
        """Reset a fixed subset of the meta fields to empty strings."""
        fields = (
            'arranger',
            'copyright',
            'creationDate',
            'movementNumber',
            'platform',
            'poet',
            'source',
            'translator',
            'workNumber',
        )
        for field in fields:
            setattr(self, field, '')
class Vbox(object):
    """The first vertical box of a score.

    Available fields:

    * `Composer`
    * `Lyricist`
    * `Subtitle`
    * `Title`

    .. code-block:: xml

        <Staff id="1">
          <VBox>
            <height>10</height>
            <Text>
              <style>Title</style>
              <text>Title</text>
            </Text>
            <Text>
              <style>Composer</style>
              <text>Composer</text>
            </Text>
          </VBox>
        </Staff>

    :param xml_root: The root element of a parsed MuseScore XML tree.
        An empty ``<VBox>`` is created if the score has none yet.
    """

    # The valid field names; they match the <style> values in the XML.
    fields = (
        'Composer',
        'Lyricist',
        'Subtitle',
        'Title',
    )

    def __init__(self, xml_root):
        self.xml_root = xml_root
        xpath = '/museScore/Score/Staff[@id="1"]'
        # Create an empty VBox with the default height if none exists.
        if not xml_root.xpath(xpath + '/VBox'):
            vbox = lxml.etree.Element('VBox')
            height = lxml.etree.SubElement(vbox, 'height')
            height.text = '10'
            for element in xml_root.xpath(xpath):
                element.insert(0, vbox)

    def _get_tag(self, style):
        """Return the ``<text>`` element of the ``<Text>`` block whose
        ``<style>`` equals ``style``, or ``None``.

        :param string style: String inside the `<style>` tags
        """
        for element in self.xml_root.xpath('//VBox/Text'):
            if element.find('style').text == style:
                return element.find('text')

    def _get_text(self, style):
        """Return the text value for a style (``None`` if missing).

        :param string style: String inside the `<style>` tags
        """
        element = self._get_tag(style)
        # hasattr() guards against _get_tag() returning None.
        if hasattr(element, 'text'):
            return element.text

    def __getattr__(self, field):
        # Accept lower-case attribute names, e. g. vbox.title.
        field = field.title()
        if field not in self.fields:
            raise UnkownFieldError(field, self.fields)
        else:
            return self._get_text(field)

    def _create_text_tag(self, style, text):
        """Append a new ``<Text>`` block (style + text) to the VBox.

        :param string style: String inside the `<style>` tags
        """
        Text_tag = lxml.etree.Element('Text')
        style_tag = lxml.etree.SubElement(Text_tag, 'style')
        style_tag.text = style
        text_tag = lxml.etree.SubElement(Text_tag, 'text')
        text_tag.text = text
        for element in self.xml_root.xpath('//VBox'):
            element.append(Text_tag)

    def _set_text(self, style, text):
        """Set the text for a style, creating the ``<Text>`` block if
        it does not exist yet.

        :param string style: String inside the `<style>` tags
        """
        element = self._get_tag(style)
        if hasattr(element, 'text'):
            element.text = text
        else:
            self._create_text_tag(style, text)

    def __setattr__(self, field, value):
        # “xml_root” and “fields” are real instance attributes; every
        # other (validated) name is written into the VBox.
        if field == 'xml_root' or field == 'fields':
            self.__dict__[field] = value
        elif field.title() not in self.fields:
            raise UnkownFieldError(field, self.fields)
        else:
            self._set_text(field.title(), value)
class Combined(MscoreXmlTree):
    """Combined access to the meta tags and the first vertical box of a
    score. Reading prefers the VBox value and falls back to the
    corresponding metaTag value; writing updates both.

    NOTE(review): although this class derives from MscoreXmlTree, its
    __init__ takes an XML root (not a file path) and never calls
    super().__init__() — confirm the inheritance is intentional.

    :param xml_root: The root element of a parsed MuseScore XML tree.
    """

    # The four combined field names.
    fields = (
        'composer',
        'lyricist',
        'subtitle',
        'title',
    )

    def __init__(self, xml_root):
        self.xml_root = xml_root
        self.metatag = MetaTag(xml_root)
        self.vbox = Vbox(xml_root)

    def _pick_value(self, *values):
        # Return the first truthy value (or None if all are falsy).
        for value in values:
            if value:
                return value

    @property
    def title(self):
        """The title: VBox value first, then metaTag ``workTitle``."""
        return self._pick_value(self.vbox.Title, self.metatag.workTitle)

    @title.setter
    def title(self, value):
        # Writing synchronizes both storage places.
        self.vbox.Title = self.metatag.workTitle = value

    @property
    def subtitle(self):
        """The subtitle: VBox value first, then ``movementTitle``."""
        return self._pick_value(self.vbox.Subtitle, self.metatag.movementTitle)

    @subtitle.setter
    def subtitle(self, value):
        self.vbox.Subtitle = self.metatag.movementTitle = value

    @property
    def composer(self):
        """The composer: VBox value first, then the metaTag value."""
        return self._pick_value(self.vbox.Composer, self.metatag.composer)

    @composer.setter
    def composer(self, value):
        self.vbox.Composer = self.metatag.composer = value

    @property
    def lyricist(self):
        """The lyricist: VBox value first, then the metaTag value."""
        return self._pick_value(self.vbox.Lyricist, self.metatag.lyricist)

    @lyricist.setter
    def lyricist(self, value):
        self.vbox.Lyricist = self.metatag.lyricist = value
class InterfaceReadWrite(object):
    """Unified read/write access to all writable meta fields.

    Field names are prefixed with the sub-object they live on, e. g.
    ``metatag_work_title``, ``vbox_title`` or ``combined_title``.

    :param xml_root: The root element of a parsed MuseScore XML tree.
    """

    # The sub-objects that carry the actual fields.
    objects = ('metatag', 'vbox', 'combined')

    def __init__(self, xml_root):
        self.metatag = MetaTag(xml_root)
        self.vbox = Vbox(xml_root)
        self.combined = Combined(xml_root)
        self.fields = self.get_all_fields()

    @staticmethod
    def get_all_fields():
        """Return the sorted list of all prefixed field names."""
        prefixed = ['metatag_' + to_underscore(field)
                    for field in MetaTag.fields]
        prefixed += ['vbox_' + field.lower() for field in Vbox.fields]
        prefixed += ['combined_' + field for field in Combined.fields]
        return sorted(prefixed)

    @staticmethod
    def _split(field):
        """Split a prefixed field name into its object and field parts,
        e. g. ``metatag_composer`` -> ``{'object': 'metatag',
        'field': 'composer'}``.

        :raises ValueError: if the name cannot be split or the prefix
            is not a supported object.
        """
        match = re.search(r'([^_]*)_(.*)', field)
        if not match:
            raise ValueError('Field “' + field + '” can’t be splitted!')
        obj_name, field_name = match.groups()
        if obj_name not in InterfaceReadWrite.objects:
            raise ValueError(obj_name + ': Not a supported object!')
        return {'object': obj_name, 'field': field_name}

    def export_to_dict(self):
        """Export all fields and their values as a dictionary."""
        return export_to_dict(self, self.fields)

    def __getattr__(self, field):
        parts = self._split(field)
        return getattr(getattr(self, parts['object']), parts['field'])

    def __setattr__(self, field, value):
        # Real instance attributes are set directly; everything else is
        # delegated to the matching sub-object.
        if field in ('fields', 'metatag', 'objects', 'vbox', 'combined'):
            self.__dict__[field] = value
        else:
            parts = self._split(field)
            return setattr(getattr(self, parts['object']),
                           parts['field'], value)
class InterfaceReadOnly(object):
    """Read-only access to the file properties of a score. All field
    names carry the prefix ``readonly_``.

    :param tree: The object the file properties are read from.
    """

    fields = [
        'readonly_abspath',
        'readonly_basename',
        'readonly_dirname',
        'readonly_extension',
        'readonly_filename',
        'readonly_relpath',
        'readonly_relpath_backup',
    ]

    def __init__(self, tree):
        # All property values are forwarded from this object.
        self.xml_tree = tree

    @property
    def readonly_abspath(self):
        """The absolute path of the score file."""
        return self.xml_tree.abspath

    @property
    def readonly_basename(self):
        """The basename (without extension) of the score file."""
        return self.xml_tree.basename

    @property
    def readonly_dirname(self):
        """The name of the containing directory."""
        return self.xml_tree.dirname

    @property
    def readonly_extension(self):
        """The file extension (``mscx`` or ``mscz``)."""
        return self.xml_tree.extension

    @property
    def readonly_filename(self):
        """The filename of the score file."""
        return self.xml_tree.filename

    @property
    def readonly_relpath(self):
        """The relative path of the score file."""
        return self.xml_tree.relpath

    @property
    def readonly_relpath_backup(self):
        """The relative path of the backup copy."""
        return self.xml_tree.relpath_backup
class Interface(object):
    """Unified interface merging the read-only file properties and the
    writable meta fields of a score.

    :param tree: An object providing ``xml_root`` plus the file
        properties the read-only interface forwards.
    """

    def __init__(self, tree):
        self.xml_tree = tree
        self.read_only = InterfaceReadOnly(tree)
        self.read_write = InterfaceReadWrite(tree.xml_root)
        self.fields = self.get_all_fields()

    @staticmethod
    def get_all_fields():
        """Return the sorted list of all field names (read-only plus
        read/write)."""
        return sorted(InterfaceReadOnly.fields +
                      InterfaceReadWrite.get_all_fields())

    def export_to_dict(self):
        """Export all fields and their values as a dictionary."""
        return export_to_dict(self, self.fields)

    def __getattr__(self, field):
        # Dispatch on the field prefix: “readonly_” fields go to the
        # read-only interface, everything else to the writable one.
        if re.match(r'^readonly_', field):
            return getattr(self.read_only, field)
        else:
            return getattr(self.read_write, field)

    def __setattr__(self, field, value):
        # Real instance attributes are set directly; writable fields
        # are delegated; writing a “readonly_” field raises.
        if field in ('xml_tree', 'read_only', 'read_write', 'fields'):
            self.__dict__[field] = value
        elif not re.match(r'^readonly_', field):
            return setattr(self.read_write, field, value)
        else:
            raise ReadOnlyFieldError(field)
class Meta(MscoreXmlTree):
    """High level interface to show and manipulate the meta data of a
    MuseScore file.

    :param relpath: The relative (or absolute) path of a MuseScore
        file.
    """

    def __init__(self, relpath):
        super(Meta, self).__init__(relpath)
        # The interfaces are only built if the file could be parsed.
        if not self.errors:
            self.metatag = MetaTag(self.xml_root)
            self.vbox = Vbox(self.xml_root)
            self.combined = Combined(self.xml_root)
            self.interface_read_write = InterfaceReadWrite(self.xml_root)
            self.interface = Interface(self)

    def sync_fields(self):
        """Synchronize the metaTag and VBox storage places: reading a
        combined field picks the best value, writing it back stores
        that value in both places (see Combined's setters)."""
        if not self.errors:
            self.combined.title = self.combined.title
            self.combined.subtitle = self.combined.subtitle
            self.combined.composer = self.combined.composer
            self.combined.lyricist = self.combined.lyricist

    def distribute_field(self, source_fields, format_string):
        """Distribute the value of a source field onto other fields
        using a format string; the first source field whose value
        matches wins.

        :param source_fields: Comma-separated source field names.
        :param format_string: A format string with ``$field`` markers.
        """
        source_fields = source_fields.split(',')
        for source_field in source_fields:
            try:
                source = getattr(self.interface, source_field)
                results = distribute_field(source, format_string)
                if results:
                    for field, value in results.items():
                        setattr(self.interface, field, value)
                return
            except UnmatchedFormatStringError as error:
                # Remember the mismatch and try the next source field.
                self.errors.append(error)

    def write_to_log_file(self, log_file, format_string):
        """Write one formatted line describing this score to a file.

        NOTE(review): the file is opened in 'w' mode, so every call
        overwrites the previous content — confirm this is intended.

        :param log_file: Path of the file to write to.
        :param format_string: A tmep format string rendered with all
            interface fields.
        """
        log = open(log_file, 'w')
        log.write(tmep.parse(format_string, self.interface.export_to_dict()) +
                  '\n')
        log.close()

    def set_field(self, destination_field, format_string):
        """Set one field to a value rendered from a format string.

        :param destination_field: The field to write to.
        :param format_string: A tmep format string rendered with all
            interface fields.
        """
        field_value = tmep.parse(format_string,
                                 self.interface.export_to_dict())
        setattr(self.interface, destination_field, field_value)

    def clean(self, fields):
        """Clear the given fields (or all writable fields).

        :param fields: A one-element sequence containing either the
            string ``'all'`` or a comma-separated list of field names.
        """
        fields = fields[0]
        if fields == 'all':
            fields = self.interface_read_write.fields
        else:
            fields = fields.split(',')
        for field in fields:
            setattr(self.interface_read_write, field, '')

    def delete_duplicates(self):
        """Delete duplicated values: drop the lyricist when it equals
        the composer; move the subtitle into an empty title; drop the
        subtitle when it then equals the title."""
        iface = self.interface
        if iface.combined_lyricist == iface.combined_composer:
            iface.combined_lyricist = ''
        if not iface.combined_title and iface.combined_subtitle:
            iface.combined_title = iface.combined_subtitle
        if iface.combined_subtitle == iface.combined_title:
            iface.combined_subtitle = ''

    def show(self, pre, post):
        """Print a colorized before/after listing of the meta fields.

        :param pre: Field values before the manipulation.
        :param post: Field values after the manipulation.
        """
        args = get_args()
        fields = list(self.interface.fields)
        # Most read-only fields are hidden unless verbosity is raised.
        if args.general_verbose < 1:
            fields.remove('readonly_abspath')
            fields.remove('readonly_dirname')
            fields.remove('readonly_extension')
            fields.remove('readonly_filename')
            fields.remove('readonly_relpath')
        if args.general_verbose < 2:
            fields.remove('readonly_relpath_backup')
        for field in fields:
            # At verbosity 0 only fields carrying a value are shown.
            if (args.general_verbose == 0 and (pre[field] or post[field])) or \
               args.general_verbose > 0:
                # Pick a color per field namespace.
                if re.match(r'^combined_', field):
                    field_color = 'green'
                elif re.match(r'^metatag_', field):
                    field_color = 'blue'
                elif re.match(r'^readonly_', field):
                    field_color = 'red'
                elif re.match(r'^vbox_', field):
                    field_color = 'cyan'
                else:
                    field_color = 'white'
                line = []
                if pre[field]:
                    line.append('“{}”'.format(pre[field]))
                if pre[field] != post[field]:
                    line.append('->')
                    line.append(color('“{}”'.format(post[field]), 'yellow'))
                print('{}: {}'.format(color(field, field_color),
                                      ' '.join(line)))

    def export_json(self):
        """Export the title to a JSON file next to the score file.

        NOTE(review): ``self.get('title')`` — neither Meta nor the
        base classes visible here define a ``get()`` method; this looks
        like it would raise AttributeError. Confirm against the full
        code base.
        """
        data = {}
        data['title'] = self.get('title')
        output = open(self.relpath.replace(
            '.' + self.extension, '.json'), 'w')
        json.dump(data, output, indent=4)
        output.close()
```
#### File: mscx-muggling/mscxyz/score_file_classes.py
```python
from mscxyz.utils import mscore, re_open
import lxml.etree # Needed for type hints
import fnmatch
import lxml
import os
import shutil
import string
import zipfile
import tempfile
def list_scores(path: str, extension: str = 'both', glob: str = None) -> list:
    """List all scores in a path.

    :param path: The path to search for score files; may also be a
        single file.
    :param extension: Possible values: “both”, “mscz” or “mscx”.
    :param glob: A glob string, see fnmatch. Overrides ``extension``.

    :return: A sorted list of file paths.
    :raises ValueError: if ``extension`` is not one of the three
        accepted values.
    """
    if not glob:
        # Derive the glob pattern from the requested extension.
        if extension == 'both':
            glob = '*.msc[xz]'
        elif extension in ('mscx', 'mscz'):
            glob = '*.{}'.format(extension)
        else:
            raise ValueError('Possible values for the argument “extension” '
                             'are: “both”, “mscx”, “mscz”')
    if os.path.isfile(path):
        return [path] if fnmatch.fnmatch(path, glob) else []
    collected = []
    for root, _, file_names in os.walk(path):
        collected.extend(
            os.path.join(root, file_name)
            for file_name in file_names
            if fnmatch.fnmatch(file_name, glob))
    collected.sort()
    return collected
def list_zero_alphabet() -> list:
    """Build a list of directory names: ``0`` followed by the letters
    ``a`` to ``z``."""
    return ['0'] + list(string.ascii_lowercase)
###############################################################################
# Class hierarchy level 1
###############################################################################
class MscoreFile(object):
    """This class holds basic file properties of the MuseScore score file.

    :param relpath: The relative (or absolute) path of a MuseScore
      file.
    """

    def __init__(self, relpath: str):
        self.errors = []
        """A list to store errors."""
        self.relpath = relpath
        """The relative path of the score file, for example:
        ``files_mscore2/simple.mscx``.
        """
        self.abspath = os.path.abspath(relpath)
        """The absolute path of the score file, for example:
        ``/home/jf/test/files_mscore2/simple.mscx``."""
        self.extension = relpath.split('.')[-1].lower()
        """The extension (``mscx`` or ``mscz``) of the score file, for
        example: ``mscx``."""
        self.relpath_backup = relpath.replace(
            '.' + self.extension, '_bak.' + self.extension)
        """The backup path of the score file, for example:
        ``files_mscore2/simple_bak.mscx``."""
        self.dirname = os.path.dirname(relpath)
        """The name of the containing directory of the MuseScore file, for
        example: ``files_mscore2``."""
        self.filename = os.path.basename(relpath)
        """The filename of the MuseScore file, for example:
        ``simple.mscx``."""
        # Bug fix: replace('.mscx', '') left the extension in place for
        # .mscz (and upper-case) files; splitext handles any extension.
        self.basename = os.path.splitext(self.filename)[0]
        """The basename of the score file, for example: ``simple``."""
        if self.extension == 'mscz':
            # Zipped scores are unpacked; the XML is loaded from there.
            self.loadpath = self._unzip(self.abspath)
            """The load path of the score file"""
        else:
            self.loadpath = self.abspath

    @staticmethod
    def _unzip(abspath: str):
        """Extract a zipped ``.mscz`` file into a temporary directory
        and return the path of the contained ``.mscx`` file."""
        tmp_zipdir = tempfile.mkdtemp()
        # “with” closes the archive deterministically, even on errors.
        with zipfile.ZipFile(abspath, 'r') as zip_ref:
            zip_ref.extractall(tmp_zipdir)
        con = os.path.join(tmp_zipdir, 'META-INF', 'container.xml')
        container_info = lxml.etree.parse(con)
        mscx = container_info \
            .xpath('string(/container/rootfiles/rootfile/@full-path)')
        return os.path.join(tmp_zipdir, mscx)

    def backup(self):
        """Make a copy of the MuseScore file."""
        shutil.copy2(self.relpath, self.relpath_backup)

    def export(self, extension: str = 'pdf'):
        """Export the score to the specified file type.

        :param extension: The extension (default: pdf)
        """
        score = self.relpath
        mscore(['--export-to',
                score.replace('.' + self.extension, '.' + extension), score])
###############################################################################
# Class hierarchy level 2
###############################################################################
class MscoreXmlTree(MscoreFile):
    """XML tree manipulation of a MuseScore file.

    :param relpath: The relative (or absolute) path of a MuseScore file.
    """

    def __init__(self, relpath: str):
        super(MscoreXmlTree, self).__init__(relpath)
        try:
            self.xml_tree = lxml.etree.parse(self.loadpath)
        except lxml.etree.XMLSyntaxError as e:
            # Parse errors are collected; the object stays usable so the
            # caller can report them.
            self.errors.append(e)
        else:
            self.xml_root = self.xml_tree.getroot()
            musescore = self.xml_tree.xpath('/museScore')
            version = musescore[0].get('version')
            self.version_major = int(version.split('.')[0])
            """The major MuseScore version, for example 2 or 3"""
            self.version = float(version)
            """The MuseScore version, for example 2.03 or 3.01"""

    def add_sub_element(self, root_tag, tag, text: str):
        """Append a new child element carrying the given text.

        :param root_tag: The parent element.
        :param tag: The tag name of the new element.
        :param text: The text content of the new element.
        """
        tag = lxml.etree.SubElement(root_tag, tag)
        tag.text = text

    def strip_tags(self, *tag_names: str):
        """Delete / strip some tag names."""
        lxml.etree.strip_tags(self.xml_tree, tag_names)

    def remove_tags_by_xpath(self, *xpath_strings: str):
        """Remove tags by xpath strings.

        :param xpath_strings: A xpath string.

        .. code:: Python

            tree.remove_tags_by_xpath(
                '/museScore/Score/Style', '//LayoutBreak', '//StemDirection'
            )
        """
        for xpath_string in xpath_strings:
            for rm in self.xml_tree.xpath(xpath_string):
                rm.getparent().remove(rm)

    def merge_style(self, styles: str):
        """Merge styles into the XML tree.

        :param styles: The path of the style file or a string containing
          the XML style markup.

        ``styles`` may not contain surrounding ``<Style>`` tags. This
        input is valid:

        .. code :: XML

            <TextStyle>
              <halign>center</halign>
              <valign>bottom</valign>
              <name>Form Section</name>
            </TextStyle>

        A complete document (``<?xml ...`` declaration plus
        ``<museScore><Style>`` wrapper) is invalid.
        """
        if os.path.exists(styles):
            style = lxml.etree.parse(styles).getroot()
        else:
            # <?xml ... tag without encoding to avoid this error:
            # ValueError: Unicode strings with encoding declaration are
            # not supported. Please use bytes input or XML fragments without
            # declaration.
            pre = '<?xml version="1.0"?><museScore version="2.06"><Style>'
            post = '</Style></museScore>'
            style = lxml.etree.XML(pre + styles + post)
        for score in self.xml_tree.xpath('/museScore/Score'):
            score.insert(0, style[0])

    def clean(self):
        """Remove the style, the layout breaks, the stem directions and the
        ``font``, ``b``, ``i``, ``pos``, ``offset`` tags"""
        self.remove_tags_by_xpath(
            '/museScore/Score/Style', '//LayoutBreak', '//StemDirection')
        self.strip_tags('font', 'b', 'i', 'pos', 'offset')

    def save(self, new_name: str = '', mscore: bool = False):
        """Save the MuseScore file.

        :param new_name: Save the MuseScore file under a new name.
        :param mscore: Save the MuseScore file by opening it with the
          MuseScore executable and save it there.
        """
        if new_name:
            filename = new_name
        elif self.extension == 'mscz':
            # mscz: write into the unzipped tree, re-zip it afterwards.
            filename = self.loadpath
        else:
            filename = self.relpath
        if not self.errors:
            # To get the same xml tag structure as the original score file
            # has: empty tags must carry an empty text, not None.
            for xpath in ('//LayerTag',
                          '//metaTag',
                          '//font',
                          '//i',
                          '//evenFooterL',
                          '//evenFooterC',
                          '//evenFooterR',
                          '//oddFooterL',
                          '//oddFooterC',
                          '//oddFooterR',
                          '//chord/name',
                          '//chord/render',
                          '//StaffText/text',
                          '//Jump/continueAt',
                          ):
                for tag in self.xml_tree.xpath(xpath):
                    if not tag.text:
                        tag.text = ''
            # “with” guarantees the handle is closed even on write
            # errors (was a plain open()/close() pair before).
            with open(filename, 'w') as score:
                score.write('<?xml version="1.0" encoding="UTF-8"?>\n')
                score.write(lxml.etree.tostring(self.xml_root,
                                                encoding='UTF-8')
                            .decode('utf-8'))
                score.write('\n')
            if self.extension == 'mscz':
                # Need some tmp directory cleanup for working with mscz files
                tmpdir = os.path.dirname(filename)
                # Bug fix: the ZipFile was never closed before, so the
                # archive’s central directory could stay unwritten and
                # the handle leaked.
                with zipfile.ZipFile(self.abspath, 'w') as zip_ref:
                    for root, _, files in os.walk(tmpdir):
                        for file in files:
                            arcname = os.path.join(
                                root.replace(tmpdir, ""), file)
                            zip_ref.write(os.path.join(root, file), arcname)
            if mscore:
                re_open(filename)
###############################################################################
# Class hierarchy level 3
###############################################################################
class MscoreStyleInterface(MscoreXmlTree):
    """
    Interface specialized for the style manipulation.

    :param relpath: The relative (or absolute) path of a MuseScore file.
    """

    def __init__(self, relpath: str):
        super(MscoreStyleInterface, self).__init__(relpath)
        styles = self.xml_tree.xpath('/museScore/Score/Style')
        if styles:
            self.style = styles[0]
            """The ``/museScore/Score/Style`` element object, see
            https://lxml.de/tutorial.html#the-element-class
            """
        else:
            # Scores without a Style section get an empty one attached.
            self.style = self._create_parent_style()

    def _create_parent_style(self):
        """Create and attach an empty ``/museScore/Score/Style``
        element and return it."""
        score = self.xml_tree.xpath('/museScore/Score')
        return lxml.etree.SubElement(score[0], 'Style')

    def _create(self, tag: str) -> lxml.etree.Element:
        """Create the (possibly nested) element path below the Style
        tag, reusing existing elements, and return the innermost one.

        :param tag: Nested tags are supported, for example ``TextStyle/halign``
        """
        tags = tag.split('/')
        parent = self.style
        for tag in tags:
            element = parent.find(tag)
            if element is None:
                parent = lxml.etree.SubElement(parent, tag)
            else:
                parent = element
        return parent

    def get_element(self, element_path: str,
                    create: bool = False) -> lxml.etree.Element:
        """
        Get a lxml element which is parent to the ``Style`` tag.

        :param element_path: see
          http://lxml.de/tutorial.html#elementpath
        :param create: Create the element if not present in the parent
          ``Style`` tag.

        Example code:

        .. code:: Python

            # Set attributes on a maybe non-existent style tag.
            # <measureNumberOffset x="0.5" y="-2"/>
            test = MscoreStyleInterface('text.mscx')
            element = test.get_element('measureNumberOffset', create=True)
            element.attrib['x'] = '0.5'
            element.attrib['y'] = '-2'
            test.save()
        """
        element = self.style.find(element_path)
        if element is None and create:
            element = self._create(element_path)
        return element

    def get_value(self, element_path: str) -> str:
        """
        Get the value (text) of a style tag.

        :param element_path: see
          http://lxml.de/tutorial.html#elementpath
        """
        element = self.get_element(element_path)
        return element.text

    def set_attributes(self, element_path: str,
                       attributes: dict) -> lxml.etree.Element:
        """Set attributes on a style child tag, creating the tag when
        necessary.

        :param element_path: see
          http://lxml.de/tutorial.html#elementpath
        """
        element = self.get_element(element_path, create=True)
        for name, value in attributes.items():
            element.attrib[name] = str(value)
        return element

    def set_value(self, element_path: str, value: str):
        """Set the value (text) of a style tag, creating the tag when
        necessary.

        :param element_path: see
          http://lxml.de/tutorial.html#elementpath
        """
        element = self.style.find(element_path)
        if element is None:
            element = self._create(element_path)
        element.text = str(value)

    def _get_text_style_element(self, name: str) -> lxml.etree.Element:
        """Find (or create) the ``<TextStyle>`` element whose ``<name>``
        contains the given name. MuseScore 2 files only."""
        if self.version_major != 2:
            raise ValueError(
                'This operation is only allowed for MuseScore 2 score files'
            )
        xpath = '//TextStyle/name[contains(., "{}")]'.format(name)
        child = self.xml_tree.xpath(xpath)
        if child:
            return child[0].getparent()
        else:
            el_text_style = lxml.etree.SubElement(self.style, 'TextStyle')
            el_name = lxml.etree.SubElement(el_text_style, 'name')
            el_name.text = name
            return el_text_style

    def get_text_style(self, name: str) -> dict:
        """Get text styles. Only MuseScore2!

        :param name: The name of the text style.
        """
        text_style = self._get_text_style_element(name)
        out = {}
        for child in text_style.iterchildren():
            out[child.tag] = child.text
        return out

    def set_text_style(self, name: str, values: dict):
        """Set text styles. Only MuseScore2!

        :param name: The name of the text style.
        :param values: A dictionary. The keys are the tag names, values are
          the text values of the child tags, for example
          ``{size: 14, bold: 1}``.
        """
        text_style = self._get_text_style_element(name)
        for element_name, value in values.items():
            el = text_style.find(element_name)
            if el is None:
                el = lxml.etree.SubElement(text_style, element_name)
            el.text = str(value)
```
#### File: mscx-muggling/test/test_command_line_interface.py
```python
import subprocess
import unittest
class TestCli(unittest.TestCase):
    """Smoke test: the installed “mscx-manager” command prints its
    usage message when called with ``--help``."""

    def test_cli(self):
        output = subprocess.check_output(('mscx-manager', '--help'))
        self.assertTrue('usage: mscx-manager' in str(output))


if __name__ == '__main__':
    unittest.main()
``` |
{
"source": "Josef-Friedrich/mutagen-renamer",
"score": 3
} |
#### File: mutagen-renamer/audiorename/batch.py
```python
import os
import typing
from phrydy import MediaFileExtended
from .job import Job
from .audiofile import do_job_on_audiofile, mb_track_listing
class VirtualAlbum:
    """One audio file seen as part of an album bundle.

    :param title: The album title the track belongs to.
    :param track: The track number within the album.
    :param path: The path of the audio file.
    """

    title: str
    track: int
    path: str

    def __init__(self, title: str, track: int, path: str) -> None:
        self.title, self.track, self.path = title, track, path
class Batch:
    """This class first sorts all files and then walks through all files. In
    this process it tries to make bundles of files belonging to an album. This
    bundle of files is temporary stored in the attribute `virtual_album`. This
    complicated mechanism is needed for the two filters `album_complete` and
    `album_min`.
    """

    virtual_album: typing.List[VirtualAlbum]
    """Storage of a list of files belonging to an album."""

    current_album_title: str
    """Storage for the album title of the current audio file."""

    job: Job

    bundle_filter: bool

    def __init__(self, job: Job):
        self.job = job
        # Bug fix: “virtual_album” used to be a mutable CLASS attribute
        # (= []), shared by every Batch instance — a second Batch
        # inherited the albums collected by the first. Both state
        # attributes are now per-instance.
        self.virtual_album = []
        self.current_album_title = ''
        # The album filters only apply when one of them is requested.
        self.bundle_filter = job.filters.album_complete or \
            isinstance(job.filters.album_min, int)

    def check_extension(self, path: str) -> bool:
        """Check the extension of the track.

        :param path: The path of the track.

        :return: True if the (case-insensitive) extension is in the
            extension filter list.
        """
        extension = self.job.filters.extension
        extension = ['.' + e for e in extension]
        if path.lower().endswith(tuple(extension)):
            return True
        else:
            return False

    def check_quantity(self) -> bool:
        """Compare the number of tracks in an album with the minimal track
        threshold.

        :return: True if the current bundle holds more tracks than the
            ``album_min`` threshold.
        """
        if isinstance(self.job.filters.album_min, int) and \
                len(self.virtual_album) > int(self.job.filters.album_min):
            return True
        else:
            return False

    def check_completeness(self) -> bool:
        """Check if the album is complete, i. e. the number of collected
        tracks equals the highest track number."""
        max_track = 0
        for album in self.virtual_album:
            if album.track > max_track:
                max_track = album.track
        if len(self.virtual_album) == max_track:
            return True
        else:
            return False

    def process_album(self):
        """Check the current album bundle for quantity and completeness
        and, if it passes, run the job on every collected file. The
        bundle is emptied afterwards."""
        quantity = True
        completeness = True
        if self.job.filters.album_min and not self.check_quantity():
            quantity = False
        if self.job.filters.album_complete and not self.check_completeness():
            completeness = False
        if quantity and completeness:
            for album in self.virtual_album:
                do_job_on_audiofile(album.path, job=self.job)
        self.virtual_album = []

    def make_bundles(self, path: str = ''):
        """Collect tracks into album bundles; a new album title flushes
        the pending bundle first.

        :param path: The path of the track. An empty path flushes the
            last pending bundle (used after the directory walk).
        """
        if not path:
            self.process_album()
            return
        try:
            media = MediaFileExtended(path)
            album = VirtualAlbum(media.album, media.track, path)
            if not self.current_album_title or \
                    self.current_album_title != media.album:
                self.current_album_title = media.album
                self.process_album()
            self.virtual_album.append(album)
        except Exception:
            # Best effort: unreadable or broken media files are skipped
            # silently on purpose (the walk must not abort).
            pass

    def execute(self):
        """Process all files of a given path or process a single file."""
        mb_track_listing.counter = 0
        if os.path.isdir(self.job.selection.source):
            for path, dirs, files in os.walk(self.job.selection.source):
                # Sort for a deterministic processing order.
                dirs.sort()
                files.sort()
                for file_name in files:
                    p = os.path.join(path, file_name)
                    if self.check_extension(p):
                        if self.bundle_filter:
                            self.make_bundles(p)
                        else:
                            do_job_on_audiofile(p, job=self.job)
            # Process the last bundle left over
            if self.bundle_filter:
                self.make_bundles()
        else:
            p = self.job.selection.source
            if self.check_extension(p):
                do_job_on_audiofile(p, job=self.job)
```
#### File: mutagen-renamer/test/test_batch.py
```python
import unittest
import audiorename
import helper
class TestBatch(unittest.TestCase):
    """Check which source files a dry run processes, depending on the
    album filters ``--album-min`` and ``--album-complete``."""

    def setUp(self):
        # Expected file lists for the different fixture directories.
        self.singles = helper.gen_file_list(
            ['album', 'compilation'],
            helper.get_testfile('files'),
        )
        # Only the odd-numbered tracks of the broken album.
        self.album_broken = helper.gen_file_list(
            ['01', '03', '05', '07', '09', '11'],
            helper.get_testfile('files', 'album_broken'),
        )
        # All eleven files of the broken album directory.
        self.album_broken_all = helper.gen_file_list(
            ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11'],
            helper.get_testfile('files', 'album_broken'),
        )
        # A complete eleven track album.
        self.album_complete = helper.gen_file_list(
            ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11'],
            helper.get_testfile('files', 'album_complete'),
        )
        # An album with the tracks 03 and 08 missing.
        self.album_incomplete = helper.gen_file_list(
            ['01', '02', '04', '05', '06', '07', '09', '10', '11'],
            helper.get_testfile('files', 'album_incomplete'),
        )
        # A complete album with only five tracks.
        self.album_small = helper.gen_file_list(
            ['01', '02', '03', '04', '05'],
            helper.get_testfile('files', 'album_small'),
        )
        # Everything the fixture tree contains.
        self.all = self.singles + \
            self.album_broken_all + \
            self.album_complete + \
            self.album_incomplete + \
            self.album_small

    def test_single(self):
        """A single file is processed without any album logic."""
        single = helper.get_testfile('files', 'album.mp3')
        with helper.Capturing() as output:
            audiorename.execute('--dry-run', '--verbose', single)
        self.assertEqual([single], helper.filter_source(output))

    def test_folder_complete(self):
        """Without filters every file of the tree is processed."""
        with helper.Capturing() as output:
            audiorename.execute('--dry-run', '--verbose',
                                helper.get_testfile('files'))
        self.assertEqual(self.all, helper.filter_source(output))

    def test_folder_sub(self):
        """A sub folder restricts processing to its own files."""
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--verbose',
                helper.get_testfile('files', 'album_complete')
            )
        self.assertEqual(self.album_complete, helper.filter_source(output))

    def test_album_min(self):
        """Only albums with enough tracks pass the --album-min filter."""
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--verbose',
                '--album-min',
                '7',
                helper.get_testfile('files')
            )
        self.assertEqual(self.album_complete + self.album_incomplete,
                         helper.filter_source(output))

    def test_album_min_no_match(self):
        """An unreachable --album-min threshold matches nothing."""
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--verbose',
                '--album-min',
                '23',
                helper.get_testfile('files')
            )
        self.assertEqual([], helper.filter_source(output))

    def test_album_complete(self):
        """--album-complete drops albums with missing track numbers."""
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--verbose',
                '--album-complete',
                helper.get_testfile('files')
            )
        self.assertEqual(
            self.singles + self.album_complete + self.album_small,
            helper.filter_source(output)
        )

    def test_filter_all(self):
        """Both filters combined keep only the complete, big album."""
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--verbose',
                '--album-min',
                '7',
                '--album-complete',
                helper.get_testfile('files')
            )
        self.assertEqual(self.album_complete, helper.filter_source(output))
class TestExtension(unittest.TestCase):
    """Test the ``--extension`` command line filter on a folder that mixes
    flac, m4a and mp3 files.

    NOTE(review): the names ``test_one`` / ``test_two`` look swapped
    relative to the number of extensions each selects — confirm intent.
    """
    def setUp(self):
        self.test_files = helper.get_testfile('mixed_formats')
    def test_default(self):
        """Without ``--extension`` every supported format is processed."""
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--verbose',
                self.test_files,
            )
        self.assertEqual(
            helper.filter_source(output),
            helper.gen_file_list(
                ['01.flac', '02.m4a', '03.mp3'],
                self.test_files,
                extension=False
            )
        )
    def test_one(self):
        """A comma-separated list selects multiple extensions."""
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--verbose',
                '--extension',
                'mp3,flac',
                self.test_files
            )
        self.assertEqual(
            helper.filter_source(output),
            helper.gen_file_list(
                ['01.flac', '03.mp3'],
                self.test_files,
                extension=False
            )
        )
    def test_two(self):
        """A single extension restricts processing to that format only."""
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--verbose',
                '--extension',
                'mp3',
                self.test_files
            )
        self.assertEqual(
            helper.filter_source(output),
            helper.gen_file_list(['03.mp3'], self.test_files,
                                 extension=False)
        )
class TestSkip(unittest.TestCase):
    """An unreadable (binary) file must be reported and skipped without
    aborting the run."""
    def setUp(self):
        self.file = helper.get_testfile('broken', 'binary.mp3')
        with helper.Capturing() as output:
            audiorename.execute('-d', '--verbose', self.file)
        self.output = helper.join(output)
    def test_message(self):
        self.assertTrue('Broken file' in self.output)
    def test_file_in_message(self):
        # Same message check as above plus the offending path.
        self.assertTrue('Broken file' in self.output)
        self.assertTrue(self.file in self.output)
    def test_continuation(self):
        """Processing continues with the files after the broken one."""
        path = helper.get_testfile('broken')
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--verbose',
                path
            )
        output = helper.filter_source(output)
        # A second source line proves the run was not aborted.
        self.assertTrue(output[1])
if __name__ == '__main__':
unittest.main()
```
#### File: mutagen-renamer/test/test_job.py
```python
import typing
from audiorename.job import Job, Timer, Counter
from audiorename.args import ArgsDefault
import unittest
import os
import helper
def job(**kwargs: typing.Any) -> Job:
    """Build a :class:`Job` from keyword arguments applied to ``ArgsDefault``."""
    return Job(ArgsDefault(**kwargs))
class TestJobWithArgParser(unittest.TestCase):
    """Check that each command line argument lands in the expected section
    of the ``Job`` configuration object."""
    ##
    # [selection]
    ##
    def test_source(self):
        self.assertEqual(job(source='.').selection.source,
                         os.path.abspath('.'))
    def test_target_default(self):
        self.assertEqual(job(source='.').selection.target, os.getcwd())
    def test_target(self):
        self.assertEqual(job(target='test').selection.target,
                         os.path.abspath('test'))
    def test_source_as_target(self):
        self.assertEqual(job(source_as_target=True).selection.target,
                         os.getcwd())
    ##
    # [rename]
    ##
    def test_backup_folder(self):
        self.assertEqual(job(backup_folder='/tmp').rename.backup_folder,
                         '/tmp')
    def test_best_format(self):
        self.assertEqual(job(best_format=True).rename.best_format, True)
    def test_dry_run(self):
        self.assertEqual(job(dry_run=True).rename.dry_run, True)
    def test_move_action(self):
        self.assertEqual(job(move_action='copy').rename.move_action, 'copy')
    def test_cleaning_action(self):
        self.assertEqual(
            job(cleaning_action='backup').rename.cleaning_action, 'backup')
    ##
    # [filters]
    ##
    def test_album_complete(self):
        self.assertEqual(job(album_complete=True).filters.album_min, None)
        self.assertEqual(job(album_complete=True).filters.album_complete, True)
    def test_album_min(self):
        self.assertEqual(job(album_min=19).filters.album_min, 19)
        self.assertEqual(job(album_min=19).filters.album_complete, False)
    def test_extension(self):
        self.assertEqual(job(extension='lol').filters.extension, ['lol'])
    def test_field_skip(self):
        self.assertEqual(job(field_skip='album').filters.field_skip, 'album')
    ##
    # [template_settings]
    ##
    def test_shell_friendly(self):
        self.assertEqual(
            job(shell_friendly=True).template_settings.shell_friendly, True)
    ##
    # [cli_output]
    ##
    def test_color(self):
        self.assertEqual(job(color=True).cli_output.color, True)
    def test_debug(self):
        self.assertEqual(job(debug=True).cli_output.debug, True)
    def test_job_info(self):
        self.assertEqual(job(job_info=True).cli_output.job_info, True)
    def test_mb_track_listing(self):
        self.assertEqual(
            job(mb_track_listing=True).cli_output.mb_track_listing, True)
    def test_one_line(self):
        self.assertEqual(job(one_line=True).cli_output.one_line, True)
    def test_stats(self):
        self.assertEqual(job(stats=True).cli_output.stats, True)
    def test_verbose(self):
        self.assertEqual(job(verbose=True).cli_output.verbose, True)
    ##
    # [metadata_actions]
    ##
    def test_enrich_metadata(self):
        self.assertEqual(
            job(enrich_metadata=True).metadata_actions.enrich_metadata, True)
    def test_remap_classical(self):
        self.assertEqual(
            job(remap_classical=True).metadata_actions.remap_classical, True)
def get_config_path(config_file: str) -> str:
    """Return the absolute path of a config file in the test resources."""
    return helper.get_testfile('config', config_file)
def make_job_with_config(config_file: str) -> Job:
    """Build a :class:`Job` configured from a single ini file."""
    args = ArgsDefault()
    args.config = [get_config_path(config_file)]
    return Job(args)
class TestJobWithConfigParser(unittest.TestCase):
    """Check that ini configuration files populate the ``Job`` sections and
    that later config files override earlier ones."""
    def setUp(self):
        self.job = make_job_with_config('all-true.ini')
    def test_minimal_config_file(self):
        job = make_job_with_config('minimal.ini')
        self.assertEqual(job.rename.backup_folder, '/tmp/minimal')
    def test_multiple_config_files(self):
        # The last file in the list wins for overlapping keys.
        args = ArgsDefault()
        args.config = [
            get_config_path('all-true.ini'),
            get_config_path('minimal.ini'),
        ]
        job = Job(args)
        self.assertEqual(job.rename.backup_folder, '/tmp/minimal')
        self.assertEqual(job.filters.genre_classical, ['sonata', 'opera'])
    def test_multiple_config_file_different_order(self):
        args = ArgsDefault()
        args.config = [
            get_config_path('minimal.ini'),
            get_config_path('all-true.ini'),
        ]
        job = Job(args)
        self.assertEqual(job.rename.backup_folder, '/tmp/backup')
    def test_section_selection(self):
        self.assertEqual(self.job.selection.source, '/tmp')
        self.assertEqual(self.job.selection.target, '/tmp')
        self.assertEqual(self.job.selection.source_as_target, True)
    def test_section_rename(self):
        self.assertEqual(self.job.rename.backup_folder, '/tmp/backup')
        self.assertEqual(self.job.rename.best_format, True)
        self.assertEqual(self.job.rename.dry_run, True)
        self.assertEqual(self.job.rename.move_action, 'copy')
        self.assertEqual(self.job.rename.cleaning_action, 'delete')
    def test_section_filters(self):
        self.assertEqual(self.job.filters.album_complete, True)
        self.assertEqual(self.job.filters.album_min, 42)
        self.assertEqual(self.job.filters.extension, ['wave', 'aiff'])
        self.assertEqual(self.job.filters.genre_classical, ['sonata', 'opera'])
        self.assertEqual(self.job.filters.field_skip, 'comment')
    def test_section_template_settings(self):
        self.assertEqual(self.job.template_settings.classical, True)
        self.assertEqual(self.job.template_settings.shell_friendly, True)
        self.assertEqual(self.job.template_settings.no_soundtrack, True)
    def test_section_path_templates(self):
        self.assertEqual(self.job.path_templates.default, 'classical')
        self.assertEqual(self.job.path_templates.compilation, 'classical')
        self.assertEqual(self.job.path_templates.soundtrack, 'classical')
        self.assertEqual(self.job.path_templates.classical, 'classical')
    def test_section_cli_output(self):
        self.assertEqual(self.job.cli_output.color, True)
        self.assertEqual(self.job.cli_output.debug, True)
        self.assertEqual(self.job.cli_output.job_info, True)
        self.assertEqual(self.job.cli_output.mb_track_listing, True)
        self.assertEqual(self.job.cli_output.one_line, True)
        self.assertEqual(self.job.cli_output.stats, True)
        self.assertEqual(self.job.cli_output.verbose, True)
    def test_section_metadata_actions(self):
        self.assertEqual(self.job.metadata_actions.enrich_metadata, True)
        self.assertEqual(self.job.metadata_actions.remap_classical, True)
class TestTimer(unittest.TestCase):
    """Test the ``Timer`` helper that measures and formats elapsed time."""
    def setUp(self):
        self.timer = Timer()
    def get_result(self, begin: float, end: float) -> str:
        # Inject begin/end directly instead of sleeping.
        self.timer.begin = begin
        self.timer.end = end
        return self.timer.result()
    def test_method_start(self):
        self.timer.start()
        self.assertTrue(self.timer.begin > 0)
    def test_method_stop(self):
        self.timer.stop()
        self.assertTrue(self.timer.end > 0)
    def test_method_result(self):
        # Durations are rendered with one decimal place and an "s" suffix.
        self.assertEqual(self.get_result(10.3475, 14.594), '4.2s')
    def test_method_result_large(self):
        self.assertEqual(self.get_result(10, 145), '135.0s')
    def test_method_result_small(self):
        self.assertEqual(self.get_result(10.00001, 10.00002), '0.0s')
class TestCounter(unittest.TestCase):
    """Test the ``Counter`` helper that tallies named events."""
    def setUp(self):
        self.counter = Counter()
    def test_reset(self):
        self.counter.count('lol')
        self.counter.reset()
        self.assertEqual(self.counter.get('lol'), 0)
    def test_count(self):
        self.counter.count('rename')
        self.assertEqual(self.counter.get('rename'), 1)
        self.counter.count('rename')
        self.assertEqual(self.counter.get('rename'), 2)
    def test_result(self):
        # result() renders "key=value" pairs sorted by key.
        self.counter.count('rename')
        self.assertEqual(self.counter.result(),
                         'rename=1')
        self.counter.count('no_field')
        self.assertEqual(self.counter.result(),
                         'no_field=1 rename=1')
if __name__ == '__main__':
unittest.main()
```
#### File: mutagen-renamer/test/test_musicbrainz.py
```python
from audiorename.musicbrainz import \
query, query_works_recursively, set_useragent
import unittest
import helper
@unittest.skipIf(helper.SKIP_QUICK, 'Ignored, as it has to be done quickly.')
@unittest.skipIf(helper.SKIP_API_CALLS,
                 'Ignored if the API is not available.')
class TestEnrich(unittest.TestCase):
    """Query the live MusicBrainz web service for known recording, release
    and work MBIDs (network tests, skipped in quick/offline runs)."""
    def setUp(self):
        set_useragent()
    def test_recording_pulp_01(self):
        # ['soundtrack', 'Pulp-Fiction', '01.mp3']
        result = query(
            'recording',
            '0480672d-4d88-4824-a06b-917ff408eabe',
        )
        self.assertEqual(result['id'],
                         '0480672d-4d88-4824-a06b-917ff408eabe')
    def test_recording_mozart_01(self):
        # ['classical', 'Mozart_Horn-concertos', '01.mp3']
        result = query(
            'recording',
            '7886ad6c-11af-435b-8ec3-bca5711f7728',
        )
        self.assertEqual(result['work-relation-list'][0]['work']['id'],
                         '21fe0bf0-a040-387c-a39d-369d53c251fe')
    def test_release_pulp_01(self):
        # ['soundtrack', 'Pulp-Fiction', '01.mp3']
        result = query(
            'release',
            'ab81edcb-9525-47cd-8247-db4fa969f525',
        )
        self.assertEqual(result['release-group']['id'],
                         '1703cd63-9401-33c0-87c6-50c4ba2e0ba8')
    def test_release_mozart_01(self):
        # ['classical', 'Mozart_Horn-concertos', '01.mp3'])
        result = query(
            'release',
            '5ed650c5-0f72-4b79-80a7-c458c869f53e',
        )
        self.assertEqual(result['release-group']['id'],
                         'e1fa28f0-e56e-395b-82d3-a8de54e8c627')
    def test_work_mozart_zauberfloete_unit(self):
        # recording_id 6a0599ea-5c06-483a-ba66-f3a036da900a
        # work_id eafec51f-47c5-3c66-8c36-a524246c85f8
        # Akt 1: 5adc213f-700a-4435-9e95-831ed720f348
        result = query_works_recursively(
            'eafec51f-47c5-3c66-8c36-a524246c85f8', [])
        self.assertEqual(result[0]['id'],
                         'eafec51f-47c5-3c66-8c36-a524246c85f8')
        self.assertEqual(result[1]['id'],
                         '5adc213f-700a-4435-9e95-831ed720f348')
        self.assertEqual(result[2]['id'],
                         'e208c5f5-5d37-3dfc-ac0b-999f207c9e46')
        self.assertTrue('artist-relation-list' in result[2])
    def test_work_kempff_transcription(self):
        # work_id 4fba670e-3b8e-4ddf-a3a6-90817c94d6ce
        result = query_works_recursively(
            '4fba670e-3b8e-4ddf-a3a6-90817c94d6ce', [])
        self.assertEqual(result[0]['id'],
                         '4fba670e-3b8e-4ddf-a3a6-90817c94d6ce')
        self.assertEqual(len(result), 1)
``` |
{
"source": "Josef-Friedrich/path-macrotemplate",
"score": 2
} |
#### File: path-macrotemplate/test/test_functions.py
```python
import unittest
import tmep
class TestFunctions(unittest.TestCase):
    """Exercise every template function (``%alpha``, ``%asciify``,
    ``%delchars``, …) through the public ``tmep.parse`` interface."""
    def setUp(self):
        self.values = {
            'prename': 'Franz',
            'lastname': 'Schubert',
            'lol': 'lol',
            'troll': 'troll',
            'genres': 'Pop; Rock; Classical Crossover',
            'asciify': 'gennemgår',
            'track': 7,
        }
    def parseEqual(self, a, b):
        self.assertEqual(tmep.parse(a, self.values), b)
    # alpha
    def test_alpha(self):
        self.parseEqual('%alpha{abc123}', 'abc ')
    def test_alpha_genres(self):
        self.parseEqual('%alpha{$genres}', 'Pop Rock Classical Crossover')
    # alphanum
    def test_alphanum_accent(self):
        self.parseEqual('%alphanum{après-évêque}', 'apres eveque')
    def test_alphanum_genres(self):
        self.parseEqual('%alphanum{$genres}', 'Pop Rock Classical Crossover')
    def test_alphanum_many(self):
        self.parseEqual('%alphanum{a"&(&b}', 'a b')
    # asciify
    def test_asciify_literal(self):
        self.parseEqual('%asciify{après évêque}', 'apres eveque')
    def test_asciify_variable(self):
        self.parseEqual('%asciify{$asciify}', 'gennemgar')
    def test_asciify_foreign(self):
        # NOTE(review): expected value restored from a '<NAME>' dataset
        # placeholder; verify against the unidecode transliteration.
        self.parseEqual('%asciify{Новыя старонкі}', 'Novyia staronki')
    def test_asciify_german_umlaute(self):
        self.parseEqual('%asciify{äÄöÖüÜ}', 'aeAeoeOeueUe')
    def test_asciify_symbols_single(self):
        self.parseEqual('%asciify{⚓}', '')
    def test_asciify_symbols_multiple(self):
        self.parseEqual('%asciify{⚢⚣⚤⚥⚦⚧⚨⚩}', '')
    def test_asciify_symbols_mixed(self):
        self.parseEqual('%asciify{a⚢b⚣⚤c}', 'abc')
    # delchars
    def test_delchars_single(self):
        self.parseEqual('%delchars{x-x,-}', 'xx')
    def test_delchars_multiple(self):
        self.parseEqual('%delchars{x---x,-}', 'xx')
    def test_delchars_no_match(self):
        self.parseEqual('%delchars{x-x,_}', 'x-x')
    def test_delchars_multiple_chars(self):
        self.parseEqual('%delchars{x_-.x,_-.}', 'xx')
    def test_delchars_unicode(self):
        self.parseEqual('%delchars{öd,ö}', 'd')
    def test_delchars_variable(self):
        self.parseEqual('%delchars{$lastname,ue}', 'Schbrt')
    # deldupchars
    def test_deldupchars_default(self):
        self.parseEqual('%deldupchars{a---b___c...d}', 'a-b_c.d')
    def test_deldupchars_custom(self):
        self.parseEqual('%deldupchars{a---b___c,-}', 'a-b___c')
    def test_deldupchars_whitespace(self):
        self.parseEqual('%deldupchars{a  a, }', 'a a')
    # first
    def test_first(self):
        self.parseEqual('%first{$genres}', 'Pop')
    def test_first_skip(self):
        self.parseEqual('%first{$genres,1,2}', 'Classical Crossover')
    def test_first_different_sep(self):
        self.parseEqual(
            '%first{Alice / Bob / Eve,2,0, / , & }',
            'Alice & Bob'
        )
    # if
    def test_if_false(self):
        self.parseEqual('x%if{,foo}', 'x')
    def test_if_false_value(self):
        self.parseEqual('x%if{false,foo}', 'x')
    def test_if_true(self):
        self.parseEqual('%if{bar,foo}', 'foo')
    def test_if_else_false(self):
        self.parseEqual('%if{,foo,baz}', 'baz')
    def test_if_else_false_value(self):
        self.parseEqual('%if{false,foo,baz}', 'baz')
    def test_if_int_value(self):
        self.parseEqual('%if{0,foo,baz}', 'baz')
    # ifdef
    def test_if_def_field_return_self(self):
        self.parseEqual('%ifdef{lastname}', '')
    def test_if_def_field_not_defined(self):
        self.parseEqual('%ifdef{bar}', '')
    def test_if_def_true(self):
        self.parseEqual('%ifdef{lastname,Its true}', 'Its true')
    def test_if_def_true_complete(self):
        self.parseEqual('%ifdef{lastname,lol,troll}', 'lol')
    def test_if_def_false_complete(self):
        self.parseEqual('%ifdef{trill,lol,troll}', 'troll')
    # initial
    def test_initial_use_first_character(self):
        self.parseEqual('%initial{abc}', 'a')
    def test_initial_german_umlaut(self):
        self.parseEqual('%initial{ä}', 'a')
    def test_initial_special_characters(self):
        self.parseEqual('%initial{-a-}', 'a')
    def test_initial_nothing(self):
        self.parseEqual('%initial{}', '_')
    def test_initial_number(self):
        self.parseEqual('%initial{3}', '0')
    def test_initial_lower(self):
        self.parseEqual('%initial{A}', 'a')
    # left
    def test_left_literal(self):
        self.parseEqual('%left{Schubert, 3}', 'Sch')
    def test_left_variable(self):
        self.parseEqual('%left{$lastname, 3}', 'Sch')
    # lower
    def test_lower_literal(self):
        self.parseEqual('%lower{SCHUBERT}', 'schubert')
    def test_lower_variable(self):
        self.parseEqual('%lower{$lastname}', 'schubert')
    # nowhitespace
    def test_nowhitespace(self):
        self.parseEqual('%nowhitespace{$genres}',
                        'Pop;-Rock;-Classical-Crossover')
    def test_nowhitespace_inline(self):
        self.parseEqual('%nowhitespace{a b}', 'a-b')
    def test_nowhitespace_multiple(self):
        self.parseEqual('%nowhitespace{a  b}', 'a-b')
    def test_nowhitespace_newline_tab(self):
        self.parseEqual('%nowhitespace{a\n\tb}', 'a-b')
    def test_nowhitespace_replace_character(self):
        self.parseEqual('%nowhitespace{a b,_}', 'a_b')
    def test_nowhitespace_delete(self):
        self.parseEqual('%nowhitespace{a b,}', 'ab')
    # num
    def test_num_literal(self):
        self.parseEqual('%num{7,3}', '007')
    def test_num_variable(self):
        self.parseEqual('%num{$track,3}', '007')
    def test_num_default_count(self):
        self.parseEqual('%num{7}', '07')
    def test_num_default_variable(self):
        self.parseEqual('%num{$track}', '07')
    # replchars
    def test_replchars_literal(self):
        self.parseEqual('%replchars{Schubert,-,ue}', 'Sch-b-rt')
    def test_replchars_variable(self):
        self.parseEqual('%replchars{$lastname,-,ue}', 'Sch-b-rt')
    # right
    def test_right_literal(self):
        self.parseEqual('%right{Schubert,3}', 'ert')
    def test_right_variable(self):
        self.parseEqual('%right{$lastname,3}', 'ert')
    # sanitize
    def test_sanitize_literal(self):
        # Raw string instead of the former invalid "\/" escape (no noqa
        # needed any more); the byte content is unchanged.
        self.parseEqual(r'%sanitize{x:*?<>|\/~&x}', 'xx')
    # shorten
    def test_shorten_literal(self):
        self.parseEqual('%shorten{Lorem ipsum dolor sit,10}', 'Lorem')
    def test_shorten_default(self):
        self.parseEqual(
            '%shorten{Lorem ipsum dolor sit amet consectetur adipisicing}',
            'Lorem ipsum dolor sit amet')
    # title
    def test_title_literal(self):
        # NOTE(review): expected value restored from a '<NAME>' dataset
        # placeholder; %title title-cases each word.
        self.parseEqual('%title{franz schubert}', 'Franz Schubert')
    def test_title_variable(self):
        self.parseEqual('%title{$lol $troll}', 'Lol Troll')
    # upper
    def test_upper_literal(self):
        self.parseEqual('%upper{foo}', 'FOO')
    def test_upper_variable(self):
        self.parseEqual('%upper{$prename}', 'FRANZ')
    # unknown function names are left untouched
    def test_nonexistent_function(self):
        self.parseEqual('%foo{bar}', '%foo{bar}')
class TestFunctionIfDefEmpty(unittest.TestCase):
    """``%ifdefempty``: true branch for undefined, empty, falsy or
    whitespace-only fields."""
    def setUp(self):
        self.values = {
            'empty_string': '',
            'false': False,
            'non_empty_string': 'test',
            'none': None,
            'only_whitespaces': ' \t\n',
        }
    def parseEqual(self, a, b):
        self.assertEqual(tmep.parse(a, self.values), b)
    # empty_string
    def test_empty_string(self):
        self.parseEqual('%ifdefempty{empty_string,trueval}', 'trueval')
    # false
    def test_false(self):
        self.parseEqual('%ifdefempty{false,trueval}', 'trueval')
    # non_empty_string
    def test_non_empty_string(self):
        self.parseEqual('%ifdefempty{non_empty_string,trueval,falseval}',
                        'falseval')
    # nonexistent
    def test_nonexistent(self):
        self.parseEqual('%ifdefempty{nonexistent,trueval,falseval}',
                        'trueval')
    # none
    def test_none(self):
        self.parseEqual('%ifdefempty{none,trueval}', 'trueval')
    # only whitespace counts as empty
    def test_only_whitespaces(self):
        self.parseEqual('%ifdefempty{only_whitespaces,trueval,falseval}',
                        'trueval')
class TestFunctionIfDefNotEmpty(unittest.TestCase):
    """``%ifdefnotempty``: the logical complement of ``%ifdefempty``."""
    def setUp(self):
        self.values = {
            'empty_string': '',
            'false': False,
            'non_empty_string': 'test',
            'none': None,
            'only_whitespaces': ' \t\n',
        }
    def parseEqual(self, a, b):
        self.assertEqual(tmep.parse(a, self.values), b)
    # empty_string
    def test_empty_string(self):
        self.parseEqual('%ifdefnotempty{empty_string,trueval,falseval}',
                        'falseval')
    # false
    def test_false(self):
        self.parseEqual('%ifdefnotempty{false,trueval,falseval}',
                        'falseval')
    # non_empty_string
    def test_non_empty_string(self):
        self.parseEqual('%ifdefnotempty{non_empty_string,trueval,falseval}',
                        'trueval')
    # nonexistent
    def test_nonexistent(self):
        self.parseEqual('%ifdefnotempty{nonexistent,trueval,falseval}',
                        'falseval')
    # none
    def test_none(self):
        self.parseEqual('%ifdefnotempty{none,trueval,falseval}', 'falseval')
    # only whitespace counts as empty
    def test_only_whitespaces(self):
        self.parseEqual('%ifdefnotempty{only_whitespaces,trueval,falseval}',
                        'falseval')
if __name__ == '__main__':
unittest.main()
```
#### File: path-macrotemplate/test/test_interface.py
```python
import unittest
import tmep
class TestClasses(unittest.TestCase):
    """Use the ``Template`` and ``Functions`` classes directly instead of
    the ``tmep.parse`` convenience function."""
    def setUp(self):
        self.template = '${lastname}; ${prename}'
        self.values = {'prename': 'Franz', 'lastname': 'Schubert'}
        template = tmep.Template(self.template)
        functions = tmep.Functions(self.values)
        self.out = template.substitute(self.values, functions.functions)
    def test_values(self):
        self.assertEqual(self.out, 'Schubert; Franz')
class TestDefinitionParse(unittest.TestCase):
    """Test the keyword arguments of ``tmep.parse``: ``functions`` replaces
    the default function set, ``additional_functions`` extends it."""
    def setUp(self):
        self.parse = tmep.parse
        self.template = '${lastname}; ${prename}'
        self.values = {'prename': 'Franz', 'lastname': 'Schubert'}
        def lol(value):
            return 'lol' + value + 'lol'
        def troll(value):
            return 'troll' + value + 'troll'
        self.functions = {'lol': lol, 'troll': troll}
    def test_values(self):
        self.assertEqual(self.parse(self.template, self.values),
                         'Schubert; Franz') if False else None
        out = self.parse(self.template, self.values)
        self.assertEqual(out, 'Schubert; Franz')
    def test_parameter_functions(self):
        template = '%lol{$prename}%troll{$lastname}'
        out = self.parse(template, self.values, functions=self.functions)
        self.assertEqual(out, 'lolFranzloltrollSchuberttroll')
    def test_parameter_additional_functions(self):
        template = '%lol{$prename}%troll{$lastname}'
        out = self.parse(
            template, self.values, additional_functions=self.functions)
        self.assertEqual(out, 'lolFranzloltrollSchuberttroll')
        # Without the extra functions the template stays unparsed.
        out = self.parse(template, self.values)
        self.assertEqual(out, template)
class TestDoc(unittest.TestCase):
    """The documentation helper class must be importable."""
    def test_import(self):
        self.assertTrue(tmep.doc.Doc)
if __name__ == '__main__':
unittest.main()
```
#### File: path-macrotemplate/tmep/__init__.py
```python
from tmep import doc
from tmep import functions
from tmep import template
from tmep._version import get_versions
__version__ = get_versions()['version']
del get_versions
doc
class Template(template.Template):
    """Public re-export of :class:`tmep.template.Template`."""
    def __init__(self, template):
        super(Template, self).__init__(template)
class Functions(functions.Functions):
    """Public re-export of :class:`tmep.functions.Functions`."""
    def __init__(self, values=None):
        super(Functions, self).__init__(values)
def parse(template, values=None, additional_functions=None, functions=None):
    """Render a template string.

    :param str template: A template string, e. g. ``${lastname}; ${prename}``.
    :param dict values: Values to substitute for the template variables.
    :param dict additional_functions: Functions that are merged on top of
        the default (or explicitly given) function collection.
    :param dict functions: A function collection that replaces the default
        collection entirely.
    :return: The rendered string.
    """
    template_ = Template(template)
    if not functions:
        functions_ = Functions(values)
        functions = functions_.functions()
    if additional_functions:
        # Merge into a copy so the caller's dictionary is not mutated
        # (the previous implementation wrote into the passed-in dict).
        functions = dict(functions)
        for name, function in additional_functions.items():
            functions[name] = function
    return template_.substitute(values, functions)
``` |
{
"source": "Josef-Friedrich/phrydy",
"score": 2
} |
#### File: Josef-Friedrich/phrydy/setup.py
```python
import os
from setuptools import setup, find_packages
import versioneer
def read(file_name: str) -> str:
    """
    Read the contents of a text file and return its content.

    :param str file_name: The name of the file to read, relative to the
      directory of this ``setup.py``.

    :return: The content of the text file.
    :rtype: str
    """
    # Use a context manager so the file handle is closed again (the
    # previous implementation leaked it).
    with open(
        os.path.join(os.path.dirname(__file__), file_name),
        encoding='utf-8'
    ) as text_file:
        return text_file.read()
# Package metadata: the version is managed by versioneer, the long
# description is taken verbatim from the README.
setup(
    name='phrydy',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description='A easy wrapper for mutagen',
    url='https://github.com/Josef-Friedrich/phrydy',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=find_packages(),
    long_description=read('README.rst'),
    long_description_content_type='text/x-rst',
    # Pinned versions: keep in sync with requirements of the release.
    install_requires=[
        'ansicolor==0.3.2',
        'mediafile==0.9.0',
        'typing-extensions==4.1.1',
    ],
    scripts=['bin/phrydy-debug'],
    zip_safe=False)
```
#### File: phrydy/test/test_interface.py
```python
import unittest
import os
from test import helper
import phrydy
from phrydy import MediaFileExtended
class TestInterface(unittest.TestCase):
    """The public names of the ``phrydy`` package must be importable and
    functional."""
    def test_mediafile_class_in_init(self):
        mediafile = MediaFileExtended(os.path.join(helper.TEST_RESOURCES_PATH,
                                                   'full.mp3'))
        self.assertEqual(mediafile.title, 'full')
    def test_import_phrydy_media_file(self):
        self.assertEqual(phrydy.MediaFile.__name__, 'MediaFile')
    def test_import_phrydy_media_file_extended(self):
        self.assertEqual(phrydy.MediaFileExtended.__name__,
                         'MediaFileExtended')
    def test_import_phrydy_format_fields_as_txt(self):
        self.assertEqual(phrydy.format_fields_as_txt.__name__,
                         'format_fields_as_txt')
    def test_module_import_mediafile(self):
        mediafile = MediaFileExtended(
            os.path.join(helper.TEST_RESOURCES_PATH, 'full.mp3')
        )
        self.assertEqual(mediafile.title, 'full')
    def test_module_import_doc(self):
        fields = phrydy.doc_generator.fields
        self.assertTrue(fields)
``` |
{
"source": "Josef-Friedrich/python-scripts",
"score": 3
} |
#### File: python-scripts/jfscripts/extract_pdftext.py
```python
from jfscripts import __version__
from jfscripts._utils import check_dependencies, FilePath, Run
import argparse
import os
import re
import tempfile
import textwrap
# Shared subprocess wrapper; configured in main() from the CLI flags.
run = Run()
# Column width used to wrap the extracted text.
line_length = 72
# Scratch directory for the per-page pdftotext output.
tmp_dir = tempfile.mkdtemp()
# NOTE(review): opened at import time and apparently never used in this
# module (main() writes through the Txt class) — confirm and consider
# removing; it creates an empty ``export.txt`` in the current working
# directory on every import.
output_file = open('export.txt', 'w')
# Required external commands with the package that provides them.
dependencies = (
    ('pdftotext', 'poppler'),
    ('pdfinfo', 'poppler'),
)
class Txt(object):
    """Collect lines of text in a file and echo each line to stdout."""

    def __init__(self, path):
        """:param path: The destination path of the text file (any object
          convertible with ``str()``, e. g. a ``FilePath``)."""
        self.path = path
        # Spell out UTF-8 so the output does not depend on the platform
        # default encoding (the extracted text contains umlauts).
        self.file = open(str(path), 'w', encoding='utf-8')

    def add_line(self, line):
        """Append one line to the file and print it."""
        self.file.write(line + '\n')
        print(line)

    def close(self):
        """Flush and close the underlying file handle."""
        self.file.close()
def get_page_count(pdf):
    """Return the number of pages of a PDF file.

    Reads the ``Pages:`` line of the ``pdfinfo`` output; returns ``None``
    if that line is missing.
    """
    info = run.check_output(['pdfinfo', str(pdf)]).decode('utf-8')
    match = re.search(r'Pages:\s*(.*)\n', info)
    return int(match.group(1)) if match else None
def get_text_per_page(pdf, page, txt_file):
    """Extract the text of one PDF page with ``pdftotext``, clean it up
    and append it line-wrapped to ``txt_file``.

    :param pdf: The PDF file (a ``FilePath`` object or string).
    :param int page: The page number to extract (1-based).
    :param txt_file: A ``Txt`` object that receives the wrapped lines.
    """
    page = str(page)
    tmp_txt_path = os.path.join(tmp_dir, page + '.txt')
    run.check_output([
        'pdftotext',
        '-f', page,
        '-l', page,
        str(pdf),
        tmp_txt_path
    ])
    # Close the temporary file again (the previous implementation leaked
    # the file handle).
    with open(tmp_txt_path, 'r') as tmp_txt_file:
        lines = tmp_txt_file.read().splitlines()
    # Drop short lines: they are most likely page numbers, headers or
    # other layout fragments.
    full_lines = [line for line in lines if len(line) > 20]
    text_of_page = ' '.join(full_lines)
    text_of_page = text_of_page.replace("'", u'’')
    # Keep only letters (including German umlauts), digits and spaces.
    text_of_page = re.sub(r'[^a-zäöüA-ZÄÖÜß0-9 ]', '', text_of_page)
    text_of_page = re.sub(r'\s+', ' ', text_of_page)
    for line in textwrap.wrap(text_of_page, line_length):
        txt_file.add_line(line)
def get_parser():
    """The argument parser for the command line interface.

    :return: A ArgumentParser object.
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help='A PDF file containing text')
    parser.add_argument('-c', '--colorize', action='store_true',
                        help='Colorize the terminal output.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Make the command line output more verbose.')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s {version}'.format(
                            version=__version__))
    return parser
def main():
    """Entry point: extract the text of every page of a PDF and write it,
    cleaned up and wrapped, to a Markdown-style ``.txt`` file next to the
    PDF."""
    args = get_parser().parse_args()
    run.setup(verbose=args.verbose, colorize=args.colorize)
    check_dependencies(*dependencies)
    pdf = FilePath(args.file, absolute=True)
    txt_path = pdf.new(extension='txt')
    txt_file = Txt(txt_path)
    page_count = get_page_count(pdf)
    txt_file.add_line('# ' + pdf.basename)
    # pdftotext page numbers are 1-based.
    for i in range(1, page_count + 1):
        txt_file.add_line('')
        txt_file.add_line('-' * line_length)
        txt_file.add_line('')
        txt_file.add_line('## Seite ' + str(i))
        txt_file.add_line('')
        get_text_per_page(pdf, i, txt_file)
if __name__ == '__main__':
main()
```
#### File: python-scripts/jfscripts/find_dupes_by_size.py
```python
from jfscripts import __version__
import argparse
import os
def check_for_duplicates(path):
    """Recursively scan ``path`` and print ``rm -f`` shell commands for
    every group of files that share the same size in bytes.

    Size is only a heuristic: equally sized files are not necessarily
    identical. The printed commands cover *all* members of a group.

    :param str path: The directory to scan.
    """
    print(path)
    # Map file size -> list of paths with that size.
    sizes = {}
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            sizes.setdefault(os.path.getsize(full_path), []).append(full_path)
    duplicate_paths = {}
    count = 0
    for full_paths in sizes.values():
        if len(full_paths) > 1:
            count += 1
            full_paths.sort()
            # Key each group by its alphabetically first member so the
            # final report can be sorted deterministically.
            duplicate_paths[full_paths[0]] = full_paths
    for _first, full_paths in sorted(duplicate_paths.items()):
        print('-----------------------------------------')
        for full_path in full_paths:
            print('rm -f "' + full_path + '"')
    print('Duplicates found: ' + str(count))
def get_parser():
    """The argument parser for the command line interface.

    :return: A ArgumentParser object.
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(
        description='Find duplicate files by size.')
    parser.add_argument(
        'path',
        help='A directory to recursively search for duplicate files.')
    parser.add_argument(
        '-V', '--version', action='version',
        version='%(prog)s {version}'.format(version=__version__))
    return parser
def main():
    """Entry point: parse the command line arguments and start the scan."""
    args = get_parser().parse_args()
    check_for_duplicates(args.path)
if __name__ == '__main__':
main()
```
#### File: python-scripts/jfscripts/_utils.py
```python
import os
import shutil
import subprocess
from termcolor import colored
class Run(object):
    """Thin wrapper around :mod:`subprocess` that can optionally echo
    (and colorize) every command before executing it."""

    PIPE = subprocess.PIPE

    def __init__(self, *args, **kwargs):
        self.setup(*args, **kwargs)

    def setup(self, verbose=False, colorize=False):
        """Configure the wrapper.

        :param bool verbose: Print each command before running it.
        :param bool colorize: Use terminal colors when printing commands.
        """
        self.verbose = verbose
        self.colorize = colorize

    def _print_cmd(self, cmd):
        # Without colorization simply echo the joined command line.
        if not self.colorize:
            print(' '.join(cmd))
            return
        # Long options yellow, short options blue, existing paths white
        # on cyan; everything else unchanged.
        colored_args = []
        for arg in cmd:
            if arg.startswith('--'):
                arg = colored(arg, color='yellow')
            elif arg.startswith('-'):
                arg = colored(arg, color='blue')
            elif os.path.exists(arg):
                arg = colored(arg, color='white', on_color='on_cyan')
            colored_args.append(arg)
        print(' '.join(colored_args))

    def run(self, *args, **kwargs):
        """
        :return: A `CompletedProcess` object.
        :rtype: subprocess.CompletedProcess
        """
        if self.verbose:
            self._print_cmd(args[0])
        return subprocess.run(*args, **kwargs)

    def check_output(self, *args, **kwargs):
        """Like :func:`subprocess.check_output`, optionally echoing the
        command first."""
        if self.verbose:
            self._print_cmd(args[0])
        return subprocess.check_output(*args, **kwargs)
def check_dependencies(*executables, raise_error=True):
    """Check if the given executables are existing in $PATH.

    :param tuple executables: A tuple of executables to check for their
      existence in $PATH. Each element of the tuple can be either a string
      (e. g. `pdfimages`) or a itself a tuple `('pdfimages', 'poppler')`.
      The first entry of this tuple is the name of the executable the second
      entry is a description text which is displayed in the raised exception.

    :param bool raise_error: Raise an error if an executable doesn't exist.

    :return: True or False. True if all executables exist. False if one or
      more executables not exist.
    :rtype: bool
    """
    missing = []
    for executable in executables:
        if isinstance(executable, tuple):
            name, description = executable[0], executable[1]
            if not shutil.which(name):
                missing.append('{} ({})'.format(name, description))
        elif not shutil.which(executable):
            missing.append(executable)
    if not missing:
        return True
    if raise_error:
        raise SystemError('Some commands are not installed: ' +
                          ', '.join(missing))
    return False
class FilePath(object):
    """A file path split into its components (filename, basename,
    extension) with helpers to derive new paths from it."""

    def __init__(self, path, absolute=False):
        self.absolute = absolute
        """Boolean, indicates wheter the path is an absolute path or an
        relative path."""
        if self.absolute:
            self.path = os.path.abspath(path)
            """The absolute (`/home/document/file.ext`) or the relative path
            (`document/file.ext`) of the file."""
        else:
            self.path = os.path.relpath(path)
        self.filename = os.path.basename(path)
        """The filename is the combination of the basename and the
        extension, e. g. `file.ext`."""
        self.extension = os.path.splitext(self.path)[1][1:]
        """The extension of the file, e. g. `ext`."""
        if self.extension:
            self.basename = self.filename[:-len(self.extension) - 1]
            """The basename of the file, e. g. `file`."""
            self.base = self.path[:-len(self.extension) - 1]
            """The path without an extension, e. g. `/home/document/file`."""
        else:
            # Bug fix: for extension-less files the former unconditional
            # slice ``[:-len('') - 1]`` chopped off the last character.
            self.basename = self.filename
            self.base = self.path

    def __str__(self):
        return self.path

    def __eq__(self, other):
        return self.path == other.path

    def _export(self, path):
        # Derived paths inherit the absolute/relative mode.
        return FilePath(path, self.absolute)

    def new(self, extension=None, append='', del_substring=''):
        """
        :param str extension: The extension of the new file path.
        :param str append: String to append on the basename. This string
          is located before the extension.
        :param str del_substring: String to delete from the new file path.

        :return: A new file path object.
        :rtype: FilePath
        """
        if not extension:
            extension = self.extension
        new = '{}{}.{}'.format(self.base, append, extension)
        if del_substring:
            new = new.replace(del_substring, '')
        return self._export(new)

    def remove(self):
        """Remove the file."""
        os.remove(self.path)
def argparser_to_readme(argparser, template='README-template.md',
                        destination='README.md', indentation=0,
                        placeholder='{{ argparse }}'):
    """Add the formatted help output of a command line utility using the
    Python module `argparse` to a README file. Make sure to set the name
    of the program (`prop`) or you get strange program names.

    :param object argparser: A callable (e. g. a ``get_parser`` function)
      that returns the argparse parser object.
    :param str template: The path of a template text file containing the
      placeholder. Default: `README-template.md`
    :param str destination: The path of the destination file. Default:
      `README.me`
    :param int indentation: Indent the formatted help output by X spaces.
      Default: 0
    :param str placeholder: Placeholder string that gets replaced by the
      formatted help output. Default: `{{ argparse }}`
    """
    help_string = argparser().format_help()

    if indentation > 0:
        help_string = '\n'.join(
            ' ' * indentation + line for line in help_string.split('\n'))

    with open(template, 'r', encoding='utf-8') as template_file:
        template_string = template_file.read()

    readme = template_string.replace(placeholder, help_string)

    # Write through a context manager with an explicit encoding (the
    # previous implementation opened the destination without either).
    with open(destination, 'w', encoding='utf-8') as readme_file:
        readme_file.write(readme)
```
#### File: python-scripts/test/test_utils.py
```python
from _helper import TestCase
from jfscripts import _utils
from jfscripts._utils import FilePath
from jflib import Capturing
from unittest import mock
import os
import tempfile
import unittest
class TestClassRun(TestCase):
    """Tests for the ``_utils.Run`` subprocess wrapper."""

    def test_argument_verbose(self):
        """A verbose runner echoes the executed command line."""
        runner = _utils.Run(verbose=True)
        self.assertEqual(runner.verbose, True)
        with Capturing() as output:
            runner.run(['ls', '-l'], stdout=runner.PIPE)
        self.assertEqual(output, ['ls -l'])

    def test_argument_colorize(self):
        """Colorized output wraps command options in ANSI escapes."""
        runner = _utils.Run(verbose=True, colorize=True)
        self.assertEqual(runner.colorize, True)
        with Capturing() as output:
            runner.run(['ls', '-l'], stdout=runner.PIPE)
        self.assertEqual(output[0], 'ls \x1b[34m-l\x1b[0m')

    def test_argument_colorize_path(self):
        """Existing file system paths get their own color codes."""
        runner = _utils.Run(verbose=True, colorize=True)
        tmp_file = tempfile.mkstemp()[1]
        with Capturing() as output:
            runner.run(['ls', tmp_file], stdout=runner.PIPE)
        self.assertIn('\x1b[46m\x1b[37m', output[0])
        self.assertIn('\x1b[0m', output[0])

    def test_method_check_output(self):
        """``check_output`` returns the command's raw byte output."""
        runner = _utils.Run()
        listing = runner.check_output(['ls', '-l'])
        self.assertIn('jfscripts', listing.decode('utf-8'))

    def test_method_run(self):
        """``run`` returns a completed-process object."""
        runner = _utils.Run()
        ls = runner.run(['ls', '-l'], stdout=runner.PIPE)
        self.assertEqual(ls.args, ['ls', '-l'])
        self.assertEqual(ls.returncode, 0)
        self.assertIn('jfscripts', ls.stdout.decode('utf-8'))
class TestCheckBin(TestCase):
    """Tests for ``_utils.check_dependencies``."""

    def test_check_dependencies(self):
        """No exception when every dependency resolves to a binary."""
        with mock.patch('shutil.which') as which:
            which.return_value = '/bin/lol'
            _utils.check_dependencies('lol')

    def test_check_dependencies_nonexistent(self):
        """A single missing command is reported by name."""
        with mock.patch('shutil.which') as which:
            which.return_value = None
            with self.assertRaises(SystemError) as error:
                _utils.check_dependencies('lol')
            self.assertEqual(str(error.exception),
                             'Some commands are not installed: lol')

    def test_check_dependencies_nonexistent_multiple(self):
        """Several missing commands are reported comma separated."""
        with mock.patch('shutil.which') as which:
            which.return_value = None
            with self.assertRaises(SystemError) as error:
                _utils.check_dependencies('lol', 'troll')
            self.assertEqual(str(error.exception),
                             'Some commands are not installed: lol, troll')

    def test_check_dependencies_nonexistent_multiple_with_description(self):
        """A (command, hint) tuple adds the install hint to the message."""
        with mock.patch('shutil.which') as which:
            which.return_value = None
            with self.assertRaises(SystemError) as error:
                _utils.check_dependencies(
                    ('lol', 'apt install lol'),
                    'troll',
                )
            self.assertEqual(str(error.exception),
                             'Some commands are not installed: lol (apt '
                             'install lol), troll')
class TestClassFilePath(TestCase):
    """Tests for the ``FilePath`` helper class."""

    def test_attribute_filename(self):
        path = FilePath('test.jpg')
        self.assertEqual(path.filename, 'test.jpg')

    def test_attribute_extension(self):
        path = FilePath('test.jpg')
        self.assertEqual(path.extension, 'jpg')

    def test_attribute_basename(self):
        # Extensions of different lengths must strip correctly.
        for name in ('test.jpg', 'test.jpeg'):
            self.assertEqual(FilePath(name).basename, 'test')

    def test_attribute_base(self):
        path = FilePath('test.jpg', absolute=True)
        self.assertTrue(path.base.endswith('/test'))

    def test_class_argument(self):
        path = FilePath('test.jpg', absolute=True)
        self.assertEqual(str(path), os.path.abspath('test.jpg'))

    def test_class_magic_method(self):
        self.assertEqual(str(FilePath('test.jpg')), 'test.jpg')

    def test_method_new(self):
        path = FilePath('test.jpg')
        self.assertEqual(str(path.new()), 'test.jpg')
        self.assertEqual(str(path.new(extension='png')), 'test.png')
        self.assertEqual(str(path.new(append='123')), 'test123.jpg')
        self.assertEqual(str(path.new(del_substring='est')), 't.jpg')

    def test_class_magic_method_eq_not_equal(self):
        self.assertFalse(FilePath('test1.jpg') == FilePath('test2.jpg'))

    def test_class_magic_method_eq_equal(self):
        self.assertTrue(FilePath('test.jpg') == FilePath('test.jpg'))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
``` |
{
"source": "Josef-Friedrich/tmep",
"score": 3
} |
#### File: tmep/test/test_functions.py
```python
import unittest
import tmep
class TestFunctions(unittest.TestCase):
    """Exercise every ``%function{...}`` template function of ``tmep``
    against literal arguments and ``$variable`` substitutions."""

    def setUp(self):
        # Variable values available to every template under test.
        self.values = {
            'prename': u'Franz',
            'lastname': u'Schubert',
            'lol': u'lol',
            'troll': u'troll',
            'genres': u'Pop; Rock; Classical Crossover',
            'asciify': u'gennemgår',
            'track': 7,
        }

    def parseEqual(self, a, b):
        """Assert that parsing template ``a`` yields exactly ``b``."""
        self.assertEqual(tmep.parse(a, self.values), b)

    # alpha
    def test_alpha(self):
        self.parseEqual(u'%alpha{abc123}', u'abc ')

    def test_alpha_genres(self):
        self.parseEqual(u'%alpha{$genres}', u'Pop Rock Classical Crossover')

    # alphanum
    def test_alphanum_accent(self):
        self.parseEqual(u'%alphanum{après-évêque}', u'apres eveque')

    def test_alphanum_genres(self):
        self.parseEqual(u'%alphanum{$genres}', u'Pop Rock Classical Crossover')

    def test_alphanum_many(self):
        self.parseEqual(u'%alphanum{a"&(&b}', u'a b')

    # asciify
    def test_asciify_literal(self):
        self.parseEqual(u'%asciify{après évêque}', u'apres eveque')

    def test_asciify_variable(self):
        self.parseEqual(u'%asciify{$asciify}', u'gennemgar')

    def test_asciify_foreign(self):
        self.parseEqual(u'%asciify{Новыя старонкі}', u'<NAME>')

    def test_asciify_german_umlaute(self):
        self.parseEqual(u'%asciify{äÄöÖüÜ}', u'aeAeoeOeueUe')

    def test_asciify_symbols_single(self):
        self.parseEqual(u'%asciify{⚓}', u'')

    def test_asciify_symbols_multiple(self):
        self.parseEqual(u'%asciify{⚢⚣⚤⚥⚦⚧⚨⚩}', u'')

    def test_asciify_symbols_mixed(self):
        self.parseEqual(u'%asciify{a⚢b⚣⚤c}', u'abc')

    # delchars
    def test_delchars_single(self):
        self.parseEqual(u'%delchars{x-x,-}', u'xx')

    def test_delchars_multiple(self):
        self.parseEqual(u'%delchars{x---x,-}', u'xx')

    def test_delchars_no_match(self):
        self.parseEqual(u'%delchars{x-x,_}', u'x-x')

    def test_delchars_multiple_chars(self):
        self.parseEqual(u'%delchars{x_-.x,_-.}', u'xx')

    def test_delchars_unicode(self):
        self.parseEqual(u'%delchars{öd,ö}', u'd')

    def test_delchars_variable(self):
        self.parseEqual(u'%delchars{$lastname,ue}', u'Schbrt')

    # deldupchars
    def test_deldupchars_default(self):
        self.parseEqual(u'%deldupchars{a---b___c...d}', u'a-b_c.d')

    def test_deldupchars_custom(self):
        self.parseEqual(u'%deldupchars{a---b___c,-}', u'a-b___c')

    def test_deldupchars_whitespace(self):
        self.parseEqual(u'%deldupchars{a  a, }', u'a a')

    # first
    def test_first(self):
        self.parseEqual(u'%first{$genres}', u'Pop')

    def test_first_skip(self):
        self.parseEqual(u'%first{$genres,1,2}', u'Classical Crossover')

    def test_first_different_sep(self):
        self.parseEqual(
            u'%first{Alice / Bob / Eve,2,0, / , & }',
            u'Alice & Bob'
        )

    # if
    def test_if_false(self):
        self.parseEqual(u'x%if{,foo}', u'x')

    def test_if_false_value(self):
        self.parseEqual(u'x%if{false,foo}', u'x')

    def test_if_true(self):
        self.parseEqual(u'%if{bar,foo}', u'foo')

    def test_if_else_false(self):
        self.parseEqual(u'%if{,foo,baz}', u'baz')

    def test_if_else_false_value(self):
        self.parseEqual(u'%if{false,foo,baz}', u'baz')

    def test_if_int_value(self):
        self.parseEqual(u'%if{0,foo,baz}', u'baz')

    # ifdef
    def test_if_def_field_return_self(self):
        self.parseEqual(u'%ifdef{lastname}', u'')

    def test_if_def_field_not_defined(self):
        self.parseEqual(u'%ifdef{bar}', u'')

    def test_if_def_true(self):
        self.parseEqual(u'%ifdef{lastname,Its true}', u'Its true')

    def test_if_def_true_complete(self):
        self.parseEqual(u'%ifdef{lastname,lol,troll}', u'lol')

    def test_if_def_false_complete(self):
        self.parseEqual(u'%ifdef{trill,lol,troll}', u'troll')

    # initial
    def test_initial_use_first_character(self):
        self.parseEqual(u'%initial{abc}', u'a')

    def test_initial_german_umlaut(self):
        self.parseEqual(u'%initial{ä}', u'a')

    def test_initial_special_characters(self):
        self.parseEqual(u'%initial{-a-}', u'a')

    def test_initial_nothing(self):
        self.parseEqual(u'%initial{}', u'_')

    def test_initial_number(self):
        self.parseEqual(u'%initial{3}', u'0')

    def test_initial_lower(self):
        self.parseEqual(u'%initial{A}', u'a')

    # left
    def test_left_literal(self):
        self.parseEqual(u'%left{Schubert, 3}', u'Sch')

    def test_left_variable(self):
        self.parseEqual(u'%left{$lastname, 3}', u'Sch')

    # lower
    def test_lower_literal(self):
        self.parseEqual(u'%lower{SCHUBERT}', u'schubert')

    def test_lower_variable(self):
        self.parseEqual(u'%lower{$lastname}', u'schubert')

    # nowhitespace
    def test_nowhitespace(self):
        self.parseEqual(u'%nowhitespace{$genres}',
                        u'Pop;-Rock;-Classical-Crossover')

    def test_nowhitespace_inline(self):
        self.parseEqual(u'%nowhitespace{a b}', 'a-b')

    def test_nowhitespace_multiple(self):
        self.parseEqual(u'%nowhitespace{a  b}', 'a-b')

    def test_nowhitespace_newline_tab(self):
        self.parseEqual(u'%nowhitespace{a\n\tb}', 'a-b')

    def test_nowhitespace_replace_character(self):
        self.parseEqual(u'%nowhitespace{a b,_}', 'a_b')

    def test_nowhitespace_delete(self):
        self.parseEqual(u'%nowhitespace{a b,}', 'ab')

    # num
    def test_num_literal(self):
        self.parseEqual(u'%num{7,3}', u'007')

    def test_num_variable(self):
        self.parseEqual(u'%num{$track,3}', u'007')

    def test_num_default_count(self):
        self.parseEqual(u'%num{7}', u'07')

    def test_num_default_variable(self):
        self.parseEqual(u'%num{$track}', u'07')

    # replchars
    def test_replchars_literal(self):
        self.parseEqual(u'%replchars{Schubert,-,ue}', u'Sch-b-rt')

    def test_replchars_variable(self):
        self.parseEqual(u'%replchars{$lastname,-,ue}', u'Sch-b-rt')

    # right
    def test_right_literal(self):
        self.parseEqual(u'%right{Schubert,3}', u'ert')

    def test_right_variable(self):
        self.parseEqual(u'%right{$lastname,3}', u'ert')

    # sanitize
    def test_sanitize_literal(self):
        self.parseEqual(u'%sanitize{x:*?<>|\/~&x}', u'xx')  # noqa: W605

    # shorten
    def test_shorten_literal(self):
        self.parseEqual(u'%shorten{Lorem ipsum dolor sit,10}', u'Lorem')

    def test_shorten_default(self):
        self.parseEqual(
            u'%shorten{Lorem ipsum dolor sit amet consectetur adipisicing}',
            u'Lorem ipsum dolor sit amet')

    # title
    def test_title_literal(self):
        self.parseEqual(u'%title{franz schubert}', u'<NAME>')

    def test_title_variable(self):
        self.parseEqual(u'%title{$lol $troll}', u'Lol Troll')

    # upper
    def test_upper_literal(self):
        self.parseEqual(u'%upper{foo}', u'FOO')

    def test_upper_variable(self):
        self.parseEqual(u'%upper{$prename}', u'FRANZ')

    # unknown function
    def test_nonexistent_function(self):
        # Unknown functions are left in the output verbatim.
        self.parseEqual(u'%foo{bar}', u'%foo{bar}')
class TestFunctionIfDefEmpty(unittest.TestCase):
    """Tests for ``%ifdefempty{field,trueval[,falseval]}``: the true
    branch is taken when the field is undefined, empty, falsy or
    whitespace-only."""

    def setUp(self):
        # One representative value per emptiness category.
        self.values = {
            'empty_string': u'',
            'false': False,
            'non_empty_string': u'test',
            'none': None,
            'only_whitespaces': u' \t\n',
        }

    def parseEqual(self, a, b):
        """Assert that parsing template ``a`` yields exactly ``b``."""
        self.assertEqual(tmep.parse(a, self.values), b)

    # empty_string
    def test_empty_string(self):
        self.parseEqual(u'%ifdefempty{empty_string,trueval}', u'trueval')

    # false
    def test_false(self):
        self.parseEqual(u'%ifdefempty{false,trueval}', u'trueval')

    # non_empty_string
    def test_non_empty_string(self):
        self.parseEqual(u'%ifdefempty{non_empty_string,trueval,falseval}',
                        u'falseval')

    # nonexistent
    def test_nonexistent(self):
        self.parseEqual(u'%ifdefempty{nonexistent,trueval,falseval}',
                        u'trueval')

    # none
    def test_none(self):
        self.parseEqual(u'%ifdefempty{none,trueval}', u'trueval')

    # only_whitespaces
    def test_only_whitespaces(self):
        self.parseEqual(u'%ifdefempty{only_whitespaces,trueval,falseval}',
                        u'trueval')
class TestFunctionIfDefNotEmpty(unittest.TestCase):
    """Tests for ``%ifdefnotempty{field,trueval[,falseval]}``: the true
    branch is taken only when the field is defined and non-empty."""

    def setUp(self):
        # One representative value per emptiness category.
        self.values = {
            'empty_string': u'',
            'false': False,
            'non_empty_string': u'test',
            'none': None,
            'only_whitespaces': u' \t\n',
        }

    def parseEqual(self, a, b):
        """Assert that parsing template ``a`` yields exactly ``b``."""
        self.assertEqual(tmep.parse(a, self.values), b)

    # empty_string
    def test_empty_string(self):
        self.parseEqual(u'%ifdefnotempty{empty_string,trueval,falseval}',
                        u'falseval')

    # false
    def test_false(self):
        self.parseEqual(u'%ifdefnotempty{false,trueval,falseval}',
                        u'falseval')

    # non_empty_string
    def test_non_empty_string(self):
        self.parseEqual(u'%ifdefnotempty{non_empty_string,trueval,falseval}',
                        u'trueval')

    # nonexistent
    def test_nonexistent(self):
        self.parseEqual(u'%ifdefnotempty{nonexistent,trueval,falseval}',
                        u'falseval')

    # none
    def test_none(self):
        self.parseEqual(u'%ifdefnotempty{none,trueval,falseval}', u'falseval')

    # only_whitespaces
    def test_only_whitespaces(self):
        self.parseEqual(u'%ifdefnotempty{only_whitespaces,trueval,falseval}',
                        u'falseval')
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
``` |
{
"source": "Josef-Gray/adorable-hopelessness",
"score": 3
} |
#### File: Josef-Gray/adorable-hopelessness/adhop.py
```python
import sys
import pygame
import flags
from background import Background
from actor import Player
from stats import Statistics
from mission import MissionList
import screen
def main():
    """Run the game."""
    flags.init_flags("Adorable Hopelessness RPG.")
    # Initialize game and create a background surface object.
    pygame.init()
    bg = Background()
    pygame.display.set_caption("Adorable Hopelessness")
    # Set player name
    player = Player()
    screen.PlayerNameScreen(bg, player).run()
    # Initialize stats
    stats = Statistics()
    # Create mission list
    mission_list = MissionList()
    # Loop ready / results.
    # NOTE(review): this loop has no exit; presumably the screen objects
    # call sys.exit() or similar on quit — confirm.
    while True:
        # Ready to adventure?
        screen.ReadyScreen(bg, player).run()
        # Choose an adventure
        screen.AdventureMenuScreen(bg, mission_list).run()
        # Run adventure
        screen.AdventureResultScreen(bg, stats, player,
                mission_list.get_active_mission()).run()

# Start the game immediately on import/execution.
main()
```
#### File: Josef-Gray/adorable-hopelessness/flags.py
```python
import argparse
import logging
def init_flags(description):
    """Register the standard ``-v/--verbose`` and ``--debug`` command
    line flags and configure the logging level accordingly.

    :param str description: Program description shown in ``--help``.
    """
    parser = argparse.ArgumentParser(description=description)
    for option_flags, help_text in (
            (("-v", "--verbose"), "enable verbose logging"),
            (("--debug",), "enable debug logging")):
        parser.add_argument(*option_flags, help=help_text,
                            action="store_true")
    args = parser.parse_args()
    # --debug wins over --verbose when both are given.
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    elif args.verbose:
        logging.basicConfig(level=logging.INFO)
#### File: Josef-Gray/adorable-hopelessness/mission.py
```python
import logging
from random import randrange, shuffle, choice
from itertools import cycle
import flags
from actor import Actor, Player
# "Constants" for combat result
WIN = 0
LOSE = 1
RETREAT = 2
class Mission():
    """A representation of a combat mission."""

    def __init__(self, title='Title', enemy_name='Enemy'):
        """Initialize mission attributes."""
        # Set the title.
        self.title = title
        # Create an enemy.
        self.enemy = Actor(enemy_name)
        self.enemy.log_properties()
        # Default result is None (combat not yet resolved).
        self.result = None

    def resolve_combat(self, player):
        """Resolve combat.

        Args:
            player: The player character.

        Returns:
            (int) "Constants" WIN, RETREAT, or LOSE
        """
        # Randomize whether player or enemy hits first.
        actors = [player, self.enemy]
        shuffle(actors)
        # Perform combat. i alternates 0, 1, 0, 1, ... so actors[i] is
        # the attacker; actors[i-1] (index -1 or 0) is always the
        # other actor, i.e. the defender.
        for i in cycle(range(2)):
            # End combat if the attacker's hp is at or below its
            # retreat threshold (checked before it strikes).
            if actors[i].hp <= actors[i].max_hp * actors[i].retreat_ratio:
                break
            damage = randrange(
                    actors[i].min_damage, actors[i].max_damage + 1)
            actors[i-1].hp -= damage
            logging.debug(
                    actors[i].name + " hits for " + str(damage) + ". "
                    + actors[i-1].name + " " + str(actors[i-1].hp)
                    + " HP remaining.")
        logging.debug(player.name + ": " + str(player.hp) + "HP")
        logging.debug(self.enemy.name + ": " + str(self.enemy.hp) + "HP")
        # Report results.
        if self.enemy.hp <= 0:
            logging.info(player.name + " won.")
            self.result = WIN
        elif player.hp > 0:
            logging.info(player.name + " withdrew.")
            self.result = RETREAT
        else:
            # Clamp negative HP to zero for display purposes.
            player.hp = 0
            logging.info(player.name + " defeated.")
            self.result = LOSE
        return self.result
class MissionList():
    """A set of runnable missions."""

    def __init__(self):
        """Start with no active mission and a freshly built list."""
        self.active_mission = None
        self.build_mission_list()

    def build_mission_list(self):
        """Populate ``self.missions`` with the available missions."""
        first = Mission('Slay the Rat', 'Rat')
        titles = ['Storm the Castle']
        enemies = ['Goblin']
        second = Mission(choice(titles), choice(enemies))
        self.missions = [first, second]

    def get_active_mission(self):
        """Return the currently active mission (or ``None``)."""
        return self.active_mission

    def set_active_mission(self, mission):
        """Mark ``mission`` as the active mission."""
        self.active_mission = mission
# Execute this only if running as a standalone
if __name__ == "__main__":
    flags.init_flags("Combat mission test module.")
    player = Player()
    mission = Mission()
    # Log the combat outcome (one of the WIN/LOSE/RETREAT constants).
    logging.debug(mission.resolve_combat(player))
``` |
{
"source": "JosefGst/autorace",
"score": 3
} |
#### File: JosefGst/autorace/DataLoader_sequence.py
```python
from PIL import Image
import os
import torch
import torch.utils.data
import pandas
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import csv
import json
import math
import random
seed = 123
class SelfDriveDataset(Dataset):
    """Sequence dataset for self-driving training: each item bundles a
    short run of consecutive camera frames with the steering angle and
    throttle recorded at the *last* frame of the run."""

    def __init__(self, dataset_list_list_dicts, transform = None):
        # dataset_list_list_dicts: list of sequences; each sequence is a
        # list of per-frame dicts with at least the keys 'image_path',
        # 'angle' and 'throttle'.
        self.dataset_list_list_dicts = dataset_list_list_dicts
        self.transform = transform

    def __len__(self):
        return len(self.dataset_list_list_dicts)

    def __getitem__(self, idx):
        this_data = self.dataset_list_list_dicts[idx]
        rgbs = []
        for d in this_data: # this_data is a list of cfg.SEQUENCE_LENGTH dicts, each dict is a record of one-frame data.
            rgb_path = d['image_path']
            rgb = Image.open(rgb_path)
            if self.transform is not None: # add noise to the dataset...just for training...
                rgb = self.transform(rgb)
            rgbs.append(rgb)
        # Stack per-frame tensors along a new leading dim; presumably
        # the result is (sequence_length, C, H, W) — TODO confirm.
        rgbs = torch.stack( [transforms.ToTensor()(rgbs[k]) for k in range(len(rgbs))], dim=0 )
        # Supervision targets come from the last frame of the sequence.
        future_steer = np.array(this_data[-1]['angle'])
        future_throttle = np.array(this_data[-1]['throttle'])
        sample = {'rgb': rgbs,
                  'steering': torch.from_numpy(future_steer).float(),
                  'throttle': torch.from_numpy(future_throttle).float()}
        return sample
def load_split_train_valid(cfg, train_data_list_list_dicts, val_data_list_list_dicts, num_workers=2):
    """Build shuffled train/validation ``DataLoader`` pairs from
    sequence records.

    :param cfg: Config object; reads ``BATCH_SIZE`` and
        ``COLOR_JITTER_TRANSFORMS``.
    :param train_data_list_list_dicts: Training sequences (see
        ``SelfDriveDataset``).
    :param val_data_list_list_dicts: Validation sequences.
    :param num_workers: Worker processes per loader.
    :return: ``(trainloader, validloader)``
    """
    batch_size = cfg.BATCH_SIZE
    if cfg.COLOR_JITTER_TRANSFORMS:
        # Photometric noise is applied to training data only; the
        # validation set stays untouched.
        train_transforms = transforms.Compose([transforms.ColorJitter(brightness=0.5, contrast=0.3, saturation=0.3, hue=0.3)]) # add image noise later...
        print('using COLOR_JITTER_TRANSFORMS during training...')
    else:
        train_transforms = None
    train_data = SelfDriveDataset(train_data_list_list_dicts,transform=train_transforms)
    valid_data = SelfDriveDataset(val_data_list_list_dicts,transform=None)
    trainloader = DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
    validloader = DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
    return trainloader, validloader
``` |
{
"source": "josefigueredo/telegram-py3-aws-serverless-poc",
"score": 3
} |
#### File: josefigueredo/telegram-py3-aws-serverless-poc/handler.py
```python
import json
import os
import sys
# Make the bundled third-party packages in ./vendored importable before
# ``requests`` (shipped there for AWS Lambda) is imported below.
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(here, "./vendored"))
import requests
# The Telegram bot token is injected through the Lambda environment.
TOKEN = os.environ['TELEGRAM_TOKEN']
BASE_URL = "https://api.telegram.org/bot{}".format(TOKEN)
def hello(event, context):
    """AWS Lambda entry point for a Telegram webhook.

    Parses the incoming Telegram update, replies to the sender through
    the Bot API, and always reports HTTP 200 back to API Gateway so
    Telegram does not re-deliver the update.

    :param dict event: API Gateway proxy event; the Telegram update is
        the JSON document in ``event["body"]``.
    :param context: Lambda context object (unused).
    :return: ``{"statusCode": 200}``
    """
    try:
        update = json.loads(event["body"])
        message = str(update["message"]["text"])
        chat_id = update["message"]["chat"]["id"]
        first_name = update["message"]["chat"]["first_name"]
        response = "Please /start, {}".format(first_name)
        if "start" in message:
            response = "Hello {}".format(first_name)
        # Separate name for the outgoing payload (the original reused
        # and shadowed ``data``).
        payload = {"text": response.encode("utf8"), "chat_id": chat_id}
        url = BASE_URL + "/sendMessage"
        # Bound the request so a slow Telegram API cannot eat the whole
        # Lambda timeout; a timeout falls through to the handler below.
        requests.post(url, payload, timeout=10)
    except Exception as e:
        # Deliberate best effort: log and still return 200 so Telegram
        # does not keep retrying a malformed update.
        print(e)
    return {"statusCode": 200}
``` |
{
"source": "Josefiino/Anki-Def-Updater",
"score": 3
} |
#### File: Josefiino/Anki-Def-Updater/Load_Dictionaries_2.py
```python
import os
import json
import re
# Getting dictionary paths.
file_path = os.getcwd()
# NOTE(review): the hand-built backslash path only works on Windows;
# consider os.path.join for portability.
dict_path = file_path + "\Dictionaries\大辞林"
# dict_path = file_path + "\Dictionaries\jmdict_english"
# Creates a list of the dictionary files, fixing the order
def get_file_list(path):
    """Return the dictionary file names of ``path`` in their natural
    load order.

    ``os.listdir`` gives no guaranteed order, so the names are rebuilt
    from the file count: ``index.json`` first, then
    ``term_bank_1.json`` ... ``term_bank_{n-1}.json``.

    :param str path: Directory containing the dictionary files.
    :return: Ordered file names; empty list for an empty directory
        (the original raised ``IndexError`` in that case).
    :rtype: list
    """
    count = len(os.listdir(path))
    if count == 0:
        return []
    return ['index.json'] + [f'term_bank_{i}.json' for i in range(1, count)]
# Load Dictionaries based on the list of files into memory
def load_dict(list_of_files):
    """Load every dictionary JSON file into memory.

    :param list list_of_files: File names inside the module-level
        ``dict_path`` directory (e.g. from ``get_file_list``).
    :return: One parsed JSON document (a list of entries) per file.
    :rtype: list
    """
    all_jsons = []
    # os.path.join instead of a hand-built "\\" path (portable), and a
    # distinct handle name (the original shadowed its loop variable
    # ``file`` with the open file object).
    for name in list_of_files:
        with open(os.path.join(dict_path, name), encoding="utf-8") as handle:
            all_jsons.append(json.load(handle))
    print("------------------------------\nLoaded ", len(all_jsons), 'json files\n------------------------------')
    for i, list_item in enumerate(all_jsons):
        print(list_of_files[i], '--> ', len(list_item), 'entries.')
    print("------------------------------")
    return all_jsons
# Query Dictionary
def query_dict(list_entries, query):
result = []
for bank in list_entries:
for entry in bank:
if entry[0] == query:
def_str = ' '.join(map(str, entry[5]))
def_list = def_str.split('\n')
def_list += def_list.pop()
# # special case for jmdict
# if len(entry) == 8 and type(entry[4]) == type(21):
# def_list = [definition for definition in entry[5]]
# result.append(def_list)
# continue
## if you wan to add the term and reading in the results
# def_list.insert(0,f"{entry[0]} ")
# def_list.insert(1, f"{entry[1]}")
result.append(def_list)
# print(len(result))
return result
# Print results decently
def print_results(results_list):
for result in results_list:
for element in result:
print(element)
# Try to remove example sentences
# new_ele = re.sub('\。\「.*$', "", element)
# print(new_ele)
``` |
{
"source": "JoseFilipeFerreira/merossd",
"score": 2
} |
#### File: JoseFilipeFerreira/merossd/server.py
```python
import asyncio
from quart import Quart
from meross import MerossWrapper
app = Quart(__name__)
light = MerossWrapper()
async def reload_if_exception(f):
    """Await the coroutine produced by ``f``; on any failure, rebuild
    the global ``light`` wrapper and return the error text instead.

    :param f: Zero-argument callable returning an awaitable,
        e.g. ``lambda: light.on()``.
    """
    try:
        return await f()
    except Exception as e:
        print(e)
        # Replace the device wrapper so the next request starts from a
        # fresh, reconnectable state.
        global light
        light = MerossWrapper()
        return str(e)
@app.route('/bulb/')
async def status():
    """Report the bulb's current state."""
    return light.status()

@app.route('/bulb/on')
async def on():
    """Turn the bulb on, rebuilding the wrapper on failure."""
    return await reload_if_exception(lambda: light.on())

@app.route('/bulb/off')
async def off():
    """Turn the bulb off, rebuilding the wrapper on failure."""
    return await reload_if_exception(lambda: light.off())

@app.route('/bulb/toggle')
async def toggle():
    """Toggle the bulb, rebuilding the wrapper on failure."""
    return await reload_if_exception(lambda: light.toggle())

# Connect to the device once, then serve Quart on the same event loop.
loop = asyncio.get_event_loop()
loop.run_until_complete(light.connect())
app.run(host='0.0.0.0', port=4200, loop=loop)
``` |
{
"source": "JoseFilipeFerreira/Nautilus-wallpaper",
"score": 3
} |
#### File: JoseFilipeFerreira/Nautilus-wallpaper/nautilus.py
```python
import requests
import discord
import asyncio
import sys
import os
from discord.ext import commands
from datetime import datetime, timedelta
from operator import itemgetter
from PIL import Image, ImageOps, ImageDraw
bot = commands.Bot(command_prefix = '|')
bot.remove_command('help')
def main():
    """Start the Discord bot with the token from the local ``auth`` file."""
    # Run relative to the script directory so 'auth' resolves.
    os.chdir(sys.path[0])
    bot.run(open('auth').readline().rstrip())
@bot.event
async def on_ready():
    """Collect per-user message statistics and render the wallpaper
    foreground image (``foreground.png``).

    Command line contract (not validated here):
        sys.argv[1]  name of the Discord server to scan
        sys.argv[2]  look-back window in days
        sys.argv[3]  target size of the output image in pixels
    """
    stats = {}
    pastTimeStamp = datetime.now() - timedelta(days=int(sys.argv[2]))
    # Get server
    s = None
    for server in bot.guilds:
        if server.name == sys.argv[1]:
            s = server
    if s == None:
        print("No server found")
        await bot.logout()
        return
    # Get server stats
    for channel in s.channels:
        if channel.type == discord.ChannelType.text:
            lastMessage = None
            total = 0
            read = 0
            size = 100
            # Page through history 100 messages at a time; a short page
            # (size < 100) means the channel is exhausted.
            while size == 100:
                size = 0
                messages = await channel.history(
                        before = lastMessage,
                        limit = 100).flatten()
                for msg in messages:
                    size += 1
                    if msg.created_at > pastTimeStamp and not msg.author.bot:
                        read += 1
                        if msg.author.id in stats:
                            stats[msg.author.id]["msg"] += 1
                        else:
                            stats[msg.author.id] = {
                                "url": str(msg.author.avatar_url_as(format="png")),
                                "msg": 1}
                    lastMessage = msg
                    # Stop paging once messages fall outside the window.
                    if lastMessage != None and msg.created_at < pastTimeStamp:
                        size = 0
                        break
                total += size
            if read > 0:
                print("{0}:\t{1}".format(channel.name, read))
    # Sort server stats by message count, busiest user first.
    print("----------------")
    sortedStats = []
    for id in stats.keys():
        sortedStats.append({
            "id": id,
            "url": stats[id]["url"],
            "msg": stats[id]["msg"]})
    sortedStats = sorted(sortedStats, key=itemgetter('msg'), reverse = True)
    print("Individual Users: {}".format(len(sortedStats)))
    print("----------------")
    # get top 8 users
    sortedStats = sortedStats[:8]
    for stat in sortedStats:
        print("{0}:\t{1}".format(
            s.get_member(stat["id"]).display_name,
            stat["msg"]))
    # Download the avatar of each top user into ./tmp.
    print("----------------")
    for stat in sortedStats:
        stat["path"] = f"tmp/{stat['id']}.png"
        print(f'Dowloading {stat["path"]}')
        r = requests.get(stat["url"], allow_redirects=True)
        open(stat["path"], 'wb').write(r.content)
    # Generate the composite image on a Fibonacci-spiral layout.
    scl, pos = fib(8), positions(8)
    x, y = scl[0] + scl[1], scl[0]
    factor = round((int(sys.argv[3])/x) * 2)
    x, y = x * factor, y * factor
    total = Image.new('RGBA', (x,y) , (0, 0, 0, 0))
    for n in range(0, 8):
        profileImage, mask = getProfilePic(sortedStats[n], scl[n]*factor)
        total.paste(profileImage, box=multTuple(pos[n], factor), mask=mask)
    total.save("foreground.png", "PNG")
    await bot.logout()
def getProfilePic(user, scale):
    """Load a user's avatar and build a circular alpha mask for it.

    The mask is drawn at 3x resolution and scaled down so the circle
    edge comes out anti-aliased.

    :param dict user: Stat record whose ``"path"`` key points to the
        avatar file on disk.
    :param int scale: Side length in pixels of the returned square image.
    :return: ``(image, mask)``, both ``scale`` x ``scale``.
    """
    avatar = Image.open(user["path"])
    oversized = (avatar.size[0] * 3, avatar.size[1] * 3)
    mask = Image.new('L', oversized, 0)
    ImageDraw.Draw(mask).ellipse((0, 0) + oversized, fill=255)
    avatar = avatar.resize((scale, scale))
    mask = mask.resize(avatar.size, Image.ANTIALIAS)
    return avatar, mask
def scale(background, total, scl):
    """Resize ``total`` to fit inside ``background``, then apply the
    extra scale factor ``scl``."""
    ratio = min(background.height / total.height,
                background.width / total.width)
    new_size = (int(total.width * ratio * scl), int(total.height * ratio * scl))
    return total.resize(new_size)
def fib(n):
    """Return the first ``n`` Fibonacci numbers in descending order.

    Mirrors the original quirk that ``fib(1)`` yields ``[0]``.
    """
    if n <= 0:
        return []
    if n == 1:
        return [0]
    seq = [1, 1]
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2])
    return seq[::-1]
def positions(val):
    """Compute the top-left corner of each tile in the Fibonacci-spiral
    layout, in the same (unscaled) units as ``fib(val)``.

    Each iteration of the loop places four tiles — one per quarter turn
    of the spiral — relative to previously placed corners.
    NOTE(review): the offsets index ``l[n+1]``/``l[n+2]``, so this is
    written for ``val == 8``; confirm before calling with other sizes.
    """
    l = fib(val)
    coor = [(0,0)]
    for n in range (1, val, 4):
        coor.append(
            addTuple(
                coor[n-1],
                (l[n-1], 0)))
        coor.append(
            addTuple(
                coor[n],
                (l[n+2], l[n])))
        coor.append(
            addTuple(
                coor[n-1],
                addTuple(
                    (l[n-1], l[n-1]),
                    (0, -l[n+2]))))
        coor.append(
            addTuple(
                coor[n-1],
                addTuple(
                    (l[n-1], l[n-1]),
                    (0, -l[n+1]))))
    return coor
def addTuple(t1, t2):
    """Element-wise sum of two tuples (truncated to the shorter one)."""
    return tuple(a + b for a, b in zip(t1, t2))
def multTuple(t, f):
    """Scale a 2-tuple by the factor ``f``."""
    first, second = t
    return (first * f, second * f)
main()
``` |
{
"source": "JoseFilipeFerreira/piCalc",
"score": 4
} |
#### File: random/squareCircle/squareCircle.py
```python
import sys
import random
from math import sqrt
def pointDist(x1, y1, x2, y2):
return sqrt((x2 - x1)**2 + (y2 - y1)**2)
def main():
total = 0
inside = 0
for i in range(0, int(sys.argv[1])):
total += 1
x = random.random()
y = random.random()
if pointDist(x, y, 0.5, 0.5) <= 0.5:
inside += 1
print(4*(inside/total))
main()
``` |
{
"source": "josefina2206/hackerrank",
"score": 3
} |
#### File: warmup/solve-me-first/solution.py
```python
def solve_me_first(a, b):
    """Return the sum of the two integers ``a`` and ``b``."""
    return a + b
# Hint (translated): input() reads a line from stdin and int() converts
# it to an integer.
num1 = int(input())
num2 = int(input())
res = solve_me_first(num1, num2)
# Hint (translated): print() writes to stdout.
print(res)
{
"source": "JosefinaMedina/Deep-Learning-2021-P2",
"score": 3
} |
#### File: JosefinaMedina/Deep-Learning-2021-P2/RN_Perceptron.py
```python
import numpy as np
from matplotlib import pyplot as plt
from grafica import *
def entrena_Perceptron(X, T, alfa, MAX_ITE, dibuja=1, titulos=[]):
    """Train a single perceptron with the classic error-correction rule.

    :param X: (n_examples, n_attributes) input matrix.
    :param T: target labels (0/1), one per example.
    :param alfa: learning rate.
    :param MAX_ITE: maximum number of training epochs.
    :param dibuja: when truthy, plot the points and the decision line
        after every epoch (via the ``grafica`` helpers).
    :param titulos: axis titles forwarded to the plotting helper.
    :return: ``[W, b, ite]`` — final weights, bias, and epochs used.
    """
    # Input sizes.
    nCantEjemplos = X.shape[0]  # number of rows (examples)
    nAtrib = X.shape[1]  # number of columns (attributes)
    # Initialize the decision line with small random weights.
    W = np.random.uniform(-0.5, 0.5, size=nAtrib)
    b = np.random.uniform(-0.5, 0.5)
    if dibuja:  # plot
        dibuPtos(X, T, titulos)
        ph = dibuRecta(X, W, b)
    hubo_cambio=True
    ite=0
    # Train until one full epoch makes no corrections (convergence) or
    # the iteration budget runs out.
    while (hubo_cambio and (ite<MAX_ITE)):
        hubo_cambio=False
        ite = ite + 1
        # For each example:
        for i in range(nCantEjemplos):
            # Compute the output (vectorized; the explicit-loop form is
            # kept below as reference):
            # neta=b
            # for j in range(nAtrib):
            #     neta = neta + W[j] * X[i,j]
            neta = b + sum(W * X[i,:])
            y = 1 * (neta>0)
            # If the prediction is wrong, correct W and b.
            if not(y==T[i]):
                hubo_cambio=True
                # Update the weights W and bias b (vectorized; loop
                # form kept as reference):
                # for j in range(nAtrib):
                #     W[j] = W[j] + alfa *(T[i]-y)*X[i,j]
                W = W + alfa *(T[i]-y)*X[i,:]
                b = b + alfa *(T[i]-y)
        if dibuja:  # plot
            print(ite)
            ph = dibuRecta(X, W, b, ph)
    return([W,b,ite])
def aplica_Perceptron(X, W, b):
    """Apply a trained perceptron to every row of ``X``.

    :param X: (n_examples, n_attributes) input matrix.
    :param W: weight vector of length n_attributes.
    :param b: bias term.
    :return: list with the 0/1 output for each example.
    """
    n_ejemplos, n_atrib = X.shape
    salidas = []
    for e in range(n_ejemplos):
        # Weighted sum of this example's attributes plus the bias.
        neta = b
        for j in range(n_atrib):
            neta += W[j] * X[e, j]
        salidas.append((neta > 0) * 1)
    # ---- Vectorized alternative ----
    # netas = W @ X.T + b
    # salidas = 1 * (netas > 0)
    return salidas
``` |
{
"source": "JosefinaMedina/EjerciciosComputacion-Python",
"score": 4
} |
#### File: JosefinaMedina/EjerciciosComputacion-Python/P3_8b.py
```python
# Read the x/y components of the two 2-D points from stdin
# (prompts are in Spanish and left untouched).
x1=int(input("Elemento x del primer vector: "))
y1=int(input("Elemento y del primer vector: "))
x2=int(input("Elemento x del segundo vector: "))
y2=int(input("Elemento y del segundo vector: "))
def resta1(x1, x2):
    """Difference of the x components: second minus first."""
    return x2 - x1
def resta2(y1, y2):
    """Difference of the y components: second minus first."""
    return y2 - y1
# The displacement vector from point 1 to point 2, printed as a tuple.
vector=(resta1(x1,x2),resta2(y1,y2))
print(vector)
``` |
{
"source": "JosefineAtMath/BoundaryWavelets",
"score": 3
} |
#### File: BoundaryWavelets/test/DataTest.py
```python
import scipy.io as sp
import numpy as np
import pywt
import matplotlib.pyplot as plt
import ReconFunctions as RF
def TestPlot(Name='Data', Row=1, Section=214, J=7, N=12, Wavelet='db3'):
    '''
    This function makes decompositions and reconstructions of a chosen
    section of the data, both with boundary wavelets and with mirrored
    extension. The difference between the original signal and the two
    reconstructions are calculated and printed and all three signals
    are plotted in the same figure.

    INPUT:
        Name : str
            The MATLAB data file from which to load.
        Row : int
            The row in the dataset to use.
        Section : int
            Which section of the data to use. The samples that will be
            used are: `[Section*2**N:Section*2**N+2**N]`.
        J : int
            The scale.
        N : int
            The number of iterations to use in the cascade algorithm.
        Wavelet : str
            The name of the wavelet to be used. eg: `'db2'`.
    '''
    data = sp.loadmat(Name)
    # Downsample the level-14 cascade approximation of the scaling
    # function to 2**N samples.
    phi = pywt.Wavelet(Wavelet).wavefun(level=14)[0][1:]
    phi = phi[::2**(14-N)]
    Signal = data['val'][Row, Section*2**N:Section*2**N+2**N]
    x1 = RF.DecomBoundary(Signal, J, Wavelet, phi)
    NewSignal1 = np.real(RF.ReconBoundary(x1, J, Wavelet, phi))
    x2 = RF.DecomMirror(Signal, J, Wavelet, phi)
    NewSignal2 = np.real(RF.ReconMirror(x2, J, Wavelet, phi))
    # Per-sample l2 reconstruction error for both methods.
    dif1 = np.sum(np.abs(Signal-NewSignal1)**2)**(1/2)/2**N
    dif2 = np.sum(np.abs(Signal-NewSignal2)**2)**(1/2)/2**N
    print(dif1, dif2)
    plt.figure()
    plt.plot(Signal, label='Original')
    plt.plot(NewSignal1, label='Boundary wavelets')
    plt.plot(NewSignal2, label='Mirror')
    plt.xlabel('Sample index')
    plt.legend()
    return
def Test(Name='Data', Row=1, J=7, N=12, Wavelet='db3'):
    '''
    This function makes decompositions and reconstructions of several
    sections of the data, both with boundary wavelets and with
    mirrored extension. The differences between the original signal and
    the two reconstructions are calculated. The test is run for as
    many disjoint sections of the signal as possible.

    INPUT:
        Name : str
            The MATLAB data file from which to load.
        Row : int
            The row in the dataset to use.
        J : int
            The scale.
        N : int
            The number of iterations to use in the cascade algorithm.
        Wavelet : str
            The name of the wavelet to be used. eg: `'db2'`.
    OUTPUT:
        Result : float64
            2D array. The first row is the difference between the
            original signal and the reconstruction using boundary
            wavelet. The second row is the difference between the
            original signal and the reconstruction using mirrored
            extension. The third row is the first row minus the second
            row. There is one collumn for each section of the signal.
    '''
    data = sp.loadmat(Name)
    # Downsample the level-14 cascade approximation of the scaling
    # function to 2**N samples.
    phi = pywt.Wavelet(Wavelet).wavefun(level=14)[0][1:]
    phi = phi[::2**(14-N)]
    n = 0
    # Number of disjoint 2**N-sample sections available in this row.
    tests = int(len(data['val'][Row])/2**N)
    Result = np.zeros((3, tests))
    for i in range(tests):
        Signal = data['val'][Row, n:n+2**N]
        x1 = RF.DecomBoundary(Signal, J, Wavelet, phi)
        x2 = RF.DecomMirror(Signal, J, Wavelet, phi)
        NewSignal1 = np.real(RF.ReconBoundary(x1, J, Wavelet, phi))
        NewSignal2 = np.real(RF.ReconMirror(x2, J, Wavelet, phi))
        # Per-sample l2 reconstruction error for both methods.
        Result[0, i] = np.sum(np.abs(Signal-NewSignal1)**2)**(1/2)/2**N
        Result[1, i] = np.sum(np.abs(Signal-NewSignal2)**2)**(1/2)/2**N
        n += 2**N
    Result[2] = Result[0]-Result[1]
    plt.figure()
    plt.plot(Result[1], label='Mirror', color='C1')
    plt.plot(Result[0], label='Boundary', color='C0')
    plt.xlabel('Test signal')
    plt.ylabel('Difference')
    plt.legend()
    return Result
# Run the plotting demo when executed directly; the batch test is
# disabled by default because it is slow.
if __name__ == '__main__':
    TestPlot()
    # Test = Test()
```
#### File: BoundaryWavelets/test/ReconTest.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pywt
import ReconFunctions as RF
import boundwave.Orthonormal as Ot
def TestOfConFunc(J=3):
    """
    Test and plot the reconstruction of a constant and of a linear
    function from boundary-wavelet coefficients at scale J.
    """
    Wavelet = 'db2'
    # Low-pass (scaling) filter, reversed for the convolution convention used here.
    WaveletCoef = np.flipud(pywt.Wavelet(Wavelet).dec_lo)
    phi = pywt.Wavelet(Wavelet).wavefun(level=15)[0][1:]
    # Orthonormalisation matrices for the left and right boundary functions.
    AL, AR = Ot.OrthoMatrix(J, WaveletCoef, phi)
    # Coefficients that should reconstruct the constant function f(x) = 1:
    # interior coefficients are 1; the edge coefficients are rescaled by the
    # orthonormalisation and the second coefficient at each edge is zeroed.
    x = np.ones(2**J)
    x[1], x[-1] = 0, 0
    x[0] = 1/AL[0, 0]
    x[-2] = 1/AR[0, 0]
    res = RF.ReconBoundary(x, J, Wavelet, phi)
    plt.figure()
    plt.plot(np.linspace(0, 2**J, 2**J, endpoint=True), np.ones(2**J),
             label=r'$f(x)$')
    plt.plot(np.linspace(0, 2**J, len(res), endpoint=True),
             np.real(res), ls='dashed',
             label=r'$\tilde{f}(x)$')
    plt.xlabel(r'$x$')
    print('Distance between signals:', np.linalg.norm(res-np.ones(len(res))))
    print('x=', x)
    import scipy.stats as stats
    # Second experiment: reconstruct a linear function. Start from linearly
    # increasing coefficients and estimate the interior slope of the
    # reconstruction from two samples well away from the boundaries.
    x = np.linspace(0, 2**J, 2**J, endpoint=False)
    phi = pywt.Wavelet(Wavelet).wavefun(level=15)[0][1:]
    res = RF.ReconBoundary(x, J, Wavelet, phi)
    # NOTE(review): the sample indices 19980/20000 (and 20/-21 below) assume
    # a particular reconstruction length -- confirm for other J/levels.
    x1 = np.linspace(0, 2**J, len(res))[19980]
    y1 = res[19980]
    x2 = np.linspace(0, 2**J, len(res))[20000]
    y2 = res[20000]
    # Interior slope a and intercept b of the reconstructed line.
    a = (y2-y1)/(x2-x1)
    b = y2-(a*x2)
    print('a = ', a, 'b = ', b)
    y_end = a*2**J+b
    # Slopes near the two boundaries; their ratio to the interior slope is
    # used to correct the second coefficient at each edge.
    a1 = (res[20]-res[0])/(x2-x1)
    a2 = (res[-1]-res[-21])/(x2-x1)
    r = a/a1
    r2 = a/a2
    x[1] = np.real(r)*x[1]
    x[-1] = x[-1]*np.real(r2)
    res2 = RF.ReconBoundary(x, J, Wavelet, phi)
    # The endpoint values depend linearly on the first/last coefficients:
    # sample that linear map at 15 points and regress to solve for the
    # coefficient values that produce the target endpoint values b and y_end.
    T_start = np.zeros(15, dtype=complex)
    RES_start = np.zeros(15, dtype=complex)
    T_end = np.zeros(15, dtype=complex)
    RES_end = np.zeros(15, dtype=complex)
    for i in range(15):
        t = i*0.1
        x[0] = t
        x[-2] = t
        res2 = RF.ReconBoundary(x, J, Wavelet, phi)
        T_start[i] = t
        T_end[i] = t
        RES_start[i] = res2[0]
        RES_end[i] = res2[-1]
    Stat_start = stats.linregress(T_start, RES_start)
    Stat_end = stats.linregress(T_end, RES_end)
    # Invert the fitted lines: slope = Stat[0], intercept = Stat[1].
    t2 = (b-Stat_start[1])/Stat_start[0]
    t3 = (y_end-Stat_end[1])/Stat_end[0]
    x[0] = np.real(t2)
    x[-2] = np.real(t3)
    res2 = RF.ReconBoundary(x, J, Wavelet, phi)
    plt.plot(np.linspace(0, 2**J, 2**J),
             np.real(a*np.linspace(0, 2**J, 2**J, dtype=complex)+b),
             label=r'True Linear Signal')
    plt.plot(np.linspace(0, 2**J, len(res2)), np.real(res2), ls='dashed',
             label=r'Wavelet Reconstruction of Line')
    plt.legend()
    print('Distance between signals:',
          np.linalg.norm(
              np.real(res2) -
              np.real(a*np.linspace(0, 2**J, len(res2), dtype=complex)+b)))
    print('x=', x)
if __name__ == '__main__':
    # Manual test entry point: reconstruct constant and linear functions.
    TestOfConFunc()
``` |
{
"source": "josefkerner/spark_vse",
"score": 3
} |
#### File: notebooks/databricks/04_Use_cases.py
```python
from pyspark.sql import SparkSession
# Entry point to Spark; reuses an active session if one already exists.
spark = SparkSession.builder.appName("use_cases").getOrCreate()
# COMMAND ----------
# MAGIC %md
# MAGIC # 1. PTB model in telco
# MAGIC For this project we developed near complete automated pipeline. Here is one example of custom transformer, it's called Sampling transformer and is used to oversampling/undersampling data for classification.
# COMMAND ----------
from pyspark.ml import Transformer, Estimator, Pipeline
from pyspark.sql.functions import column as col
from pyspark.sql.functions import explode, array, lit
class SamplingTransformer(Transformer):
    """Custom Spark ML transformer that splits a dataset into train/test
    while oversampling the minority class (by row replication) and
    optionally undersampling the majority class, for imbalanced binary
    classification."""
    def __init__(self, labelCol='label', split=None, oversampling=10, undersampling=1, minority=1):
        """ This is a custom oversampling transformer used to oversample positive or negative observation
        Attributes:
            labelCol (str): Column with the target variable. Defaults to 'label'.
            split (list(float)): Train/test split ratio. Defaults to [0.7, 0.3].
            oversampling (int): Factor of the oversampling. Defaults to 10.
            undersampling (int): Factor of the undersampling. Defaults to 1.
            minority (int): Which label value is the minority to be sampled. Defaults to 1.
        """
        super(SamplingTransformer, self).__init__()
        # Assign the constructor variables.
        self.labelCol = labelCol
        # Avoid the mutable-default-argument pitfall: build the default list
        # per instance instead of sharing one list across all instances.
        self.split = [0.7, 0.3] if split is None else split
        self.oversampling = oversampling
        # Undersampling factors below 1 would inflate the train fraction.
        if undersampling < 1:
            print("undersampling must be greater than 1")
            undersampling = 1
        self.undersampling = undersampling
        self.minority = minority
    def sample(self, dataset):
        """Split `dataset` into (train, test), oversampling the minority
        class in the training part by replicating its rows `oversampling`
        times, and undersampling the majority class by dividing its train
        fraction by `undersampling`.

        Fix: the configured `split` ratio is now actually used; previously
        [0.7, 0.3] was hard-coded in every randomSplit call, silently
        ignoring the constructor argument.
        """
        train_frac, test_frac = self.split[0], self.split[1]
        # Replicating rows via explode(array(...)) is the fastest way to
        # oversample in pySpark.
        samples = range(0, self.oversampling)
        if self.minority == 1:
            # Split the positive (minority) observations.
            train_data_positive, test_data_positive = dataset.filter(col(self.labelCol) == 1).randomSplit([train_frac, test_frac])
            # Split the negative (majority) observations, undersampling train.
            train_data_negative, test_data_negative = dataset.filter(col(self.labelCol) == 0).randomSplit([(train_frac / self.undersampling), test_frac])
            train_data_positive = train_data_positive.withColumn("dummy", explode(array([lit(x) for x in samples]))).drop("dummy")
        else:
            # Minority is the negative class: mirror the logic above.
            train_data_positive, test_data_positive = dataset.filter(col(self.labelCol) == 1).randomSplit([(train_frac / self.undersampling), test_frac])
            train_data_negative, test_data_negative = dataset.filter(col(self.labelCol) == 0).randomSplit([train_frac, test_frac])
            train_data_negative = train_data_negative.withColumn("dummy", explode(array([lit(x) for x in samples]))).drop("dummy")
        # Union positive and negative train/test dataframes.
        train_data = train_data_negative.union(train_data_positive)
        test_data = test_data_negative.union(test_data_positive)
        return (train_data, test_data)
    def _transform(self, dataset):
        # Transformer API entry point: returns a (train, test) tuple.
        return self.sample(dataset)
# COMMAND ----------
# Load the pre-cleaned churn dataset produced by an earlier notebook step.
raw_data = spark.read.parquet('/appl/wsp_data_science_workshop/cleanData_churn')
# COMMAND ----------
# MAGIC %md
# MAGIC See how much of each target variable we have:
# COMMAND ----------
raw_data.groupBy('label').count().show()
# COMMAND ----------
# MAGIC %md
# MAGIC Now we want to oversample the label = 1 by a factor of 3:
# COMMAND ----------
st = SamplingTransformer(oversampling=3)
train_data, test_data = st.transform(raw_data)
# COMMAND ----------
train_data.groupBy('label').count().show()
# COMMAND ----------
test_data.groupBy('label').count().show()
# COMMAND ----------
# MAGIC %md
# MAGIC # 2. Recommendation engine - usage of config
# MAGIC Engine need to run for different segments. Rather than duplicating and making small changes to code, we use Config file.
# COMMAND ----------
from configparser import ConfigParser, ExtendedInterpolation
# ExtendedInterpolation enables ${section:key} references inside config.ini.
config = ConfigParser(interpolation=ExtendedInterpolation())
config.read('config/config.ini')
# COMMAND ----------
config['MicroClustering']['output_table']
# COMMAND ----------
config['MasterParameters']['date_valid']
# COMMAND ----------
```
#### File: spark_vse/src/mlProcessor.py
```python
from pyspark.sql.types import StructField,StructType,FloatType
from pyspark.sql import DataFrame
import pyspark.sql.functions as F
from pyspark.sql.functions import col
class MLProcessor:
def __init__(self):
pass
    def load_data(self) -> DataFrame:
        """Load the California housing CSV into a cached Spark DataFrame.

        NOTE(review): relies on a global `spark` session that is not defined
        in this module -- it must be provided by the importing environment.
        """
        HOUSING_DATA = '../input/cal_housing.data'
        # Explicit schema: the raw file has no header row.
        schema = StructType([
            StructField("long", FloatType(), nullable=True),
            StructField("lat", FloatType(), nullable=True),
            StructField("medage", FloatType(), nullable=True),
            StructField("totrooms", FloatType(), nullable=True),
            StructField("totbdrms", FloatType(), nullable=True),
            StructField("pop", FloatType(), nullable=True),
            StructField("houshlds", FloatType(), nullable=True),
            StructField("medinc", FloatType(), nullable=True),
            StructField("medhv", FloatType(), nullable=True)]
        )
        housing_df = spark.read.csv(path=HOUSING_DATA, schema=schema).cache()
        return housing_df
    def exploratory_analysis(self, housing_df:DataFrame):
        """Print basic structure and distributions of the housing data."""
        housing_df.take(5)
        housing_df.show(5)
        print(housing_df.columns)
        housing_df.printSchema()
        housing_df.select('pop', 'totbdrms').show(10)
        # group by housingmedianage and see the distribution
        result_df = housing_df.groupBy("medage").count().sort("medage", ascending=False)
        result_df.show(10)
        # Converts to pandas for matplotlib; requires a plotting backend.
        result_df.toPandas().plot.bar(x='medage', figsize=(14, 6))
    # Adjust the values of `medianHouseValue`
    def feature_preprocessing(self, housing_df:DataFrame):
        """Feature engineering + linear-regression training on housing data.

        NOTE(review): VectorAssembler, StandardScaler, LinearRegression,
        rnd_seed, pd and np are not imported/defined in this module; this
        method cannot run as written and looks like an unfinished notebook
        refactor -- confirm the missing imports.
        """
        # Rescale the target so medhv is expressed in units of $100k.
        housing_df = housing_df.withColumn("medhv", col("medhv") / 100000)
        # Show the first 2 lines of `df`
        housing_df.show(2)
        # Add the new columns to `df`: per-household and per-room ratios.
        housing_df = (housing_df.withColumn("rmsperhh", F.round(col("totrooms") / col("houshlds"), 2))
                      .withColumn("popperhh", F.round(col("pop") / col("houshlds"), 2))
                      .withColumn("bdrmsperrm", F.round(col("totbdrms") / col("totrooms"), 2)))
        # Inspect the result
        housing_df.show(5)
        # Re-order and select columns
        housing_df = housing_df.select("medhv",
                                       "totbdrms",
                                       "pop",
                                       "houshlds",
                                       "medinc",
                                       "rmsperhh",
                                       "popperhh",
                                       "bdrmsperrm")
        featureCols = ["totbdrms", "pop", "houshlds", "medinc", "rmsperhh", "popperhh", "bdrmsperrm"]
        # put features into a feature vector column
        assembler = VectorAssembler(inputCols=featureCols, outputCol="features")
        assembled_df = assembler.transform(housing_df)
        assembled_df.show(10, truncate=False)
        # Initialize the `standardScaler`
        standardScaler = StandardScaler(inputCol="features", outputCol="features_scaled")
        # Fit the DataFrame to the scaler
        scaled_df = standardScaler.fit(assembled_df).transform(assembled_df)
        # Inspect the result
        scaled_df.select("features", "features_scaled").show(10, truncate=False)
        # Split the data into train and test sets
        train_data, test_data = scaled_df.randomSplit([.8, .2], seed=rnd_seed)
        # Initialize `lr` (elastic-net regularised linear regression).
        lr = (LinearRegression(featuresCol='features_scaled', labelCol="medhv", predictionCol='predmedhv',
                               maxIter=10, regParam=0.3, elasticNetParam=0.8, standardization=False))
        # Fit the data to the model
        linearModel = lr.fit(train_data)
        # Coefficients for the model
        linearModel.coefficients
        linearModel.intercept
        coeff_df = pd.DataFrame({"Feature": ["Intercept"] + featureCols,
                                 "Co-efficients": np.insert(linearModel.coefficients.toArray(), 0,
                                                            linearModel.intercept)})
        coeff_df = coeff_df[["Feature", "Co-efficients"]]
        # Generate predictions
        predictions = linearModel.transform(test_data)
        # Extract the predictions and the "known" correct labels
        predandlabels = predictions.select("predmedhv", "medhv")
        predandlabels.show()
        # Get the RMSE
        '''
        The RMSE measures how much error there is between two datasets comparing a predicted value and an observed or known value.
        The smaller an RMSE value, the closer predicted and observed values are.
        '''
        print("RMSE: {0}".format(linearModel.summary.rootMeanSquaredError))
        '''
        The R2 ("R squared") or the coefficient of determination is a measure that shows how close the data are to the fitted regression line. This score will always be between 0 and a 100% (or 0 to 1 in this case), where 0% indicates that the model explains none of the variability of the response data around its mean, and 100% indicates the opposite: it explains all the variability.
        That means that, in general, the higher the R-squared, the better the model fits our data.
        '''
        print("MAE: {0}".format(linearModel.summary.meanAbsoluteError))
        # Get the R2
        print("R2: {0}".format(linearModel.summary.r2))
    def eval_model(self):
        """Evaluate predictions with RMSE, MAE and R2.

        NOTE(review): `RegressionEvaluator` and `predandlabels` are not
        defined in this scope -- this method cannot run as written; the
        predictions DataFrame should be passed in or stored on self.
        """
        evaluator = RegressionEvaluator(predictionCol="predmedhv", labelCol='medhv', metricName='rmse')
        print("RMSE: {0}".format(evaluator.evaluate(predandlabels)))
        evaluator = RegressionEvaluator(predictionCol="predmedhv", labelCol='medhv', metricName='mae')
        print("MAE: {0}".format(evaluator.evaluate(predandlabels)))
        evaluator = RegressionEvaluator(predictionCol="predmedhv", labelCol='medhv', metricName='r2')
        print("R2: {0}".format(evaluator.evaluate(predandlabels)))
def start_workflow(self):
``` |
{
"source": "josef-kriz/Semester-Project-CS-IT8",
"score": 3
} |
#### File: Semester-Project-CS-IT8/incidents-analysis/misfire_preceding_incidents.py
```python
from chpdb import cursor
import pickle
import sys
import datetime
# iterates over all clusters of a group, queries the database and accumulates the statistics
def get_group_stats(cursor, groupIndex, group, stats, interval):
    """Query preceding incidents for every cluster of `group` and fold the
    DB responses into `stats` under the key `groupIndex`; returns stats."""
    group_stats = {}
    for index, cluster in enumerate(group):
        # print status message every time 30 clusters are finished
        if (index+1) % 30 == 0:
            print("\t" + str(index+1) + " of " + str(len(group)))
        # store database response in a map
        group_stats[index] = (get_cluster_stats(cursor, cluster, interval))
    print("Parsing group data...")
    for index, row in group_stats.items():
        # accumulate responses for every cluster into group statistics
        stats = parse_group_stats(stats, groupIndex, row)
    return stats
# queries the database for preceding incidents
def get_cluster_stats(cursor, cluster, interval):
    """Return (incident, count) rows that occurred on the cluster's machine
    within `interval` seconds before the cluster's start time.

    NOTE(review): the query is built by string interpolation; values come
    from trusted internal objects, but parameterised queries would be safer.
    """
    start = cluster.date - datetime.timedelta(seconds=interval)
    query = "SELECT haendelse, COUNT(*) FROM anlaegshaendelser " \
            " WHERE anlaeg_id = " + str(cluster.machineID) + " AND dato < \"" + str(cluster.date) + "\" AND " \
            " dato > \"" + str(start) + "\" GROUP BY haendelse ORDER BY haendelse;"
    cursor.execute(query)
    return cursor.fetchall()
# accumulate group statistics into a map where the key is error code
# we keep information about the count of incident clusters that had certain preceding incident
# we also keep the number of such preceding incidents that occured in the whole group
def parse_group_stats(stats, group, res):
    """Accumulate per-error-code statistics for one group of clusters.

    Parameters
    ----------
    stats : dict
        Nested mapping {error_code: {group: {'clusters': int,
        'total_count': int}}}; updated in place and also returned.
    group : hashable
        Identifier of the cluster group being accumulated.
    res : iterable of (code, count)
        One row per (cluster, incident code): the code and how often it
        preceded that cluster.

    Returns
    -------
    dict
        The updated ``stats`` mapping.
    """
    for code, count in res:
        # setdefault replaces the original nested try/except KeyError dance.
        group_stats = stats.setdefault(code, {}).setdefault(
            group, {'clusters': 0, 'total_count': 0})
        # Each row represents one cluster that saw this code `count` times:
        # count the cluster once and accumulate its occurrences.
        group_stats['clusters'] += 1
        group_stats['total_count'] += count
    return stats
# goes through the dictionary of statistics and prints the relative counts of clusters that experienced certain
# incident in the interval prior to the cluster
def print_stats(groups, stats, data_interval, output):
    """Write the relative frequency of each preceding incident per cluster
    group as CSV (CRLF line endings); closes `output` when done."""
    ordered_groups = sorted(groups.items(), key=lambda item: item[0])
    # First header row: how many clusters each group contains.
    output.write('Number of clusters,')
    for _, members in ordered_groups:
        output.write(str(len(members)) + ',')
    output.write('\r\n')
    # Second header row: human-readable group labels with time bounds.
    output.write('Error code,')
    for grp_idx, _ in ordered_groups:
        if grp_idx == 0:
            output.write("Single misfires,")
        else:
            low = (grp_idx - 1) * data_interval
            high = grp_idx * data_interval
            output.write("Grp #" + str(grp_idx) + ': ' + str(int(low)) + '-' + str(int(high)) + 's,')
    output.write('\r\n')
    # One line per preceding error code: fraction of clusters in each group
    # that experienced this code before the cluster.
    for error_code in sorted(stats.keys()):
        output.write(str(error_code))
        for grp_idx, members in ordered_groups:
            code_stats = stats[error_code].get(grp_idx)
            value = code_stats['clusters'] if code_stats else 0
            output.write("," + str((value / len(members))))
        output.write('\r\n')
    output.close()
# Script entry point, configured via positional CLI arguments
# (commented values show typical inputs).
path = sys.argv[1]
# path = 'data/clustersgrouped.rick'
data_interval = int(sys.argv[2])
# data_interval = 900
interval = int(sys.argv[3])
# interval = 1209600
output = sys.argv[4]
# output = 'data/preceding-events-stats.csv'
source_file = open(path, 'rb')
grouped_clusters = pickle.load(source_file)
dest_file = open(output, 'w')
# go through all cluster groups and collect the data into a dictionary
stats = {}
for index, group_index in grouped_clusters.items():
    # if index < 5:
    #     continue
    print(str(index+1) + " of " + str(len(grouped_clusters)) + " - # of clusters: " + str(len(group_index)))
    stats = get_group_stats(cursor, index, group_index, stats, interval)
# print the data into a csv file
print_stats(grouped_clusters, stats, data_interval, dest_file)
```
#### File: training-data/src/classification_data_tools.py
```python
from src.classification_print_tools import print_data_statistics
def limit_negative_samples(features, targets, negative_count):
    """Keep all positive samples but at most `negative_count` negatives.

    Negatives are kept in encounter order (the first ones win).

    Parameters
    ----------
    features : sequence
        Feature vectors, aligned with `targets`.
    targets : sequence of int
        Binary labels (1 = positive, 0 = negative).
    negative_count : int
        Maximum number of negative samples to keep.

    Returns
    -------
    tuple(list, list)
        The filtered feature and target lists.
    """
    limited_features = []
    limited_targets = []
    remaining = negative_count
    # zip iteration replaces the original index-based range(len(...)) loop.
    for feature, target in zip(features, targets):
        if target == 1 or remaining > 0:
            limited_features.append(feature)
            limited_targets.append(target)
            if target == 0:
                remaining -= 1
    return limited_features, limited_targets
def split_data(features, targets, training_ratio, neg_limit=False, print_stats=True):
    """Split features/targets into training and test sets by position.

    The first `training_ratio` fraction of the samples becomes the training
    set; the remainder becomes the test set. Optionally caps the number of
    negative training samples and prints dataset statistics.
    """
    cutoff = int(len(features) * training_ratio)
    training_data = [features[:cutoff], targets[:cutoff]]
    test_data = [features[cutoff:], targets[cutoff:]]
    # NOTE: `!= False` (not a truthiness test) is kept deliberately so the
    # behaviour for neg_limit == 0 matches the original implementation.
    if neg_limit != False:
        training_data[0], training_data[1] = limit_negative_samples(
            training_data[0], training_data[1], neg_limit)
    if print_stats:
        print_data_statistics(training_data, test_data)
    return training_data, test_data
```
#### File: training-data/src/incidents_data.py
```python
from src.chpdb import cursor
import datetime
def fill_in_empty_values(db_data, incident_ids):
    """Expand sparse DB rows into a dense count list aligned with incident_ids.

    Incident ids absent from `db_data` get a count of 0.
    """
    counts = [0] * len(incident_ids)
    for incident_id, incident_count in db_data:
        # .index raises ValueError for an unknown incident id, matching the
        # original behaviour.
        counts[incident_ids.index(incident_id)] = incident_count
    return counts
def get_incidents_counts(machine_id, interval_start, interval_end, incident_ids):
    """Count occurrences of each incident id for one machine in a time window.

    Returns the raw cursor rows: (incident_id, count), ordered by incident
    id; ids with zero occurrences are absent (see fill_in_empty_values).
    NOTE(review): the query interpolates values directly; inputs are
    internal, but parameterised queries would be safer.
    """
    query = "SELECT haendelse, COUNT(1) " \
            "FROM anlaegshaendelser " \
            "WHERE haendelse IN ({}) " \
            "AND anlaeg_id = {} " \
            "AND dato > '{}' " \
            "AND dato < '{}' " \
            "GROUP BY haendelse " \
            "ORDER BY haendelse;".format(
                ','.join(str(x) for x in incident_ids),
                machine_id,
                interval_start,
                interval_end
            )
    # print(query)
    cursor.execute(query)
    result = cursor.fetchall()
    # print(len(result))
    return result
def fetch_incidents(datetime, machine_id, interval, incident_ids):
    """Return dense incident counts for `machine_id` over [datetime - interval, datetime].

    NOTE(review): the parameter name `datetime` shadows the imported
    datetime module inside this function; renaming it would change the
    keyword API, so it is only flagged here.
    """
    interval_start = datetime - interval
    interval_end = datetime
    incident_counts = get_incidents_counts(machine_id, interval_start, interval_end, incident_ids)
    return fill_in_empty_values(incident_counts, incident_ids)
# res = fetch_incidents(
# datetime.datetime(year=2017, month=12, day=1),
# 1000711803,
# datetime.timedelta(days=120),
# [78, 4, 112]
# )
# print(res)
``` |
{
"source": "JosefKuchar/genetic-rider",
"score": 3
} |
#### File: JosefKuchar/genetic-rider/population.py
```python
from rider import Rider
import win32gui
import time
import random
class Population:
    """One generation of genetic-algorithm riders; fitness is how long a
    rider survives before the game-over screen is detected."""
    def __init__(self, windowID, mouse, image_processing, riders=None):
        self.riders = riders
        self.size = 5  # individuals per generation
        self.windowID = windowID  # win32 handle of the game window
        self.mouse = mouse  # mouse controller used to press/release
        self.image_processing = image_processing  # detects the game-over state
        if not riders:
            self.init()
    def init(self):
        # Seed the first generation with random riders.
        self.riders = []
        for i in range(0, self.size):
            self.riders.append(Rider())
    def run(self):
        """Replay every rider's DNA against the game and record fitness."""
        windowCoordinates = win32gui.GetWindowRect(self.windowID)
        for rider in self.riders:
            start_time = time.time()
            for genom in rider.dna:
                # Abort replay as soon as the game-over screen is detected.
                if self.image_processing.get_state() == 1:
                    break
                # Gene 1 = hold the mouse button, 0 = release it; the click
                # point is 50 px from the window's bottom-left corner.
                if genom:
                    self.mouse.press(windowCoordinates[0] + 50, windowCoordinates[1] + windowCoordinates[3] - 50, 1)
                else:
                    self.mouse.release(windowCoordinates[0] + 50, windowCoordinates[1] + windowCoordinates[3] - 50, 1)
                # ~30 genes per second of gameplay.
                time.sleep(0.033333)
            self.mouse.release(windowCoordinates[0] + 50, windowCoordinates[1] + windowCoordinates[3] - 50, 1)
            # Fitness = survival time in seconds.
            rider.fitness = time.time() - start_time
            time.sleep(1.0)
    def evaluate(self):
        """Print generation stats and breed the next generation by
        fitness-weighted parent selection, crossover and mutation."""
        weights = [rider.fitness for rider in self.riders]
        print("Scores: {}".format(weights))
        print("Max: {}".format(max(weights)))
        print("Min: {}".format(min(weights)))
        print("Average: {}".format(sum(weights) / len(weights)))
        print("")
        population = []
        for i in range(0, self.size):
            # Fitter riders are proportionally more likely to be parents.
            parents = random.choices(self.riders, weights=weights, k=2)
            child = parents[0].crossover(parents[1])
            child.mutation()
            population.append(child)
        return Population(self.windowID, self.mouse, self.image_processing, population)
``` |
{
"source": "joseflauzino/vines-management-agent",
"score": 2
} |
#### File: joseflauzino/vines-management-agent/management_agent_api.py
```python
import os
import json
from flask import jsonify
from flask import request
from eve import Eve
from eve.auth import BasicAuth, requires_auth
from subprocess import check_output, call
import time
from util import *
#======================================================
# API end-points
#======================================================
# Eve application instance; plain Flask routes are registered on it below.
app = Eve()
# -------- Basics (Begin) ----------
@app.route('/api/vnf-exp-status', methods=['GET'])
def ems_status():
    # Liveness probe for the management agent itself.
    return "Running"
# -------- Basics (End) ----------
# -------- Lifecycle (Begin) ----------
@app.route('/api/install', methods=['POST'])
def install_function():
    """Run the VNF package's install.sh; returns "True" on exit code 0."""
    status = os.system(generate_cmd(cmd_path,"install.sh"))
    return str(verify_status(status))
@app.route('/api/start', methods=['POST'])
def start_function():
    """Run the VNF start script; on success persist running state ("1").

    Returns "True"/"False" depending on the script's exit status.
    """
    status = os.system(generate_cmd(cmd_path, "start.sh"))
    if verify_status(status):
        # `with` guarantees the status file is closed even if the write fails.
        with open('status', 'w') as is_running:
            is_running.write("1")
    return str(verify_status(status))
@app.route('/api/stop', methods=['POST'])
def stop_function():
    """Run the VNF stop script; on success persist stopped state ("0").

    Returns "True"/"False" depending on the script's exit status.
    """
    status = os.system(generate_cmd(cmd_path, "stop.sh"))
    if verify_status(status):
        # `with` guarantees the status file is closed even if the write fails.
        with open('status', 'w') as is_running:
            is_running.write("0")
    return str(verify_status(status))
@app.route('/api/status', methods=['GET'])
def get_running():
    """Report whether the managed VNF application is running.

    Looks up the 'status' lifecycle script in the VNFD (falling back to
    get_function_status.sh) and runs it against the application name.
    """
    vnfd = read_vnfd()
    script_name = "get_function_status.sh "
    for op in vnfd['vnfd']['lifecycle']:
        if op['operation'] == 'status':
            script_name = op['file']+' '
    function_name = vnfd['vnfd']['app']
    status = run_cmd("sh "+VINES_PATH+script_name+function_name)
    return str(status) # Running or Stopped
@app.route('/api/push-vnfp', methods=['POST'])
def write_file():
    """Receive a VNF package (zip) in the request body and unpack it.

    NOTE(review): the zip is unpacked via os.system/unzip with interpolated
    paths; paths are constants here, but a subprocess call with an argument
    list would be more robust.
    """
    repo = VINES_PATH+'vnfp.zip'
    with open(repo, 'wb') as f:
        f.write(request.data)
    status = os.system('unzip %s -d %svnfp' % (repo,VINES_PATH))
    return str(verify_status(status))
@app.route('/api/metrics', methods=['GET'])
def get_metrics():
    """Return current resource-usage metrics in the monitoring JSON format.

    Disk usage is not collected and is always reported as 0; time_ms is a
    placeholder. Individual collectors may return None on failure.
    """
    memory = get_memory_usage()
    cpu = get_cpu_usage()
    rx = get_bandwidth_usage('rx')
    tx = get_bandwidth_usage('tx')
    return jsonify(
        {
            "time_ms": 0,
            "list": [
                {
                    "percent_usage": cpu,
                    "type": "cpu"
                },
                {
                    "percent_usage": 0,
                    "type": "disk"
                },
                {
                    "percent_usage": memory,
                    "type": "memory"
                },
                {
                    "percent_usage": tx,
                    "type": "net_tx"
                },
                {
                    "percent_usage": rx,
                    "type": "net_rx"
                }
            ]
        })
# -------- Lifecycle (End) ----------
# -------- Service Function Chaining (Begin) ----------
@app.route('/api/set-sfc-forwarding', methods=['POST'])
def setsfcforwarding():
    """Configure iptables so traffic matching the classifier is forwarded
    to the next VNF in the service chain.

    NOTE(review): protocol/port/next_vnf come from the request body and are
    interpolated into shell commands executed with sudo -- the caller is
    trusted orchestration, but validating/escaping these values is advisable.
    """
    # parse data
    data = json.loads(request.data)
    last_vnf = data['last_vnf']
    next_vnf = data['next_vnf']
    classifier = data['classifier']
    # Enabling IP forward
    enable_ip_forward_cmd = "sudo bash -c 'echo 1 > /proc/sys/net/ipv4/ip_forward'"
    response = run_shell_cmd(enable_ip_forward_cmd)
    if response["status"] == "ERROR":
        return build_response("error","Could not enable ip_forward")
    for rule in classifier:
        protocol = rule['protocol']
        port = rule['port']
        # Building commands lines
        #enable_forward_cmd = "sudo iptables -A FORWARD -d %s -j ACCEPT" % (last_vnf)
        enable_forward_cmd = "sudo iptables -A FORWARD -j ACCEPT"
        config_forward_cmd = "sudo iptables -t nat -A PREROUTING -p %s --dport %s -j DNAT --to %s" % (protocol, port, next_vnf)
        # Adding FORWARD rule
        response = run_shell_cmd(enable_forward_cmd)
        if response["status"] == "ERROR":
            return build_response("error","Could not enable add forward rule")
        # Adding PREROUTING rule
        response = run_shell_cmd(config_forward_cmd)
        if response["status"] == "ERROR":
            return build_response("error","Could not add PREROUTING rule")
    return build_response("success","VNF classifiers have been configured")
@app.route('/api/delete-sfc-forwarding', methods=['POST'])
def deletesfcforwarding():
    """Remove all SFC forwarding state: flush iptables (filter and NAT
    tables) and disable kernel IP forwarding."""
    # build command lines
    clean_iptables_cmd = "sudo iptables -F"
    clean_nat_iptables_cmd = "sudo iptables -t nat -F"
    disable_ip_forward_cmd = "sudo bash -c 'echo 0 > /proc/sys/net/ipv4/ip_forward'"
    response = run_shell_cmd(disable_ip_forward_cmd)
    if response["status"] == "ERROR":
        return build_response("error","Could not disable ip_forward")
    response = run_shell_cmd(clean_iptables_cmd)
    if response["status"] == "ERROR":
        return build_response("error","Could not clean iptables")
    response = run_shell_cmd(clean_nat_iptables_cmd)
    if response["status"] == "ERROR":
        return build_response("error","Could not clean iptables (NAT table)")
    return build_response("success","SFC forwarding deleted")
# -------- Service Function Chaining (End) ----------
if __name__=='__main__':
    # Listen on all interfaces; 8000 is the management-agent API port.
    app.run(host='0.0.0.0', port=8000)
```
#### File: joseflauzino/vines-management-agent/util.py
```python
import os
import shlex, subprocess
import json
from flask import jsonify
# Maps an os.system() exit status to a boolean (0 means success).
verify_status = lambda status: True if status == 0 else False
# NOTE(review): the pipe sends touch's (empty) output into chmod; '&&' was
# probably intended, though chmod still runs and the file is created.
os.system('touch status | chmod +x status') # Create status file
VINES_PATH = '/opt/vines/'
cmd_path = VINES_PATH + "vnfp/Scripts/"
def read_vnfd():
    """Load and parse the VNF descriptor (VNFD.json) from the VNF package."""
    with open('/opt/vines/vnfp/Definitions/VNFD.json') as descriptor:
        # json.load parses straight from the file object.
        return json.load(descriptor)
def generate_cmd(cmd_path, script_name):
    """Build the shell command line that executes a VNF lifecycle script."""
    return "sh " + cmd_path + script_name
def run_cmd(cmd):
    """Run a command in a shell; return the (stdout, stderr) tuple.

    NOTE(review): with shell=True, `cmd` should be a single string; when a
    list is passed (as some callers do), only the first element reaches the
    shell and the remaining items become shell positional parameters.
    Output is bytes on Python 3.
    """
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output = process.communicate()
    return output
def get_memory_usage():
    """Return RAM usage in percent (excluding swap), or None on failure.

    Fix: run_cmd() returns a (stdout, stderr) tuple from communicate(), so
    the previous code always raised (a tuple has no .split) and returned
    None. We now take stdout explicitly and decode it before parsing.
    """
    try:
        # Pass a single string: run_cmd uses shell=True, where a list would
        # silently drop the '-t -m' arguments.
        stdout = run_cmd('free -t -m')[0]
        if isinstance(stdout, bytes):
            stdout = stdout.decode()
        # Second line of `free` output: "Mem: total used free ..."
        fields = stdout.split('\n')[1].split()[1:]
        memory_total = float(fields[0])
        memory_used = float(fields[1])
        return float((memory_used / memory_total) * 100)
    except Exception:
        # Best-effort metric: report "unknown" instead of failing the API.
        return None
def get_cpu_usage():
    """Return the newest CPU usage sample from the resources file, or None."""
    try:
        with open(VINES_PATH + '/resources/cpu_usage', 'r') as cpu_file:
            # The collector appends samples; the last line is the freshest.
            return cpu_file.readlines()[-1].replace('\n', '')
    except:  # bare except kept: best-effort metric, original behaviour
        return None
def get_bandwidth_usage(output):
    """Return the newest rx or tx bandwidth sample ('rx' selects receive)."""
    try:
        with open(VINES_PATH + '/resources/bandwidth_usage', 'r') as bw_file:
            # Last line holds the freshest "rx tx" pair, space separated.
            rx, tx = bw_file.readlines()[-1].replace('\n', '').split(' ')
        return rx if output == 'rx' else tx
    except:  # bare except kept: best-effort metric, original behaviour
        return None
# Runs a shell command, checks for success or error and returns the command response data
def run_shell_cmd(cmd):
    """Run `cmd` in a shell; return {'status': 'OK'|'ERROR', 'data': output}.

    Any stderr output is treated as failure. NOTE(review): on Python 3
    communicate() returns bytes, so the `output == ""` comparison never
    matches and empty output is not normalised to "None" -- confirm the
    intended interpreter version.
    """
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output, error = process.communicate()
    if error:
        return {"status":"ERROR","data":error}
    if output == "":
        output = "None"
    return {"status":"OK","data":output}
# Get function status generically
def get_vnf_function_status(function_name):
    """Return Running/Stopped for `function_name` via the status.sh helper.

    NOTE(review): the hard-coded 'FENDE/Scripts/status.sh' path does not use
    VINES_PATH like the rest of this module -- verify it is correct.
    """
    # The status.sh script searches for PID of 'function_name' and returns 'Running' if found, otherwise 'Stopped'
    cmd = "sudo FENDE/Scripts/status.sh "+function_name
    response = run_shell_cmd(cmd)
    if response['status']!="OK":
        response['data'] = "Could not run status.sh script: " + response['data']
        return response
    return {"status":"OK","data":response['data']}
def build_response(status,data):
    """Wrap an SFC-forwarding result as a Flask JSON response.

    NOTE(review): the envelope key is always 'setsfcforwardingresponse',
    even when called from the delete endpoint -- confirm this is intended.
    """
    return jsonify(
        {
            "setsfcforwardingresponse": {
                "status": status,
                "data": data
            }
        })
``` |
{
"source": "josefloresgabriel/Screenlogger-in-python",
"score": 3
} |
#### File: josefloresgabriel/Screenlogger-in-python/screenlogger.py
```python
import pyautogui
from pynput.mouse import Button, Listener
from datetime import *
def clicou(x, y, botao, pressionado):
    """pynput mouse callback: save a full screenshot on every button press.

    x, y: pointer position; botao: which button; pressionado: True on
    press, False on release.
    """
    if pressionado:  # idiomatic truthiness instead of `== True`
        shot = pyautogui.screenshot()
        # NOTE(review): str(datetime.now()) contains ':' characters, which
        # are invalid in Windows file names -- consider
        # datetime.now().strftime('%Y%m%d_%H%M%S') instead.
        shot.save(f'{datetime.now()}.png')
# Register the click handler and block forever processing mouse events.
listener = Listener(on_click=clicou)
listener.start()
listener.join()
``` |
{
"source": "josefmonje/slack-lambda-inviter",
"score": 2
} |
#### File: josefmonje/slack-lambda-inviter/lambda_function.py
```python
import json
import os
from urlparse import parse_qs
from flask_lambda import FlaskLambda
import requests
# Flask-compatible WSGI app that also works as an AWS Lambda handler.
app = FlaskLambda(__name__)
def convert_case(word):
    """Return CamelCase built from a snake_case word.

    Empty segments (double underscores) are preserved as '_'.
    """
    return ''.join(part.capitalize() or '_' for part in word.split('_'))
def extract_body(event):
    """Return the form-encoded request body as a flat dict.

    parse_qs yields {key: [value, ...]}; only the first value of each key
    is kept, mirroring the original behaviour.
    """
    parsed = parse_qs(event['body'])
    return {key: values[0] for key, values in parsed.items()}
def create_response(key, value):
    """Return a response in the AWS Lambda proxy-integration format."""
    body = json.dumps({key: value})
    return {
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": body,
    }
def validate_keys(json):
    """Check required keys, injecting an env-provided token if present.

    Mutates `json` by adding 'token' when the environment defines one, and
    returns a list of 'no_<key>' error strings for the keys still missing.
    """
    token = os.environ.get('token', None)  # optional token in env is used
    if token:
        json['token'] = token
    required = ('team_name', 'email', 'token')
    return ['no_{0}'.format(name) for name in required if name not in json.keys()]
@app.route('/', methods=['POST'])
def lambda_handler(event, context):
    """Invite `email` to the Slack team `team_name` via the admin API.

    Accepts either a raw dict (tests) or an API-Gateway proxy event with a
    form-encoded body. Returns an AWS Lambda proxy response, or raises an
    exception named after the Slack error code on failure.
    """
    # flask-lambda returns just the data on test and returns the event on http request
    data = event
    if 'httpMethod' in event.keys():
        data = extract_body(event)
    # data validation
    errors = validate_keys(data)
    if errors:
        return create_response('error', errors)
    # slack API
    team_name = data.pop('team_name')
    url = "https://{0}.slack.com/api/users.admin.invite".format(team_name)
    r = requests.post(url, data=data)
    if not r.ok: # did not work
        raise Exception(convert_case('api_error'))
    # it worked but there were errors, raise them as exceptions
    data = r.json()
    if 'ok' not in data.keys():
        # Build an exception class named after the Slack error code,
        # e.g. 'already_invited' -> AlreadyInvited.
        exception = type(convert_case(data['error']), (Exception,), {})
        raise exception(data['error'])
    return create_response('result', data) # it worked
if __name__ == '__main__':
    # Local development only; on AWS Lambda the handler is invoked directly.
    app.run(debug=True)
``` |
{
"source": "josefmtd/idsl-gauge",
"score": 3
} |
#### File: idsl-gauge/idsl_gauge/__init__.py
```python
import pandas
import requests
import json
import datetime
# JRC WebCritech TAD server endpoints (GeoJSON station list and JSON data).
BASE_URL = 'https://webcritech.jrc.ec.europa.eu/TAD_server/'
GROUPS_API = 'api/Groups/GetGEOJSON?group={}&maxLatency={}'
DATA_API = 'api/Data/Get/{}?tMin={}%20{}&tMax={}%20{}&nRec={}&mode=json'
class IDSLGauge:
    """
    Python wrapper for the IDSL tide-gauge API of JRC WebCritech.

    Attributes
    ----------
    group: string
        group name (e.g. Indonesia or IDSL)
    max_latency: int
        maximum latency in seconds
    metadata: pandas.DataFrame
        metadata from available stations (set by get_metadata)
    stations: list
        list of station ids (set by get_metadata)
    data: pandas.DataFrame
        tide-gauge data of the last queried station (set by get_gauges_data)
    """
    def __init__(self, group, max_latency):
        """
        Parameters
        ----------
        group: string
            station group to query (e.g. 'Indonesia' or 'IDSL')
        max_latency: int
            maximum acceptable data latency in seconds
        """
        self.group = group
        self.max_latency = max_latency
    def get_metadata(self):
        """
        Get metadata from active tide gauge stations
        """
        # Get stations metadata from GeoJSON API
        url = BASE_URL + GROUPS_API.format(self.group, self.max_latency)
        req = requests.get(url)
        # NOTE(review): assert is stripped under `python -O`; raising an
        # explicit exception for a failed request would be more robust.
        assert(req.status_code == 200)
        # Create stations DataFrame
        summary = json.loads(req.text)
        df = pandas.DataFrame(summary[0]['features'])
        df.set_index('id', inplace = True, drop = True)
        # Obtain Latitude and Longitude
        geometry = df.geometry.apply(pandas.Series).copy()
        coordinates = geometry.coordinates.apply(pandas.Series).copy()
        coordinates.columns = ['Longitude', 'Latitude']
        # Obtain metadata
        properties = df.properties.apply(pandas.Series).copy()
        latency = properties.Latency.apply(pandas.Series).copy()
        # Dropping unused columns
        properties_drop = ['LastData', 'Latency', 'GroupColor']
        properties.drop(properties_drop, axis = 1, inplace = True)
        latency_drop = ['Literal', 'Color']
        latency.drop(latency_drop, axis = 1, inplace = True)
        latency.columns = ['Latency_Seconds']
        # Add attributes stations and list
        self.metadata = pandas.concat([coordinates, properties, \
            latency], axis = 1)
        self.stations = self.metadata.index.to_list()
    def get_gauges_data(self, station_id, start, end, max_records = 5000):
        """
        Get data from station_id from start to end

        Parameters
        ----------
        station_id: string
            ID of the IDSL tide gauge station
        start: datetime.datetime
            start datetime
        end: datetime.datetime
            end datetime
        max_records: int
            maximum number of records

        Returns
        -------
        data: pandas.DataFrame
            station data queried from the JSON API as a DataFrame, or None
            when the API returned no records
        """
        # Get data from JSON data API; [:8] keeps HH:MM:SS of the time part.
        url = BASE_URL + DATA_API.format(station_id, \
            start.date().isoformat(), start.time().isoformat()[:8], \
            end.date().isoformat(), end.time().isoformat()[:8], \
            max_records)
        req = requests.get(url)
        assert(req.status_code == 200)
        summary = json.loads(req.text)
        if len(summary) > 0:
            self.data = pandas.DataFrame(summary)
            self._reformat_dataframe()
            return self.data.copy()
        else:
            return None
    def _reformat_dataframe(self):
        """
        Reformat self.data in place: timestamp index + named value columns
        """
        self.data.Timestamp = self.data.Timestamp.astype('datetime64[ms]')
        self.data.set_index('Timestamp', inplace = True, drop = True)
        # Expand the nested 'Values' payload into named columns.
        df = self.data.Values.apply(pandas.Series).copy()
        df.columns = ['WaterLevelRadar', 'SolarPanel', 'RmsLimit', \
            'CPUTemperature', 'AmbientTemperature', 'Alert', \
            'AlertSignal', 'Battery', 'Forecast30', 'Forecast300']
        self.data = df.copy()
{
"source": "JosefNagelschmidt/conformal-inference",
"score": 3
} |
#### File: src/conformal_methods/testing_dgps.py
```python
import numpy as np
import matplotlib.pyplot as plt
def testing_dgp_1(n):
    """Heteroscedastic toy DGP: y = x * (1 + eps) with x ~ U(0, 1).

    Parameters
    ----------
    n : int
        Number of observations to draw.

    Returns
    -------
    tuple(numpy.ndarray, numpy.ndarray)
        Design matrix of shape (n, 1) and flat response vector of length n.
    """
    X = np.random.uniform(0, 1, n).reshape((n, 1))
    mu, sigma = 0, 1  # mean and standard deviation of the noise
    eps = np.random.normal(mu, sigma, n)
    x = X.flatten()
    # Noise scale grows linearly in x, so the response is heteroscedastic.
    # The original's reshape(-1, 1) immediately followed by flatten() was a
    # dead round trip and has been removed.
    y = x + x * eps
    return X, y.flatten()
def testing_dgp_2(n):
    """Pure-noise DGP: three independent N(0, 1) covariates and N(0, 1) errors."""
    design = np.random.normal(0, 1, n * 3).reshape((n, 3))
    noise = np.random.normal(0, 1, n)
    return design, noise.flatten()
```
#### File: src/conformal_methods/utils.py
```python
import numpy as np
import pandas as pd
from src.config import SRC
from numba import jit
from scipy.stats import norm
from scipy.stats import skewnorm
from sklearn.preprocessing import StandardScaler
def init_scoring_object(method, quantile=0.9):
    """Build a sklearn-style scorer for the given conformal method.

    Parameters
    ----------
    method : str
        One of "mean-based", "weighted-mean-based" or "quantile-based".
    quantile : float
        Target quantile for the pinball loss (only used by "quantile-based").

    Returns
    -------
    callable
        ``scoring_object(estimator, X, y)`` returning a negated loss so that
        larger scores are better (sklearn convention). Returns ``None`` for
        an unrecognised method, ``nan`` for an out-of-range quantile.
    """
    def scoring_object(estimator, X, y):
        # Membership test replaces the original bitwise `|` on booleans.
        if method in ("mean-based", "weighted-mean-based"):
            y_pred = estimator.predict(X)
            # Negated MSE: sklearn maximises scores.
            loss = np.mean((y - y_pred) ** 2)
            return -loss.item()
        if method == "quantile-based":
            y_pred = estimator.predict(X)
            if 0 < quantile < 1:
                residual = y - y_pred
                # Negated pinball (quantile) loss.
                return -np.sum(residual * (quantile - (residual < 0)))
            return np.nan
    return scoring_object
def CQR_conformity_score(lower_quant_hat, upper_quant_hat, y_conf):
    """CQR conformity score: max(q_lo - y, y - q_hi) per calibration point."""
    below = lower_quant_hat.flatten() - y_conf.flatten()
    above = y_conf.flatten() - upper_quant_hat.flatten()
    # Elementwise maximum of the two violations (negative when y is inside).
    return np.maximum(below, above)
def extract_intervals(conf_set_list):
    """Collapse each confidence set to its [min, max] interval."""
    bounds = np.zeros((len(conf_set_list), 2))
    for row, conf_set in enumerate(conf_set_list):
        bounds[row, 0] = np.min(conf_set)
        bounds[row, 1] = np.max(conf_set)
    return bounds
def flatten(l):
    """Flatten one level of nested tuples inside each element of ``l``."""
    flattened = []
    for tup in l:
        items = []
        for element in tup:
            if isinstance(element, tuple):
                items.extend(element)
            else:
                items.append(element)
        flattened.append(tuple(items))
    return flattened
def cond_variance(X_mat, error_type, linear_part=None):
    """Return the true conditional variance implied by ``error_type``."""
    if error_type == "simple_linear":
        return (X_mat.flatten()) ** 2
    if error_type == "varying_squared_linear_part":
        return 1 + linear_part ** 2
    if error_type == "varying_third_moment_mu":
        # Variance of a t(3) variate is 3 / (3 - 2) = 3.
        t_factor = 3.0 / (3 - 2)
        scale = 1 + 2 * np.abs(linear_part) ** 3 / np.mean(np.abs(linear_part) ** 3)
        return t_factor * scale ** 2
    raise ValueError("Please specify regular error_type.")
def x_scale(X_mat, error_type, linear_part=None):
    """Diagnostic x-axis scale used for conditional coverage/length plots."""
    if error_type == "simple_linear":
        return X_mat.flatten()
    if error_type in ("varying_squared_linear_part", "varying_third_moment_mu"):
        return linear_part
    raise ValueError("Please specify regular error_type.")
def construc_cond_metric_df(cond_variance, result_pred_bands, y_predict):
    """Stack (conditional variance, interval length, coverage flag) columns."""
    lower = result_pred_bands[:, 0]
    upper = result_pred_bands[:, 1]
    y_flat = y_predict.flatten()
    # Coverage flag: 1.0 when the realised y falls inside its band.
    inside = (y_flat >= lower) & (y_flat <= upper)
    return np.stack((cond_variance, upper - lower, inside), axis=1)
def construc_cond_metric_df_simulation(x_scale, result_pred_bands, y_predict):
    """Stack (x-scale, interval length, coverage flag) into an (n, 3) array."""
    targets = y_predict.flatten()
    lengths = result_pred_bands[:, 1] - result_pred_bands[:, 0]
    covered = (targets >= result_pred_bands[:, 0]) & (targets <= result_pred_bands[:, 1])
    return np.stack((x_scale, lengths, covered), axis=1)
@jit(nopython=True)
def conditional_cdf_hat(y_grid, y_vec, q_hat_conf_mat, q_hat_pred_mat):
# Estimate conditional CDF values from matrices of predicted quantiles:
# each row of q_hat_*_mat holds a dense grid of quantile predictions for
# one observation, so the share of predicted quantiles <= y approximates
# F(y | x). Code kept byte-identical (numba nopython mode); comments only.
# preallocate matrix for the predicted cdf values
f_hat_y_mat = np.zeros((q_hat_pred_mat.shape[0], len(y_grid.flatten())))
###
# F_hat evaluated at each calibration point's own observed y (one per row).
q_hat_conf_less_y_mat = q_hat_conf_mat <= y_vec.reshape(-1, 1)
f_hat_conf = (1.0 / q_hat_conf_less_y_mat.shape[1]) * np.sum(
q_hat_conf_less_y_mat, axis=1
)
###
# F_hat for every prediction point evaluated on the shared y grid.
for i, y in enumerate(y_grid):
q_hat_pred_less_y = q_hat_pred_mat <= y
f_hat_y = (1.0 / q_hat_pred_less_y.shape[1]) * np.sum(q_hat_pred_less_y, axis=1)
f_hat_y_mat[:, i] = f_hat_y
return f_hat_conf, f_hat_y_mat
@jit(nopython=True)
def p_y_func(alpha, y_grid, f_hat_conf, f_hat_y_mat):
# Conformal p-value construction for CDF-based prediction sets: a grid
# value y enters point i's set when the fraction of calibration deviations
# |F_hat - 1/2| at least as large as point i's deviation exceeds alpha.
# Code kept byte-identical (numba nopython mode); comments only.
f_hat_conf_abs_dev = np.abs(f_hat_conf.flatten() - 0.5)
f_hat_y_mat_abs_dev = np.abs(f_hat_y_mat - 0.5)
conf_set_list = []
# fix the X_n+1 prediction point:
for i in range(f_hat_y_mat.shape[0]):
conf_set = []
# fix the y grid value:
for j, y in enumerate(y_grid):
# Rank-based p-value with the +1 finite-sample correction.
val = (
1
/ (len(f_hat_conf_abs_dev) + 1)
* np.sum(f_hat_y_mat_abs_dev[i, j] <= f_hat_conf_abs_dev)
)
if val > alpha:
conf_set.append(y)
conf_set_list.append(conf_set)
return conf_set_list
def extract_intervals(conf_set_list):
    """Collapse each confidence set to its [min, max] interval.

    NOTE(review): verbatim duplicate of the `extract_intervals` defined
    earlier in this module; consider removing one copy.
    """
    pairs = [(np.min(cs), np.max(cs)) for cs in conf_set_list]
    intervals = np.zeros((len(conf_set_list), 2))
    for i, (lo, hi) in enumerate(pairs):
        intervals[i, 0] = lo
        intervals[i, 1] = hi
    return intervals
def calc_normal_params(mu_1, mu_0, X, heteroscedastic):
    """Mean and variance of the ITE distribution under the simulated DGP."""
    means = mu_1 - mu_0
    n = len(means)
    if heteroscedastic:
        # Var(eps_1) = X_0^2 and Var(eps_0) = 1; the draws are independent.
        variances = X[:, 0] ** 2 + np.ones(n)
    else:
        variances = np.full(n, 2.0)
    return means, variances
def get_oracle_interval(lower, upper):
    """Return a function mapping (mean, var) to the exact normal quantile pair."""
    def oracle_interval(mean, var):
        # ppf of N(mean, sqrt(var)) at both requested probability levels.
        dist = norm(loc=mean, scale=np.sqrt(var))
        return dist.ppf([lower, upper])
    return oracle_interval
def get_oracle_intervals(means, variances):
    """Central 90% oracle intervals, one per (mean, variance) pair."""
    oracle_fun = get_oracle_interval(0.05, 0.95)
    return [oracle_fun(m, v) for m, v in zip(means, variances)]
def share_signif_fun(oracle_intervals, ite_pred_intervals):
    """Share of oracle-significant intervals that predictions also flag as
    significant with the matching sign; -1.0 when no oracle interval is
    significant (ratio undefined)."""
    def excludes_zero(intervals):
        # An interval is "significant" when it does not straddle zero.
        return np.logical_not((intervals[:, 0] <= 0) & (intervals[:, 1] >= 0))

    oracle_signif = excludes_zero(oracle_intervals)
    pred_signif = excludes_zero(ite_pred_intervals)
    # The sign of the interval midpoint gives the direction of the effect.
    same_sign = np.sign(np.mean(oracle_intervals, axis=1)) == np.sign(
        np.mean(ite_pred_intervals, axis=1)
    )
    hits = oracle_signif & pred_signif & same_sign
    n_oracle = np.sum(oracle_signif)
    if n_oracle == 0:
        return -1.0
    return np.sum(hits) / n_oracle
def share_signif_oracles(oracle_intervals, ite_vals):
    """Fraction of all units whose oracle interval excludes zero while the
    true ITE is non-zero."""
    oracle_signif = ~((oracle_intervals[:, 0] <= 0) & (oracle_intervals[:, 1] >= 0))
    nonzero_ite = ite_vals != 0
    return np.sum(oracle_signif & nonzero_ite) / len(oracle_intervals)
def share_signif_intervals_given_ite_not_zero(ite_pred_intervals, ite_vals):
    """Fraction of all units whose predicted interval excludes zero while the
    true ITE is non-zero."""
    pred_signif = ~((ite_pred_intervals[:, 0] <= 0) & (ite_pred_intervals[:, 1] >= 0))
    nonzero_ite = ite_vals != 0
    return np.sum(pred_signif & nonzero_ite) / len(ite_pred_intervals)
def generate_treatment_effects_helper(X, treatment_case):
    """Map covariates to treatment effects tau(x) for the simulated DGPs."""
    n, p = X.shape
    if treatment_case == "binary":
        # +1 where the first covariate is positive, -1 elsewhere.
        indicator = 1 * (X[:, 0] > 0.0)
        return np.where(indicator == 0, -1.0, indicator)
    if treatment_case == "gaussian":
        half_point = round(p / 2)
        weights = np.ones(p)
        weights[:half_point] = 1.0
        weights[half_point:] = 0.0
        # Divide by the true std of the sum so tau(x) has unit variance.
        return (X @ weights) / np.sqrt(half_point)
    raise ValueError("Please specify a valid main effect type.")
def dgp_ate_zero(n, p, effect_size, main_effect_case="const", treatment_case="binary"):
# DGP with an average treatment effect of zero by construction: tau(x)
# integrates to ~0 over the covariate distribution. Returns the individual
# treatment effects plus every intermediate quantity.
# NOTE(review): the exact order of np.random calls (X, eps_1, eps_0, W)
# determines reproducibility under a fixed seed — kept byte-identical.
X = generate_X_fixed_positions(n = n, p=p, X_dist="normal", cor="none", standardize=False, rho=0.5)
tau_x = generate_treatment_effects_helper(X=X, treatment_case=treatment_case)
if main_effect_case == "const":
mu_1 = np.ones(n) + effect_size * tau_x
mu_0 = np.ones(n)
elif main_effect_case == "linear":
# Sparse linear main effect: every second coefficient is zeroed.
beta = np.ones(p)
beta[::2] = 0.0
mu_1 = X @ beta + effect_size * tau_x
mu_0 = X @ beta
elif main_effect_case == "non-linear":
beta = np.ones(p)
beta[::2] = 0.0
linear_part = X @ beta
# Softplus-style transform of the linear index.
base_fun = 2 * np.log(1 + np.exp(linear_part))
mu_1 = base_fun + effect_size * tau_x
mu_0 = base_fun
else:
raise ValueError("Please specify a valid main effect type.")
# noise:
eps_1 = np.random.normal(0, 1, n)
eps_0 = np.random.normal(0, 1, n)
# draw treatment assignment variable:
W = np.random.binomial(n=1, p=0.5, size=(n,))
# calculate other quantities of interest:
ite = mu_1 - mu_0 + eps_1 - eps_0
# observed y_obs depends on W:
y_obs = W * (mu_1 + eps_1) + (1 - W) * (mu_0 + eps_0)
return ite, mu_1, mu_0, eps_1, eps_0, y_obs, X, W
def generate_X_fixed_positions(
n,
p,
X_dist="normal",
cor="none",
standardize=False,
rho=0.15,
k=5,
alpha=5,
uniform_lower=0,
uniform_upper=1,
):
# Draw an (n, p) design matrix from one of several marginal distributions,
# optionally adding pairwise or auto correlation and standardization.
# "Fixed positions" refers to the mixture case, where five designated
# columns are pinned to known distributions for downstream diagnostics.
# `alpha` is the skew-normal shape parameter; `k` the auto-correlation lag.
# Generate X matrix
if X_dist == "normal":
X = np.random.normal(0, 1, n * p).reshape((n, p))
if X_dist == "binom":
X = np.random.binomial(n=1, p=0.5, size=(n, p))
if X_dist == "uniform":
X = np.random.uniform(uniform_lower, uniform_upper, n * p).reshape((n, p))
if X_dist == "skewed_normal":
X = skewnorm.rvs(alpha, size=n * p).reshape((n, p))
if X_dist == "mixture":
# Each column is assigned (u-based, per column) to normal, binomial,
# or skew-normal marginals with probability 1/3 each.
X = np.zeros(n * p).reshape((n, p))
x1 = np.random.normal(0, 1, n * p).reshape((n, p))
x2 = np.random.binomial(n=1, p=0.5, size=(n, p))
x3 = skewnorm.rvs(5, size=n * p).reshape((n, p))
u = np.random.uniform(0, 1, p)
i1 = u <= 1 / 3
i2 = (1 / 3 < u) & (u <= 2 / 3)
i3 = u > 2 / 3
X[:, i1] = x1[:, i1]
X[:, i2] = x2[:, i2]
X[:, i3] = x3[:, i3]
# setting the decisive 5 covariates to a fixed distribution for later purposes
X[:, 0] = np.random.normal(0, 1, n)
X[:, 4] = np.random.binomial(n=1, p=0.5, size=n)
X[:, 6] = skewnorm.rvs(5, size=n)
X[:, 8] = skewnorm.rvs(5, size=n)
X[:, 9] = np.random.binomial(n=1, p=0.5, size=n)
# Pairwise correlation
if cor == "pair":
# Symmetric square root of the equicorrelation matrix, applied to X.
b = (-2 * np.sqrt(1 - rho) + 2 * np.sqrt((1 - rho) + p * rho)) / (2 * p)
a = b + np.sqrt(1 - rho)
# calculate symmetric square root of p x p matrix whose diagonals are 1 and off diagonals are rho:
sig_half = np.full(shape=(p, p), fill_value=b)
np.fill_diagonal(sig_half, a)
X = X @ sig_half
# Auto-correlation
if cor == "auto":
# Each column becomes a random convex-ish mix of itself and up to k
# preceding columns.
for j in range(p):
mat = X[:, max(0, j - k) : j + 1]
wts = np.random.uniform(0, 1, mat.shape[1]).flatten()
wts = wts / np.sum(wts)
tmp = mat * wts
X[:, j] = np.array(np.mean(tmp, axis=1))
# Standardize, if necessary
if standardize:
scaler = StandardScaler().fit(X)
X = scaler.transform(X)
return X
def flatten(l):
    """Flatten one level of tuple nesting in each element of ``l``.

    NOTE(review): identical to the `flatten` defined earlier in this module.
    """
    result = []
    for tup in l:
        flat = []
        for item in tup:
            flat.extend(item if isinstance(item, tuple) else (item,))
        result.append(tuple(flat))
    return result
def generate_y_fixed_positions(
X_mat,
eps_dist="normal",
error_type="const",
functional_form="linear",
sigma=1,
force_beta_positive=True,
non_zero_beta_count=None,
magnitude_nonzero_coeffs=1,
signal_noise_ratio=None,
alpha=5,
df=4,
):
# Simulate a response y = mu(X) + eps * sigma(X) with configurable main
# effect, error distribution and heteroscedasticity pattern.
# Returns (y, eps, sigma_vec, mu, beta), except for the stochastic_poisson
# form which returns y alone (early return below).
n, p = X_mat.shape
if non_zero_beta_count is None:
non_zero_beta_count = int(np.ceil(p / 10))
# NOTE(review): after the default above, the next check is always true;
# the `else` branch re-casting to int is effectively unconditional.
if non_zero_beta_count is not None:
if non_zero_beta_count > p:
raise ValueError(
"Number of non-zero coefficients cannot exceed the number of covariates in X."
)
else:
non_zero_beta_count = int(non_zero_beta_count)
# calculate the linear part of the conditional expectation function, or the error multiplicator:
# Sample s variables uniformly at random, define true coefficients
if eps_dist == "t":
# For the t-error case the active coefficients sit on the five columns
# pinned by generate_X_fixed_positions' mixture design.
non_zero_coeffs = np.array([0, 4, 6, 8, 9])
beta = np.zeros(p)
beta[non_zero_coeffs] = np.random.choice(
np.array([-1, 1]) * magnitude_nonzero_coeffs,
size=non_zero_beta_count,
replace=True,
)
if force_beta_positive:
beta = np.abs(beta)
linear_part = X_mat @ beta
else:
non_zero_coeffs = np.arange(non_zero_beta_count)
beta = np.zeros(p)
beta[non_zero_coeffs] = np.random.choice(
np.array([-1, 1]) * magnitude_nonzero_coeffs,
size=non_zero_beta_count,
replace=True,
)
if force_beta_positive:
beta = np.abs(beta)
linear_part = X_mat @ beta
# main effect:
if functional_form == "linear":
mu = linear_part
elif functional_form == "sine":
mu = 2 * np.sin(np.pi * linear_part) + np.pi * linear_part
elif functional_form == "stochastic_poisson":
# Univariate-only DGP from the CQR paper; returns y directly.
if p > 1:
raise ValueError("This dgp can only be initialized with p = 1.")
else:
x = X_mat.flatten()
ax = 0 * x
for i in range(len(x)):
ax[i] = np.random.poisson(np.sin(x[i]) ** 2 + 0.1) + 0.03 * x[
i
] * np.random.randn(1)
# Rare (1%) large outliers.
ax[i] += 25 * (np.random.uniform(0, 1, 1) < 0.01) * np.random.randn(1)
y = ax.astype(np.float32)
return y
else:
raise ValueError("Please specify regular functional form.")
# error:
if eps_dist == "normal":
eps = np.random.normal(0, 1, n)
elif eps_dist == "uniform":
eps = np.random.uniform(0, 1, n)
elif eps_dist == "t":
eps = np.random.standard_t(df=df, size=n)
elif eps_dist == "skewed_normal":
eps = skewnorm.rvs(alpha, size=n)
else:
raise ValueError("Please specify regular error distribution.")
# Heteroscedasticity pattern sigma(x):
if error_type == "const":
sx = np.ones(n)
sigma_vec = sigma * sx
elif error_type == "simple_linear":
sx = linear_part
sigma_vec = sigma * sx
elif error_type == "varying_third_moment_mu":
# 38.73 — presumably E|linear_part|^3 for the reference design; verify.
sx = 1 + 2 * np.abs(linear_part) ** 3 / 38.73
sigma_vec = sigma * sx
elif error_type == "varying_squared_linear_part":
sx = np.sqrt(1 + (linear_part) ** 2)
sigma_vec = sigma * sx
else:
raise ValueError("Please specify regular error type.")
assert eps.shape == (n,)
assert sigma_vec.shape == (n,)
assert mu.shape == (n,)
if signal_noise_ratio is not None:
# Rescale the main effect to hit the requested signal-to-noise ratio.
mu = (
mu
* np.sqrt(signal_noise_ratio)
* np.sqrt(np.mean(sigma_vec ** 2))
/ np.std(mu)
)
assert mu.shape == (n,)
y = mu + eps * sigma_vec
if functional_form != "stochastic_poisson":
return y, eps, sigma_vec, mu, beta
def x_scale(X_mat, error_type, linear_part=None):
    """Diagnostic x-axis scale for conditional plots.

    NOTE(review): duplicate of the `x_scale` defined earlier in this module.
    """
    if error_type == "simple_linear":
        scale = X_mat.flatten()
    elif error_type == "varying_squared_linear_part" or error_type == "varying_third_moment_mu":
        scale = linear_part
    else:
        raise ValueError("Please specify regular error_type.")
    return scale
def construc_cond_metric_df_simulation(x_scale, result_pred_bands, y_predict):
    """Stack (x-scale, interval length, coverage flag) columns.

    NOTE(review): duplicate of the function of the same name defined earlier.
    """
    lo = result_pred_bands[:, 0]
    hi = result_pred_bands[:, 1]
    y = y_predict.flatten()
    return np.column_stack((x_scale, hi - lo, (y >= lo) & (y <= hi)))
def get_conditional_variances(process_type):
    """Grid of x values and the true conditional variance for DGPs 1-4.

    Returns ``None`` implicitly for any other ``process_type``.
    """
    if process_type == 1:
        grid = np.linspace(-5, 5, 2000)
        return grid, 2 * (1 + (2 * np.abs(grid) ** 3) / 38.73) ** 2
    if process_type == 2:
        grid = np.linspace(0, 5, 2000)
        return grid, 1 + grid ** 2
    if process_type == 3:
        # chernozhukov example distributional conformal prediction (2021)
        grid = np.linspace(0, 1, 2000)
        return grid, np.array(grid) ** 2
    if process_type == 4:  # romano table; variances precomputed in a CSV
        src = str(SRC)
        table = pd.read_csv(src + "/simulations/helper_tables/romano_table_cond_variances.csv")
        return np.array(table["X"]), np.array(table["cond_var"])
```
#### File: src/legacy/simulation_conditional_updated.py
```python
from itertools import product
from sys import argv
import numpy as np
import pandas as pd
from functions import *
from statsmodels.nonparametric.kernel_regression import KernelReg
def conc_tuples(index_prep, method, i):
    """Append (method, simulation id) to a parameter index tuple."""
    return index_prep + (method, i)
# Legacy driver script (CLI): builds the full simulation index — one row per
# (DGP configuration, prediction-band method, replication) — for the process
# type chosen via argv[1], then allocates the result DataFrame.
# (n, p, X_dist, X_correlation, eps_dist, error_type, functional_form, non_zero_beta_count, uniform_upper, standardized_X, method, sim_id)
n_sims = 200
process_type = int(argv[1]) # one of: 1,2,3,4
if process_type == 1:
print("Initialize process 1.")
index_prep = [
# third moment error term:
(
200,
10,
"mixture",
"pair",
"t",
"varying_third_moment_mu",
"linear",
5,
1,
True,
)
]
elif process_type == 2:
print("Initialize process 2.")
index_prep = [
# varying_squared_linear_part error term:
(
1000,
100,
"uniform",
"none",
"normal",
"varying_squared_linear_part",
"sine",
5,
1,
False,
)
]
elif process_type == 3:
print("Initialize process 3.")
index_prep = [
# chernozhukov example distributional conformal prediction (2021)
(1000, 1, "uniform", "none", "normal", "simple_linear", "linear", 1, 1, False)
]
elif process_type == 4:
print("Initialize process 4.")
index_prep = [
# Conformalized Quantile Regression(2019), eq. 18
(
1000,
1,
"uniform",
"none",
"normal",
"simple_linear",
"stochastic_poisson",
1,
5,
False,
)
]
else:
# NOTE(review): index_prep stays undefined here; print below would raise.
print("No process 1-4 specified.")
print(index_prep)
# Map method names to their prediction-band implementations (from functions.py).
methods = {
"pred_band_mean_based": pred_band_mean_based,
"pred_band_weighted_mean_based": pred_band_weighted_mean_based,
"pred_band_quantile_based_rf": pred_band_quantile_based_rf,
"pred_band_cdf_based": pred_band_cdf_based,
}
methods_keys = list(methods.keys())
simulation_ids = np.arange(n_sims)
# Cartesian product (config x method x replication), flattened into tuples.
index = product(index_prep, methods_keys, simulation_ids)
index = flatten(l=list(index))
index = pd.MultiIndex.from_tuples(
index,
names=(
"n",
"p",
"X_dist",
"X_correlation",
"eps_dist",
"error_type",
"functional_form",
"non_zero_beta_count",
"uniform_upper",
"standardized_X",
"method",
"simulation_id",
),
)
# Preallocated result frame, filled row by row in the main loop below.
df = pd.DataFrame(
columns=[
"mean_interval_length",
"mean_coverage",
"conditional_metrics_df",
"nodesize",
],
index=index,
)
# Main simulation loop: for every index row, draw a fresh dataset, (re)tune
# the forest hyper-parameter when the configuration changes, build the
# prediction bands and record marginal + conditional metrics.
same_case_as_previous_round = False
for index in df.index:
if index[11] != 0:
print("Previous index is: " + str(previous_index))
print("Current index is: " + str(index))
# Re-tune only when the configuration (all fields but simulation_id) changed.
if index[11] != 0:
same_case_as_previous_round = True
for i in range(len(previous_index) - 1):
if previous_index[i] != index[i]:
same_case_as_previous_round = False
# pred_samples = int(round(15000 / n_sims))
pred_samples = 150
total_sample = index[0] + pred_samples
X = generate_X_fixed_positions(
n=total_sample,
p=index[1],
X_dist=index[2],
cor=index[3],
standardize=index[9],
uniform_upper=index[8],
)
# stochastic_poisson returns y only; all other forms return the full tuple.
if index[6] == "stochastic_poisson":
y = generate_y_fixed_positions_beta(
X_mat=X,
eps_dist=index[4],
error_type=index[5],
functional_form=index[6],
non_zero_beta_count=index[7],
)
else:
y, eps, sigma_vec, mu, beta = generate_y_fixed_positions_beta(
X_mat=X,
eps_dist=index[4],
error_type=index[5],
functional_form=index[6],
non_zero_beta_count=index[7],
)
# Three-way split: prediction set, then 50/50 train/calibration.
X_predict, X_split_again, y_predict, y_split_again = train_test_split(
X, y, train_size=pred_samples
)
X_train, X_conf, y_train, y_conf = train_test_split(
X_split_again, y_split_again, test_size=0.5, train_size=0.5
)
if (index[11] == 0) or (not same_case_as_previous_round):
if (index[10] == "pred_band_quantile_based_rf") or (
index[10] == "pred_band_cdf_based"
):
nodesize_opt, mat_overview = tune_nodesize_quantile_forest(
X=X,
y=y,
nodesize_grid=[5,10,15,25,50],
# nodesize_grid=[100],
pred_band_method=methods[index[10]],
n_simulations=1,
)
elif (index[10] == "pred_band_mean_based") or (
index[10] == "pred_band_weighted_mean_based"
):
nodesize_opt, mat_overview = tune_mean_based_rf(
X=X,
y=y,
min_samples_leaf_grid=[5,10,15,25,50],
# min_samples_leaf_grid=[100],
pred_band_method=methods[index[10]],
n_simulations=1,
)
else:
raise ValueError("A problem with the prediction band method occured.")
# Build the prediction bands with the tuned leaf size.
res = methods[index[10]](
X_predict=X_predict,
X_train=X_train,
y_train=y_train,
X_conf=X_conf,
y_conf=y_conf,
nodesize=int(nodesize_opt),
)
length_bands = res[:, 1] - res[:, 0]
mean_interval_length = np.mean(length_bands)
in_the_range = np.sum(
(y_predict.flatten() >= res[:, 0]) & (y_predict.flatten() <= res[:, 1])
)
mean_coverage = in_the_range / len(y_predict)
if index[5] == "simple_linear": # these are process_types 3 and 4 (univariate)
x_scale_diag = x_scale(X_mat=X_predict, error_type=index[5])
else:
linear_part = X_predict @ beta
x_scale_diag = x_scale(
X_mat=X_predict, error_type=index[5], linear_part=linear_part
)
cond_metrics_df = construc_cond_metric_df_simulation(
x_scale=x_scale_diag, result_pred_bands=res, y_predict=y_predict
)
df.at[index, "mean_interval_length"] = mean_interval_length
df.at[index, "mean_coverage"] = mean_coverage
df.at[index, "conditional_metrics_df"] = cond_metrics_df
df.at[index, "nodesize"] = nodesize_opt
previous_index = index
print("Nodesize opt is: " + str(nodesize_opt))
# Per-method averages over all replications (sum / n_sims).
result = (
df[["mean_interval_length", "mean_coverage"]].groupby(by=["method"]).sum() / n_sims
)
result.to_csv("process_" + str(process_type) + "_" + "x_scale_averages.csv")
# Aggregation stage: concatenate the per-replication conditional-metric
# arrays for each of the four methods into one long array per method.
for i in range(n_sims):
if i == 0:
res_mean_based = df.at[
conc_tuples(index_prep=index_prep[0], method="pred_band_mean_based", i=i),
"conditional_metrics_df",
]
# res_mean_based = df.at[(200, 10, "mixture", "auto", "t", "varying_third_moment_mu", "linear", 5, 1, True, "pred_band_mean_based", i), "conditional_metrics_df"]
else:
tmp = df.at[
conc_tuples(index_prep=index_prep[0], method="pred_band_mean_based", i=i),
"conditional_metrics_df",
]
res_mean_based = np.concatenate((res_mean_based, tmp), axis=0)
for i in range(n_sims):
if i == 0:
res_weighted_mean_based = df.at[
conc_tuples(
index_prep=index_prep[0], method="pred_band_weighted_mean_based", i=i
),
"conditional_metrics_df",
]
else:
tmp = df.at[
conc_tuples(
index_prep=index_prep[0], method="pred_band_weighted_mean_based", i=i
),
"conditional_metrics_df",
]
res_weighted_mean_based = np.concatenate((res_weighted_mean_based, tmp), axis=0)
for i in range(n_sims):
if i == 0:
res_quantile_based = df.at[
conc_tuples(
index_prep=index_prep[0], method="pred_band_quantile_based_rf", i=i
),
"conditional_metrics_df",
]
else:
tmp = df.at[
conc_tuples(
index_prep=index_prep[0], method="pred_band_quantile_based_rf", i=i
),
"conditional_metrics_df",
]
res_quantile_based = np.concatenate((res_quantile_based, tmp), axis=0)
for i in range(n_sims):
if i == 0:
res_cdf_based = df.at[
conc_tuples(index_prep=index_prep[0], method="pred_band_cdf_based", i=i),
"conditional_metrics_df",
]
else:
tmp = df.at[
conc_tuples(index_prep=index_prep[0], method="pred_band_cdf_based", i=i),
"conditional_metrics_df",
]
res_cdf_based = np.concatenate((res_cdf_based, tmp), axis=0)
# Trim the extreme 0.1% tails of the x-scale before kernel smoothing.
upper = 0.999
lower = 0.001
df_mean_based = pd.DataFrame(
{
"x_scale": res_mean_based[:, 0],
"length": res_mean_based[:, 1],
"coverage": res_mean_based[:, 2],
}
)
df_w_mean_based = pd.DataFrame(
{
"x_scale": res_weighted_mean_based[:, 0],
"length": res_weighted_mean_based[:, 1],
"coverage": res_weighted_mean_based[:, 2],
}
)
df_quantile_based = pd.DataFrame(
{
"x_scale": res_quantile_based[:, 0],
"length": res_quantile_based[:, 1],
"coverage": res_quantile_based[:, 2],
}
)
df_cdf_based = pd.DataFrame(
{
"x_scale": res_cdf_based[:, 0],
"length": res_cdf_based[:, 1],
"coverage": res_cdf_based[:, 2],
}
)
# if process_type == 1:
# factor = 0.3
# else:
# factor = 1
# Q1 = df_mean_based.quantile(lower)
# Q3 = df_mean_based.quantile(upper)
# IQR = Q3 - Q1
# df_mean_based_cleaned = df_mean_based[~((df_mean_based < (Q1 - 1.5 * IQR)) |(df_mean_based > (Q3 + 1.5 * IQR))).any(axis=1)]
Q3 = df_mean_based.x_scale.quantile(upper)
Q1 = df_mean_based.x_scale.quantile(lower)
df_mean_based_cleaned = df_mean_based[
(df_mean_based.x_scale < Q3) & (df_mean_based.x_scale > Q1)
]
####
# Q1 = df_w_mean_based.quantile(lower)
# Q3 = df_w_mean_based.quantile(upper)
# IQR = Q3 - Q1
# df_w_mean_based_cleaned = df_w_mean_based[~((df_w_mean_based < (Q1 - 1.5 * IQR)) |(df_w_mean_based > (Q3 + 1.5 * IQR))).any(axis=1)]
Q3 = df_w_mean_based.x_scale.quantile(upper)
Q1 = df_w_mean_based.x_scale.quantile(lower)
df_w_mean_based_cleaned = df_w_mean_based[
(df_w_mean_based.x_scale < Q3) & (df_w_mean_based.x_scale > Q1)
]
#####
# Q1 = df_quantile_based.quantile(lower)
# Q3 = df_quantile_based.quantile(upper)
# IQR = Q3 - Q1
# df_quantile_based_cleaned = df_quantile_based[~((df_quantile_based < (Q1 - 1.5 * IQR)) |(df_quantile_based > (Q3 + 1.5 * IQR))).any(axis=1)]
Q3 = df_quantile_based.x_scale.quantile(upper)
Q1 = df_quantile_based.x_scale.quantile(lower)
df_quantile_based_cleaned = df_quantile_based[
(df_quantile_based.x_scale < Q3) & (df_quantile_based.x_scale > Q1)
]
#######
# Q1 = df_cdf_based.quantile(lower)
# Q3 = df_cdf_based.quantile(upper)
# IQR = Q3 - Q1
# df_cdf_based_cleaned = df_cdf_based[~((df_cdf_based < (Q1 - 1.5 * IQR)) |(df_cdf_based > (Q3 + 1.5 * IQR))).any(axis=1)]
Q3 = df_cdf_based.x_scale.quantile(upper)
Q1 = df_cdf_based.x_scale.quantile(lower)
df_cdf_based_cleaned = df_cdf_based[
(df_cdf_based.x_scale < Q3) & (df_cdf_based.x_scale > Q1)
]
# Common evaluation grid spanning all four methods' trimmed x-scales.
x_scales_merged = np.concatenate(
(
np.array(df_mean_based_cleaned["x_scale"]),
np.array(df_w_mean_based_cleaned["x_scale"]),
np.array(df_quantile_based_cleaned["x_scale"]),
np.array(df_cdf_based_cleaned["x_scale"]),
)
)
minimum = np.min(x_scales_merged)
maximum = np.max(x_scales_merged)
grid = np.linspace(minimum, maximum, 1000)
print("Start.")
df_mean_based_cleaned.to_csv("process_" + str(process_type) + "_" + "df_mean_based_cleaned.csv")
df_w_mean_based_cleaned.to_csv("process_" + str(process_type) + "_" + "df_w_mean_based_cleaned.csv")
df_quantile_based_cleaned.to_csv("process_" + str(process_type) + "_" + "df_quantile_based_cleaned.csv")
df_cdf_based_cleaned.to_csv("process_" + str(process_type) + "_" + "df_cdf_based_cleaned.csv")
# Kernel-regress coverage (ordered var_type "o") and interval length
# (continuous var_type "c") on the x-scale, then dump the smoothed curves.
for mode in ["coverage", "length"]:
if mode == "coverage":
print("Coverage stage.")
kde_cov_mean_based = KernelReg(
endog=df_mean_based_cleaned["coverage"],
exog=df_mean_based_cleaned["x_scale"],
var_type="o",
)
kernel_fit_cov_mean_based, marginal_cov_mean_based = kde_cov_mean_based.fit(
data_predict=grid
)
##
print("Fitted mean based.")
kde_cov_weighted_mean_based = KernelReg(
endog=df_w_mean_based_cleaned["coverage"],
exog=df_w_mean_based_cleaned["x_scale"],
var_type="o",
)
(
kernel_fit_cov_weigthed_mean_based,
marginal_cov_weighted_mean_based,
) = kde_cov_weighted_mean_based.fit(data_predict=grid)
##
print("Fitted w. mean based.")
#
kde_cov_quantile_based = KernelReg(
endog=df_quantile_based_cleaned["coverage"],
exog=df_quantile_based_cleaned["x_scale"],
var_type="o",
)
(
kernel_fit_cov_quantile_based,
marginal_cov_quantile_based,
) = kde_cov_quantile_based.fit(data_predict=grid)
##
print("Fitted quantile_based.")
kde_cov_cdf_based = KernelReg(
endog=df_cdf_based_cleaned["coverage"],
exog=df_cdf_based_cleaned["x_scale"],
var_type="o",
)
kernel_fit_cov_cdf_based, marginal_cov_cdf_based = kde_cov_cdf_based.fit(
data_predict=grid
)
###
print("Fitted cdf_based.")
dataset = pd.DataFrame(
{
"cond_variance_y_grid": grid,
"mean_based_cond_coverage": kernel_fit_cov_mean_based,
"w_mean_based_cond_coverage": kernel_fit_cov_weigthed_mean_based,
"quantile_based_cond_coverage": kernel_fit_cov_quantile_based,
"cdf_based_cond_coverage": kernel_fit_cov_cdf_based,
}
)
dataset.to_csv(
"process_" + str(process_type) + "_" + mode + "_x_scale" + ".csv"
)
elif mode == "length":
print("Length stage.")
kde_cov_mean_based = KernelReg(
endog=df_mean_based_cleaned["length"],
exog=df_mean_based_cleaned["x_scale"],
var_type="c",
reg_type="lc",
)
kernel_fit_cov_mean_based, marginal_cov_mean_based = kde_cov_mean_based.fit(
data_predict=grid
)
##
print("Fitted mean based.")
kde_cov_weighted_mean_based = KernelReg(
endog=df_w_mean_based_cleaned["length"],
exog=df_w_mean_based_cleaned["x_scale"],
var_type="c",
)
(
kernel_fit_cov_weigthed_mean_based,
marginal_cov_weighted_mean_based,
) = kde_cov_weighted_mean_based.fit(data_predict=grid)
##
print("Fitted w. mean based.")
#
kde_cov_quantile_based = KernelReg(
endog=df_quantile_based_cleaned["length"],
exog=df_quantile_based_cleaned["x_scale"],
var_type="c",
)
(
kernel_fit_cov_quantile_based,
marginal_cov_quantile_based,
) = kde_cov_quantile_based.fit(data_predict=grid)
##
print("Fitted quantile_based.")
kde_cov_cdf_based = KernelReg(
endog=df_cdf_based_cleaned["length"],
exog=df_cdf_based_cleaned["x_scale"],
var_type="c",
)
kernel_fit_cov_cdf_based, marginal_cov_cdf_based = kde_cov_cdf_based.fit(
data_predict=grid
)
###
print("Fitted cdf_based.")
dataset = pd.DataFrame(
{
"cond_variance_y_grid": grid,
"mean_based_cond_length": kernel_fit_cov_mean_based,
"w_mean_based_cond_length": kernel_fit_cov_weigthed_mean_based,
"quantile_based_cond_length": kernel_fit_cov_quantile_based,
"cdf_based_cond_length": kernel_fit_cov_cdf_based,
}
)
dataset.to_csv(
"process_" + str(process_type) + "_" + mode + "n_1000" + ".csv"
)
else:
print("Mode not specified.")
```
#### File: src/simulations/task_cond_perf_simulations.py
```python
import pytask
from src.config import BLD
from src.config import SRC
import numpy as np
import json
from itertools import product
import pandas as pd
from statsmodels.nonparametric.kernel_regression import KernelReg
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from src.conformal_methods.utils import flatten, generate_X_fixed_positions, generate_y_fixed_positions, x_scale, construc_cond_metric_df_simulation
from src.conformal_methods.split_conformal_inference import SplitConformalRegressor
from src.conformal_methods.r_objects import QuantregForest
def conc_tuples(index_prep, method, i):
    """Extend a parameter index tuple with the method name and simulation id."""
    tail = (method, i)
    return index_prep + tail
def run_simulation(specs, produces):
    """Run the conditional-performance Monte-Carlo study described by *specs*.

    For every conformal method ("mean-based", "weighted-mean-based",
    "quantile-based", "cdf-based") and every simulation id, this draws a
    dataset, fits a split-conformal regressor, evaluates prediction
    intervals, and writes (a) average interval length/coverage per method,
    (b) per-observation conditional metrics, and (c) kernel-regression
    estimates of conditional coverage and length on a common grid, to the
    file paths given in *produces*.

    Parameters
    ----------
    specs : dict
        Simulation design loaded from a JSON spec file (sample sizes, DGP
        settings, forest hyper-parameters, number of simulations).
    produces : dict
        Mapping of output names to target CSV paths (supplied by pytask).
    """
    methods = ["mean-based", "weighted-mean-based", "quantile-based", "cdf-based"]
    simulation_ids = np.arange(specs["n_sims"])
    # The fixed (DGP-design) part of the result index; every row of the
    # result frame shares this prefix and differs only in method/sim id.
    index_prep = [(
        specs["n"],
        specs["p"],
        specs["X_dist"],
        specs["X_correlation"],
        specs["eps_dist"],
        specs["error_type"],
        specs["functional_form"],
        specs["non_zero_beta_count"],
        specs["uniform_upper"],
        bool(int(specs["standardized_X"])),
    )
    ]
    index = product(index_prep, methods, simulation_ids)
    index = flatten(l=list(index))
    index = pd.MultiIndex.from_tuples(
        index,
        names=(
            "n",
            "p",
            "X_dist",
            "X_correlation",
            "eps_dist",
            "error_type",
            "functional_form",
            "non_zero_beta_count",
            "uniform_upper",
            "standardized_X",
            "method",
            "simulation_id",
        ),
    )
    df = pd.DataFrame(
        columns=[
            "mean_interval_length",
            "mean_coverage",
            "conditional_metrics_df",
        ],
        index=index,
    )
    same_case_as_previous_round = False
    # Iterate over every (design, method, simulation_id) row of the frame.
    # NOTE(review): ``previous_index`` is only assigned at the end of each
    # iteration, so the ``index[11] != 0`` guards below rely on simulation
    # id 0 always coming first within each case.
    for index in df.index:
        if index[11] != 0:
            print("Previous index is: " + str(previous_index))
            print("Current index is: " + str(index))
        if index[11] != 0:
            same_case_as_previous_round = True
            for i in range(len(previous_index) - 1):
                if previous_index[i] != index[i]:
                    same_case_as_previous_round = False
        total_sample = index[0] + specs["pred_samples"]
        # draw samples:
        X = generate_X_fixed_positions(
            n=total_sample,
            p=index[1],
            X_dist=index[2],
            cor=index[3],
            standardize=index[9],
            uniform_upper=index[8],
        )
        # "stochastic_poisson" returns only y; the other functional forms
        # also return the error terms and true coefficients (beta is needed
        # later to build the x-scale in the multivariate case).
        if index[6] == "stochastic_poisson":
            y = generate_y_fixed_positions(
                X_mat=X,
                eps_dist=index[4],
                error_type=index[5],
                functional_form=index[6],
                non_zero_beta_count=index[7],
            )
        else:
            y, eps, sigma_vec, mu, beta = generate_y_fixed_positions(
                X_mat=X,
                eps_dist=index[4],
                error_type=index[5],
                functional_form=index[6],
                non_zero_beta_count=index[7],
            )
        X_predict, X_split_again, y_predict, y_split_again = train_test_split(
            X, y, train_size=specs["pred_samples"]
        )
        # X_train, X_conf, y_train, y_conf = train_test_split(
        #     X_split_again, y_split_again, train_size=specs["train_size"]
        # )
        # Random-forest mtry heuristic: p/3 regressors per split (min. 1).
        if X_split_again.shape[1] > 1:
            max_features = round(X_split_again.shape[1] / 3)
        elif X_split_again.shape[1] == 1:
            max_features = 1
        else:
            raise ValueError("X has a dimensionality problem, missing regressors.")
        # Fit the split-conformal regressor matching the current method.
        if (index[10] == "mean-based"):
            reg = SplitConformalRegressor(RandomForestRegressor, method="mean-based", conf_size=1-specs["train_size"], quantiles_to_fit=np.array([0.05,0.95]))
            reg = reg.fit(X=X_split_again, y=y_split_again, params={"min_samples_leaf": specs["nodesize"],
                                                        "max_features": max_features,
                                                        "n_estimators": specs["n_estimators"]})
            res = reg.predict_intervals(X_pred=X_predict, alpha=0.1)
        elif (index[10] == "weighted-mean-based"):
            reg = SplitConformalRegressor(RandomForestRegressor, method="weighted-mean-based", conf_size=1-specs["train_size"], quantiles_to_fit=np.array([0.05,0.95]))
            reg = reg.fit(X=X_split_again, y=y_split_again, params={"min_samples_leaf": specs["nodesize"],
                                                        "max_features": max_features,
                                                        "n_estimators": specs["n_estimators"]})
            res = reg.predict_intervals(X_pred=X_predict, alpha=0.1)
        elif (index[10] == "quantile-based"):
            reg = SplitConformalRegressor(QuantregForest, method="quantile-based", conf_size=1-specs["train_size"], quantiles_to_fit=np.array([0.05,0.95]))
            reg = reg.fit(X=X_split_again, y=y_split_again, params={"nodesize": specs["nodesize"], "mtry": max_features})
            res = reg.predict_intervals(X_pred=X_predict, alpha=0.1)
        elif (index[10] == "cdf-based"):
            reg = SplitConformalRegressor(QuantregForest, method="cdf-based", conf_size=1-specs["train_size"])
            reg = reg.fit(X=X_split_again, y=y_split_again, params={"nodesize": specs["nodesize"], "mtry": max_features})
            res = reg.predict_intervals(X_pred=X_predict, alpha=0.1)
        else:
            raise ValueError("Method misspecified.")
        # determine metrics:
        # res columns are [lower bound, upper bound] of the prediction band.
        length_bands = res[:, 1] - res[:, 0]
        mean_interval_length = np.mean(length_bands)
        in_the_range = np.sum((y_predict.flatten() >= res[:, 0]) & (y_predict.flatten() <= res[:, 1]))
        mean_coverage = in_the_range / len(y_predict)
        # this determines which x-scale should be used for the later plots (X in univariate, or X*beta in multivariate case)
        if index[5] == "simple_linear": # these are process_types 3 and 4 (univariate)
            x_scale_diag = x_scale(X_mat=X_predict, error_type=index[5])
        else:
            linear_part = X_predict @ beta
            x_scale_diag = x_scale(X_mat=X_predict, error_type=index[5], linear_part=linear_part)
        cond_metrics_df = construc_cond_metric_df_simulation(x_scale=x_scale_diag, result_pred_bands=res, y_predict=y_predict)
        df.at[index, "mean_interval_length"] = mean_interval_length
        df.at[index, "mean_coverage"] = mean_coverage
        df.at[index, "conditional_metrics_df"] = cond_metrics_df
        previous_index = index
    # after for loop and calculation, write average metrics into file:
    result = (df[["mean_interval_length", "mean_coverage"]].groupby(by=["method"]).sum() / specs["n_sims"])
    result.to_csv(produces["average_metrics_df"])
    # the following generates the kernel regression estimates for the four methods:
    # First stack the per-simulation conditional-metric arrays per method.
    for i in range(specs["n_sims"]):
        if i == 0:
            res_mean_based = df.at[conc_tuples(index_prep=index_prep[0], method="mean-based", i=i), "conditional_metrics_df",]
            # res_mean_based = df.at[(200, 10, "mixture", "auto", "t", "varying_third_moment_mu", "linear", 5, 1, True, "pred_band_mean_based", i), "conditional_metrics_df"]
        else:
            tmp = df.at[conc_tuples(index_prep=index_prep[0], method="mean-based", i=i), "conditional_metrics_df",]
            res_mean_based = np.concatenate((res_mean_based, tmp), axis=0)
    for i in range(specs["n_sims"]):
        if i == 0:
            res_weighted_mean_based = df.at[
                conc_tuples(
                    index_prep=index_prep[0], method="weighted-mean-based", i=i
                ),
                "conditional_metrics_df",
            ]
        else:
            tmp = df.at[
                conc_tuples(
                    index_prep=index_prep[0], method="weighted-mean-based", i=i
                ),
                "conditional_metrics_df",
            ]
            res_weighted_mean_based = np.concatenate((res_weighted_mean_based, tmp), axis=0)
    for i in range(specs["n_sims"]):
        if i == 0:
            res_quantile_based = df.at[
                conc_tuples(
                    index_prep=index_prep[0], method="quantile-based", i=i
                ),
                "conditional_metrics_df",
            ]
        else:
            tmp = df.at[
                conc_tuples(
                    index_prep=index_prep[0], method="quantile-based", i=i
                ),
                "conditional_metrics_df",
            ]
            res_quantile_based = np.concatenate((res_quantile_based, tmp), axis=0)
    for i in range(specs["n_sims"]):
        if i == 0:
            res_cdf_based = df.at[
                conc_tuples(index_prep=index_prep[0], method="cdf-based", i=i),
                "conditional_metrics_df",
            ]
        else:
            tmp = df.at[
                conc_tuples(index_prep=index_prep[0], method="cdf-based", i=i),
                "conditional_metrics_df",
            ]
            res_cdf_based = np.concatenate((res_cdf_based, tmp), axis=0)
    # Trim extreme x-scale outliers before kernel smoothing.
    upper = 0.999
    lower = 0.001
    # Stacked arrays have columns [x_scale, interval length, covered flag].
    df_mean_based = pd.DataFrame(
        {
            "x_scale": res_mean_based[:, 0],
            "length": res_mean_based[:, 1],
            "coverage": res_mean_based[:, 2],
        }
    )
    df_w_mean_based = pd.DataFrame(
        {
            "x_scale": res_weighted_mean_based[:, 0],
            "length": res_weighted_mean_based[:, 1],
            "coverage": res_weighted_mean_based[:, 2],
        }
    )
    df_quantile_based = pd.DataFrame(
        {
            "x_scale": res_quantile_based[:, 0],
            "length": res_quantile_based[:, 1],
            "coverage": res_quantile_based[:, 2],
        }
    )
    df_cdf_based = pd.DataFrame(
        {
            "x_scale": res_cdf_based[:, 0],
            "length": res_cdf_based[:, 1],
            "coverage": res_cdf_based[:, 2],
        }
    )
    Q3 = df_mean_based.x_scale.quantile(upper)
    Q1 = df_mean_based.x_scale.quantile(lower)
    df_mean_based_cleaned = df_mean_based[
        (df_mean_based.x_scale < Q3) & (df_mean_based.x_scale > Q1)
    ]
    Q3 = df_w_mean_based.x_scale.quantile(upper)
    Q1 = df_w_mean_based.x_scale.quantile(lower)
    df_w_mean_based_cleaned = df_w_mean_based[
        (df_w_mean_based.x_scale < Q3) & (df_w_mean_based.x_scale > Q1)
    ]
    Q3 = df_quantile_based.x_scale.quantile(upper)
    Q1 = df_quantile_based.x_scale.quantile(lower)
    df_quantile_based_cleaned = df_quantile_based[
        (df_quantile_based.x_scale < Q3) & (df_quantile_based.x_scale > Q1)
    ]
    Q3 = df_cdf_based.x_scale.quantile(upper)
    Q1 = df_cdf_based.x_scale.quantile(lower)
    df_cdf_based_cleaned = df_cdf_based[
        (df_cdf_based.x_scale < Q3) & (df_cdf_based.x_scale > Q1)
    ]
    # Common evaluation grid spanning all four methods' x-scales.
    x_scales_merged = np.concatenate(
        (
            np.array(df_mean_based_cleaned["x_scale"]),
            np.array(df_w_mean_based_cleaned["x_scale"]),
            np.array(df_quantile_based_cleaned["x_scale"]),
            np.array(df_cdf_based_cleaned["x_scale"]),
        )
    )
    minimum = np.min(x_scales_merged)
    maximum = np.max(x_scales_merged)
    grid = np.linspace(minimum, maximum, 1000)
    print("Start.")
    df_mean_based_cleaned.to_csv(produces["conditional_res_mean_based"])
    df_w_mean_based_cleaned.to_csv(produces["conditional_res_w_mean_based"])
    df_quantile_based_cleaned.to_csv(produces["conditional_res_quantile_based"])
    df_cdf_based_cleaned.to_csv(produces["conditional_res_cdf_based"])
    # generate kernel estimates:
    # Coverage uses var_type="o" (ordered discrete 0/1 flag); length uses
    # var_type="c" (continuous).  Note: the "weigthed" typo in the variable
    # names below is kept as-is because the output columns depend on it.
    for mode in ["coverage", "length"]:
        if mode == "coverage":
            print("Coverage stage.")
            kde_cov_mean_based = KernelReg(
                endog=df_mean_based_cleaned["coverage"],
                exog=df_mean_based_cleaned["x_scale"],
                var_type="o",
            )
            kernel_fit_cov_mean_based, marginal_cov_mean_based = kde_cov_mean_based.fit(
                data_predict=grid
            )
            ##
            print("Fitted mean based.")
            kde_cov_weighted_mean_based = KernelReg(
                endog=df_w_mean_based_cleaned["coverage"],
                exog=df_w_mean_based_cleaned["x_scale"],
                var_type="o",
            )
            (
                kernel_fit_cov_weigthed_mean_based,
                marginal_cov_weighted_mean_based,
            ) = kde_cov_weighted_mean_based.fit(data_predict=grid)
            ##
            print("Fitted w. mean based.")
            #
            kde_cov_quantile_based = KernelReg(
                endog=df_quantile_based_cleaned["coverage"],
                exog=df_quantile_based_cleaned["x_scale"],
                var_type="o",
            )
            (
                kernel_fit_cov_quantile_based,
                marginal_cov_quantile_based,
            ) = kde_cov_quantile_based.fit(data_predict=grid)
            ##
            print("Fitted quantile_based.")
            kde_cov_cdf_based = KernelReg(
                endog=df_cdf_based_cleaned["coverage"],
                exog=df_cdf_based_cleaned["x_scale"],
                var_type="o",
            )
            kernel_fit_cov_cdf_based, marginal_cov_cdf_based = kde_cov_cdf_based.fit(
                data_predict=grid
            )
            ###
            print("Fitted cdf_based.")
            dataset = pd.DataFrame(
                {
                    "cond_variance_y_grid": grid,
                    "mean_based_cond_coverage": kernel_fit_cov_mean_based,
                    "w_mean_based_cond_coverage": kernel_fit_cov_weigthed_mean_based,
                    "quantile_based_cond_coverage": kernel_fit_cov_quantile_based,
                    "cdf_based_cond_coverage": kernel_fit_cov_cdf_based,
                }
            )
            dataset.to_csv(produces["final_kernel_estimated_coverage"])
        elif mode == "length":
            print("Length stage.")
            kde_cov_mean_based = KernelReg(
                endog=df_mean_based_cleaned["length"],
                exog=df_mean_based_cleaned["x_scale"],
                var_type="c",
                reg_type="lc",
            )
            kernel_fit_cov_mean_based, marginal_cov_mean_based = kde_cov_mean_based.fit(
                data_predict=grid
            )
            ##
            print("Fitted mean based.")
            kde_cov_weighted_mean_based = KernelReg(
                endog=df_w_mean_based_cleaned["length"],
                exog=df_w_mean_based_cleaned["x_scale"],
                var_type="c",
            )
            (
                kernel_fit_cov_weigthed_mean_based,
                marginal_cov_weighted_mean_based,
            ) = kde_cov_weighted_mean_based.fit(data_predict=grid)
            ##
            print("Fitted w. mean based.")
            #
            kde_cov_quantile_based = KernelReg(
                endog=df_quantile_based_cleaned["length"],
                exog=df_quantile_based_cleaned["x_scale"],
                var_type="c",
            )
            (
                kernel_fit_cov_quantile_based,
                marginal_cov_quantile_based,
            ) = kde_cov_quantile_based.fit(data_predict=grid)
            ##
            print("Fitted quantile_based.")
            kde_cov_cdf_based = KernelReg(
                endog=df_cdf_based_cleaned["length"],
                exog=df_cdf_based_cleaned["x_scale"],
                var_type="c",
            )
            kernel_fit_cov_cdf_based, marginal_cov_cdf_based = kde_cov_cdf_based.fit(
                data_predict=grid
            )
            ###
            print("Fitted cdf_based.")
            dataset = pd.DataFrame(
                {
                    "cond_variance_y_grid": grid,
                    "mean_based_cond_length": kernel_fit_cov_mean_based,
                    "w_mean_based_cond_length": kernel_fit_cov_weigthed_mean_based,
                    "quantile_based_cond_length": kernel_fit_cov_quantile_based,
                    "cdf_based_cond_length": kernel_fit_cov_cdf_based,
                }
            )
            dataset.to_csv(produces["final_kernel_estimated_length"])
        else:
            print("Mode not specified.")
@pytask.mark.parametrize("depends_on, produces",
    [
        (
            {
                "type": SRC / "simulations" / "specs" / f"cond_sim_type_{sim_type}.json",
            },
            {
                "average_metrics_df": BLD / "simulations" / "cond_perf_simulations" / f"average_results_{sim_type}.csv",
                "conditional_res_mean_based": BLD / "simulations" / "cond_perf_simulations" / "cond_detailed_dfs" / f"cond_res_mean_based_{sim_type}.csv",
                "conditional_res_w_mean_based": BLD / "simulations" / "cond_perf_simulations" / "cond_detailed_dfs" / f"cond_res_w_mean_based_{sim_type}.csv",
                "conditional_res_quantile_based": BLD / "simulations" / "cond_perf_simulations" / "cond_detailed_dfs" / f"cond_res_quantile_based_{sim_type}.csv",
                "conditional_res_cdf_based": BLD / "simulations" / "cond_perf_simulations" / "cond_detailed_dfs" / f"cond_res_cdf_based_{sim_type}.csv",
                "final_kernel_estimated_coverage": BLD / "simulations" / "cond_perf_simulations" / f"kernel_coverage_results_{sim_type}.csv",
                "final_kernel_estimated_length": BLD / "simulations" / "cond_perf_simulations" / f"kernel_length_results_{sim_type}.csv",
            }
        )
        # "sim_type" (not "type") avoids shadowing the builtin ``type``.
        for sim_type in [1, 2, 3, 4]
    ],
)
def task_cond_perf_simulations(depends_on, produces):
    """Pytask entry point: load one simulation spec and run the study.

    Parametrized over the four process types; reads the JSON spec named by
    ``depends_on["type"]`` and forwards it to :func:`run_simulation`.
    """
    # dictionary imported into "specs":
    specs = json.loads(depends_on["type"].read_text(encoding="utf-8"))
    run_simulation(specs, produces)
```
#### File: src/simulations/task_plot_cond_perf_sim_results.py
```python
import pytask
from src.config import BLD
from src.config import SRC
import pandas as pd
import matplotlib.pyplot as plt
import json
@pytask.mark.parametrize("depends_on, produces, id",
    [
        (
            {
                "df_oracle_lengths": SRC / "simulations" / "helper_tables" / f"oracle_lengths_table_process_{sim_type}.csv",
                "df_coverage": BLD / "simulations" / "cond_perf_simulations" / f"kernel_coverage_results_{sim_type}.csv",
                "df_length": BLD / "simulations" / "cond_perf_simulations" / f"kernel_length_results_{sim_type}.csv",
                "specs_dgp": SRC / "simulations" / "specs" / f"cond_sim_type_{sim_type}.json",
            },
            {
                "build": BLD / "simulations" / "cond_perf_simulations" / f"plot_conditional_surface_{sim_type}.pdf",
            },
            {
                "type": int(sim_type),
            }
        )
        # "sim_type" (not "type") avoids shadowing the builtin ``type``.
        for sim_type in [1, 2, 3, 4]
    ],
)
def task_cond_perf_sim_results(depends_on, produces, id):
    """Pytask entry point: plot the conditional-performance surface.

    Imports the process-specific ``plot_process`` implementation selected by
    ``id["type"]`` and feeds it the kernel-estimated coverage/length tables,
    the oracle lengths, and the DGP spec.
    """
    # Each process type has its own plotting module; import lazily so only
    # the needed one is loaded.
    if id["type"] == 1:
        from src.simulations.plotting_code_cond_sims.plot_code_sim_1 import plot_process
    elif id["type"] == 2:
        from src.simulations.plotting_code_cond_sims.plot_code_sim_2 import plot_process
    elif id["type"] == 3:
        from src.simulations.plotting_code_cond_sims.plot_code_sim_3 import plot_process
    elif id["type"] == 4:
        from src.simulations.plotting_code_cond_sims.plot_code_sim_4 import plot_process
    else:
        raise ValueError("No correct type specified.")
    df_length = pd.read_csv(depends_on["df_length"])
    df_coverage = pd.read_csv(depends_on["df_coverage"])
    df_oracle_lengths = pd.read_csv(depends_on["df_oracle_lengths"])
    specs = json.loads(depends_on["specs_dgp"].read_text(encoding="utf-8"))
    plot_process(produces=produces["build"], df_length=df_length,
                 df_coverage=df_coverage, df_oracle_lengths=df_oracle_lengths,
                 specs_dgp=specs)
``` |
{
"source": "josefondrej/glucoscan",
"score": 2
} |
#### File: lcd_digit_recognizer/recognition/digit_net.py
```python
from operator import itemgetter
from lcd_digit_recognizer.recognition.primitives.digit_center import DigitCenter
import numpy as np
from lcd_digit_recognizer.recognition.utils import angle_between, linear_clustering, ratio_error, span_clustering, \
stair_clustering, merge_clustering
class DigitNet(object):
    """Graph of hypothesised seven-segment digit centres.

    Centre hypotheses are added one by one; on first read access the
    hypotheses are merged (close centres collapsed, neighbour and cocenter
    links filled in) and the net becomes read-only.
    """
    def __init__(self):
        # All DigitCenter hypotheses collected so far.
        self._digit_centers = []
        # Once True, the centres have been merged and no more hypotheses
        # may be added.
        self._are_centers_merged = False
    def add_hypothesis(self, x, y, owner):
        """Register a centre hypothesis at (x, y) voted for by *owner*."""
        if self._are_centers_merged:
            raise AssertionError("Can't add hypothesis now")
        self._digit_centers.append(DigitCenter(x, y, owner))
    def get_aligned_center_buckets(self):
        """Group centre/cocenter pairs into buckets of mutually aligned pairs.

        Pairs are first clustered by the absolute angle between centre and
        cocenter, then each angle cluster is sub-clustered by projecting the
        centres onto an axis perpendicular to the cluster's median angle.
        Returns the sub-clusters sorted by size, largest first.
        """
        self._ensure_merged_centers()
        measured_centers = []
        for center in self.centers:
            for cocenter in center.cocenters:
                measured_centers.append((center, cocenter, center.absolute_angle_to(cocenter)))
        clusters = merge_clustering(measured_centers, lambda v: v[2], 10)
        if len(clusters) > 1:
            # because the metric is non-continuous between 0-360 deg
            # join first and last cluster which corresponds to that part
            clusters[0].extend(clusters[-1])
            clusters.pop(-1)
        center_buckets = []
        for cluster in clusters:
            if len(cluster) < 2:
                continue
            # Project along the direction perpendicular (+90 deg) to the
            # cluster's median pair angle.
            projection_angle = cluster[len(cluster) // 2][2] + 90
            projection_rads = np.deg2rad(projection_angle)
            direction = np.array([np.cos(projection_rads), np.sin(projection_rads)])
            # subcluster by alignment
            # Choose the better-conditioned axis for the line/axis
            # intersection to avoid dividing by a small component.
            if direction[1] > direction[0]:
                def projection_metric(v):
                    L = -v[0].y / direction[1]
                    p = v[0].x + L * direction[0]
                    return p, L, (p, 0)
            else:
                def projection_metric(v):
                    L = -v[0].x / direction[0]
                    p = v[0].y + L * direction[1]
                    return p, L, (0, p)
            projected_cluster = []
            for item in cluster:
                p, L, hit = projection_metric(item)
                projected_cluster.append((item[0], item[1], p, L, hit))
            # subclusters = linear_clustering(projected_cluster, itemgetter(2), 20)
            subclusters = merge_clustering(projected_cluster, itemgetter(2), 10)
            subclusters.sort(key=lambda c: len(c), reverse=True)
            # return subclusters[-7:-6] + subclusters[1:3]
            for subcluster in subclusters:
                # Deduplicate and order along the projection axis.
                subcluster = list(set(subcluster))
                subcluster.sort(key=itemgetter(3), reverse=True)
                if len(subcluster) < 2:
                    continue
                center_buckets.append(subcluster)
        center_buckets.sort(key=lambda b: len(b), reverse=True)
        return center_buckets
    @property
    def centers(self):
        # Merged, deduplicated digit centres (triggers merging on first use).
        self._ensure_merged_centers()
        return self._digit_centers
    def _ensure_merged_centers(self):
        """Merge the raw hypotheses exactly once (idempotent)."""
        if self._are_centers_merged:
            return # nothing to do
        self._are_centers_merged = True
        self._collapse_close_centers()
        self._fill_neighbours()
        self._fill_cocenters()
        # self._prune_alone_cocenters()
    def _fill_cocenters(self):
        """Link neighbouring centres whose distance matches a segment length."""
        center_pool = list(self._digit_centers)
        while center_pool:
            current_center = center_pool.pop(0)
            for center in center_pool:
                if center not in current_center.neighbours:
                    continue
                average_segment_length = (current_center.average_segment_length + center.average_segment_length) / 2
                distance_length_diff = abs(current_center.distance_to(center) - average_segment_length)
                # Accept when the centre distance deviates from the average
                # segment length by less than 80 % of that length.
                if distance_length_diff / average_segment_length < 0.8:
                    current_center.add_cocenter(center)
    def _fill_neighbours(self):
        """Link centre pairs that are compatible in distance, angle and size."""
        center_pool = list(self._digit_centers)
        while center_pool:
            current_center = center_pool.pop(0)
            for center in center_pool:
                distance = current_center.distance_to(center)
                # Distance must be between 0.7x and 2.5x the segment length.
                if distance > current_center.average_segment_length * 2.5:
                    continue
                if distance < current_center.average_segment_length * 0.7:
                    continue
                aligned_angle_distance = max(
                    current_center.aligned_angle_distance_to(center),
                    center.aligned_angle_distance_to(current_center)
                )
                if aligned_angle_distance > 15:
                    continue
                if ratio_error(current_center.average_segment_length, center.average_segment_length) > 0.3:
                    continue
                current_center.try_add_neighbour(center)
    def _collapse_close_centers(self):
        """Merge each centre into its nearest peer when closer than half a segment."""
        center_pool = list(self._digit_centers)
        while center_pool:
            current_center = center_pool.pop(0)
            best_center = None
            best_distance = None
            for center in center_pool:
                # if not current_center.can_merge_with(center):
                #     continue
                distance = current_center.distance_to(center)
                if best_distance is None or best_distance > distance:
                    best_distance = distance
                    best_center = center
            if best_distance and best_distance < best_center.average_segment_length / 2:
                best_center.merge_with(current_center)
                self._digit_centers.remove(current_center)
```
#### File: lcd_digit_recognizer/recognition/physical_line_recognizer.py
```python
from typing import List
from lcd_digit_recognizer.recognition.primitives.open_line_wave import OpenLineWave
from lcd_digit_recognizer.recognition.primitives.physical_line import PhysicalLine
from lcd_digit_recognizer.recognition.primitives.scan_line import ScanLine
class PhysicalLineRecognizer(object):
    """Accumulates scan lines and extracts physical (multi-scan) lines.

    Raw pixel lines are fed in order via :meth:`accept`;
    :meth:`get_physical_lines` then replays their segment centres through an
    :class:`OpenLineWave`, which assembles them into :class:`PhysicalLine`
    objects.
    """
    def __init__(self, is_vertical):
        # Wave tuning: how long a line may stall and how many scan lines
        # it may skip before being closed.
        self._stall_limit = 1
        self._physical_line_skip_threshold = 2
        self._lines = []
        # Scan orientation, forwarded to the wave.
        self._is_vertical = is_vertical
        self._current_line_index = 0
    def accept(self, line):
        """Wrap the raw pixel *line* in a ScanLine and store it in order."""
        line = ScanLine(self._current_line_index, line)
        self._current_line_index += 1
        self._lines.append(line)
    def get_physical_lines(self) -> List[PhysicalLine]:
        """Replay all stored scan lines through a fresh wave and return its lines."""
        wave = OpenLineWave(self._physical_line_skip_threshold, self._stall_limit, self._is_vertical)
        for line in self._lines:
            for center_point_index, width in line.physical_line_center_indexes():
                wave.accept(center_point_index, width)
            wave.move()
        # NOTE(review): reads the wave's private ``_collected_lines``; a
        # public accessor on OpenLineWave would be cleaner.
        return wave._collected_lines
```
#### File: recognition/primitives/digit_center.py
```python
import math
from lcd_digit_recognizer.recognition.utils import unit_vector, absolute_angle, calculate_angle_distance
class DigitCenter(object):
    """A hypothesised centre point of a seven-segment digit.

    A centre aggregates the line segments ("voters") that proposed it and
    keeps bidirectional links to compatible nearby centres (``neighbours``)
    and to centres roughly one segment length away (``cocenters``).
    """

    def __init__(self, x, y, voter):
        self._x = x
        self._y = y
        # Segments that voted for this centre; must be hashable.
        self._voters = set([voter])
        self._neighbours = []
        self._cocenters = []

    @property
    def x(self):
        """Integer x coordinate (truncated)."""
        return int(self._x)

    @property
    def y(self):
        """Integer y coordinate (truncated)."""
        return int(self._y)

    def as_point(self):
        """Return the centre as an ``(x, y)`` tuple of ints."""
        return (self.x, self.y)

    @property
    def average_segment_length(self):
        """Mean ``metric_length`` over all voters."""
        length_sum = sum(v.metric_length for v in self._voters)
        return length_sum / len(self._voters)

    @property
    def average_segment_angle(self):
        """Mean ``absolute_angle`` over all voters (naive arithmetic mean)."""
        angle_sum = sum(v.absolute_angle for v in self._voters)
        return angle_sum / len(self._voters)

    @property
    def average_segment_aligned_angle(self):
        # Currently just the angle of an arbitrary voter.
        for voter in self._voters:
            # TODO add aligning averaging algorithm
            return voter.absolute_angle

    @property
    def neighbours(self):
        return self._neighbours

    @property
    def cocenters(self):
        return self._cocenters

    def absolute_angle_to(self, cocenter):
        """Absolute angle (degrees) of the direction from self to *cocenter*."""
        dx = cocenter.x - self.x
        dy = cocenter.y - self.y
        direction = unit_vector([dx, dy])
        return absolute_angle(direction)

    def aligned_angle_distance_to(self, center):
        """Angular distance to *center* modulo 90 degrees (segment symmetry)."""
        a1 = self.absolute_angle_to(center)
        a2 = self.average_segment_aligned_angle
        return min(
            calculate_angle_distance(a1, a2),
            calculate_angle_distance(a1, a2 + 90),
            calculate_angle_distance(a1, a2 + 180),
            calculate_angle_distance(a1, a2 + 270),
        )

    def try_add_neighbour(self, neighbour):
        """Create a symmetric neighbour link unless it already exists."""
        if neighbour is self:
            return
        if neighbour in self._neighbours:
            return
        self._neighbours.append(neighbour)
        neighbour._neighbours.append(self)

    def add_cocenter(self, cocenter):
        """Create a symmetric cocenter link."""
        self._cocenters.append(cocenter)
        cocenter._cocenters.append(self)

    def remove_cocenter(self, cocenter):
        """Remove a symmetric cocenter link."""
        self._cocenters.remove(cocenter)
        cocenter._cocenters.remove(self)

    def merge_with(self, digit_center):
        """Absorb *digit_center*: voter-weighted position and union of voters."""
        svc = len(self._voters)
        ovc = len(digit_center._voters)
        tc = svc + ovc
        self._x = (self._x * svc + digit_center._x * ovc) / tc
        self._y = (self._y * svc + digit_center._y * ovc) / tc
        # BUG FIX: the original called ``self._voters.union(...)`` and
        # discarded the returned set, so merged centres silently lost the
        # other centre's voters.  ``update`` mutates in place.
        self._voters.update(digit_center._voters)

    def can_merge_with(self, digit_center):
        # Placeholder: merging is currently always allowed.
        return True

    def distance_to(self, digit_center):
        """Euclidean distance between the (float) centre positions."""
        return math.sqrt((self._x - digit_center._x) ** 2 + (self._y - digit_center._y) ** 2)

    def __repr__(self):
        return f"({self._x},{self._y})"
```
#### File: recognition/primitives/line_cluster.py
```python
import math
class LineCluster(object):
    """A group of physical lines with bounding-box and hierarchy helpers.

    Lines are expected to expose ``points`` (iterable of ``(x, y)``),
    ``average_width``, ``length`` and ``distance_to_line``.  Clusters can be
    linked into a parent/child hierarchy via :meth:`join_to`; the
    ``family_*`` properties aggregate over the whole subtree.
    """

    def __init__(self, *lines):
        self._lines = list(lines)
        # Cached bounding-box corners (computed lazily).
        self._tc = None
        self._bc = None
        self._parent = None
        self._children = []

    @property
    def lines(self):
        """A shallow copy of the member lines."""
        return list(self._lines)

    @property
    def line_count(self):
        return len(self._lines)

    @property
    def points(self):
        """Yield every point of every member line."""
        for line in self._lines:
            for point in line.points:
                yield point

    @property
    def top_corner(self):
        """Minimal (x, y) over all points (cached)."""
        if self._tc is None:
            self._tc = min(p[0] for p in self.points), min(p[1] for p in self.points)
        return self._tc

    @property
    def bottom_corner(self):
        """Maximal (x, y) over all points (cached)."""
        if self._bc is None:
            self._bc = max(p[0] for p in self.points), max(p[1] for p in self.points)
        return self._bc

    @property
    def family_top_corner(self):
        """Minimal corner over this cluster and all descendant clusters."""
        tcx, tcy = self.top_corner
        for child in self._children:
            x, y = child.family_top_corner
            tcx = min(tcx, x)
            tcy = min(tcy, y)
        return tcx, tcy

    @property
    def family_bottom_corner(self):
        """Maximal corner over this cluster and all descendant clusters."""
        bcx, bcy = self.bottom_corner
        for child in self._children:
            x, y = child.family_bottom_corner
            bcx = max(bcx, x)
            bcy = max(bcy, y)
        return bcx, bcy

    @property
    def family_width(self):
        return self.family_bottom_corner[0] - self.family_top_corner[0]

    @property
    def family_height(self):
        return self.family_bottom_corner[1] - self.family_top_corner[1]

    @property
    def width(self):
        return self.bottom_corner[0] - self.top_corner[0]

    @property
    def height(self):
        return self.bottom_corner[1] - self.top_corner[1]

    @property
    def center(self):
        """Integer midpoint of the bounding box."""
        tc = self.top_corner
        bc = self.bottom_corner
        return (tc[0] + bc[0]) // 2, (tc[1] + bc[1]) // 2

    @property
    def line_width(self):
        """Mean ``average_width`` of the member lines."""
        return sum(l.average_width for l in self._lines) / len(self._lines)

    @property
    def line_length(self):
        """Mean ``length`` of the member lines."""
        return sum(l.length for l in self._lines) / len(self._lines)

    @property
    def is_leader(self):
        """True when this cluster has no parent."""
        return self._parent is None

    def add(self, line):
        """Add a line to the cluster.

        BUG FIX: the cached bounding-box corners are invalidated here; the
        original kept stale corners when a line was added after the corners
        had already been computed.
        """
        self._lines.append(line)
        self._tc = None
        self._bc = None

    def join_to(self, cluster):
        """Make *cluster* this cluster's parent."""
        self._parent = cluster
        cluster._children.append(self)

    def get_width_distance(self, line):
        """Largest average-width difference between *line* and any member."""
        return max(abs(l.average_width - line.average_width) for l in self._lines)

    def get_line_distance(self, line):
        """Smallest distance between *line* and any member line."""
        return min(line.distance_to_line(l) for l in self._lines)

    def get_cluster_distance(self, cluster):
        """Euclidean distance between the two cluster centres."""
        cx, cy = self.center
        ocx, ocy = cluster.center
        return math.sqrt((cx - ocx) ** 2 + (cy - ocy) ** 2)

    def get_join_error(self, cluster):
        """Smaller of the width and height mismatches between the clusters."""
        return min(abs(self.width - cluster.width), abs(self.height - cluster.height))
```
#### File: recognition/primitives/scan_line.py
```python
from typing import List, Tuple
import numpy as np
class ScanLine(object):
    """One scan of a binary pixel line, reduced to segment centre points.

    Consecutive active pixels closer than 5 apart are merged; whenever the
    gap from the last recorded active pixel reaches 5 or more, the midpoint
    of that span is stored together with the span width.
    """

    def __init__(self, line_index, pixel_line):
        self.line_index = line_index
        self._centers = []
        active = np.nonzero(pixel_line)[0]
        if not len(active):
            return
        previous = active[0]
        for current in active[1:]:
            gap = abs(previous - current)
            # Too close: keep extending the current span.
            if gap < 5:
                continue
            self._centers.append(((previous + current) // 2, gap))
            previous = current

    def physical_line_center_indexes(self) -> List[Tuple[int, int]]:
        """Return a copy of the recorded ``(center_index, width)`` pairs."""
        return list(self._centers)

    @property
    def is_empty(self):
        """True when no centre point was detected on this scan line."""
        return not self._centers
```
#### File: lcd_digit_recognizer/visualization/drawing.py
```python
import colorsys
from typing import List
import cv2
import numpy as np
from lcd_digit_recognizer.recognition.digit_net2 import DigitNet2
from lcd_digit_recognizer.recognition.primitives.digit_hypothesis import DigitHypothesis
from lcd_digit_recognizer.recognition.primitives.physical_line import PhysicalLine
from lcd_digit_recognizer.visualization.utils import highlight_point, write_text, recoordinate
def draw_recognized_digits(output_img, digit_hyps):
    """Draw each digit hypothesis chain onto *output_img* in a distinct hue.

    For every hypothesis the chain of linked digits (``_next_digit``) is
    walked: centre/cocenter points are highlighted and the digit value is
    written next to the centre; the hypothesis' bucket pairs are drawn as
    thin connecting lines.
    """
    if not digit_hyps:
        return
    # Hue stepping: ~0.5 + 1/(n+1) per hypothesis, wrapped modulo 1.0, so
    # consecutive hypotheses get clearly different colours.
    segment_size = 1.0 / (len(digit_hyps) + 1)
    segment_inc = segment_size + 0.5
    h = 0
    for hyp in digit_hyps:
        c = colorsys.hls_to_rgb(h % 1.0, 0.5, 1.0)
        color = (c[0] * 255, c[1] * 255, c[2] * 255)
        current_digit = hyp
        # Walk the linked list of digits belonging to this hypothesis.
        while current_digit is not None:
            center = current_digit._center
            cocenter = current_digit._cocenter
            highlight_point(output_img, center, color=color, size=10)
            highlight_point(output_img, cocenter, color=color, size=5)
            # cv2.line(output_img, recoordinate(center.as_point()), recoordinate(cocenter.as_point()), thickness=3,
            #          color=color)
            write_text(output_img, [center.x, center.y + 10], str(current_digit.value), 0.8, color)
            current_digit = current_digit._next_digit
        for b in hyp.bucket:
            center, cocenter = b[0:2]
            highlight_point(output_img, center, color, size=3)
            cv2.line(output_img, recoordinate(center.as_point()), recoordinate(cocenter.as_point()), thickness=1,
                     color=color)
        h += segment_inc
def draw_aligned_center_buckets(output_img, net):
    """Draw the net's aligned centre buckets, one colour per bucket.

    For each (center, cocenter, ...) tuple the centre is highlighted, the
    centre-cocenter connection is drawn, and — when the tuple carries a
    projection hit point (index 4) — the projection line is drawn too.
    """
    buckets = net.get_aligned_center_buckets()
    # Hue stepping per bucket (see draw_recognized_digits).
    segment_size = 1.0 / (len(buckets) + 1)
    segment_inc = segment_size + 0.5
    h = 0
    for bucket in buckets:
        c = colorsys.hls_to_rgb(h % 1.0, 0.5, 1.0)
        color = (c[0] * 255, c[1] * 255, c[2] * 255)
        for b in bucket:
            center, cocenter = b[0:2]
            # print(b[2])
            if len(b) > 4:
                cv2.line(output_img, recoordinate(center.as_point()), recoordinate(b[4]), thickness=1, color=color)
            highlight_point(output_img, center.as_point(), color)
            cv2.line(output_img, recoordinate(center.as_point()), recoordinate(cocenter.as_point()), thickness=1,
                     color=color)
        h += segment_inc
def draw_aligned_center_buckets_simple(output_img, net):
    """Like :func:`draw_aligned_center_buckets` but draws only the
    centre-cocenter connection lines (no highlights, no projection lines)."""
    buckets = net.get_aligned_center_buckets()
    # Hue stepping per bucket (see draw_recognized_digits).
    segment_size = 1.0 / (len(buckets) + 1)
    segment_inc = segment_size + 0.5
    h = 0
    for bucket in buckets:
        c = colorsys.hls_to_rgb(h % 1.0, 0.5, 1.0)
        color = (c[0] * 255, c[1] * 255, c[2] * 255)
        for b in bucket:
            center, cocenter = b[0:2]
            cv2.line(output_img, recoordinate(center.as_point()), recoordinate(cocenter.as_point()), thickness=1,
                     color=color)
        h += segment_inc
def draw_lines(img, lines):
    """Paint each line's pixels and its two normal end segments onto *img*.

    All lines are drawn in one fixed colour; individual pixels of the line
    are set directly on the image array.
    """
    color = (255, 64, 64)
    for line in lines:
        # print(f"angle: {line.absolute_angle}")
        # print(f"dir: {line.direction}")
        normal_end1, normal_end2 = line.normal_points
        # print(f"center: {line.center}")
        # print(f"end: {normal_end1}")
        thickness = 2
        cv2.line(img, recoordinate(line.center), recoordinate(normal_end1), color=color, thickness=thickness)
        cv2.line(img, recoordinate(line.center), recoordinate(normal_end2), color=color, thickness=thickness)
        # Paint the raw line pixels; points are (row, col) into the array.
        for x, y in line.points:
            img[x, y] = color
def draw_cocenter_net(img, net):
    """Draw every centre of *net* that has cocenters, plus its cocenter links.

    Centres are marked as thick red dots; each centre-cocenter connection is
    drawn as a thin red line.  The angle/direction computations feed the
    commented-out debug arrows and are otherwise unused.
    """
    for center in net.centers:
        if not center.cocenters:
            continue
        # Zero-length thick line = a dot at the centre position.
        cv2.line(img, recoordinate([center.x, center.y]), recoordinate([center.x, center.y]), thickness=4,
                 color=(0, 0, 255))
        angle = center.average_segment_aligned_angle
        rad = np.deg2rad(angle)
        dir = np.array([np.cos(rad), np.sin(rad)])
        p = np.array(center.as_point()) + dir * 10
        # cv2.line(img, recoordinate(center.as_point()), recoordinate(p), thickness=2, color=(0, 255, 0))
        for cocenter in center.cocenters:
            cv2.line(img, recoordinate([center.x, center.y]), recoordinate([cocenter.x, cocenter.y]), thickness=1,
                     color=(0, 0, 255))
            angle = center.absolute_angle_to(cocenter)
            rad = np.deg2rad(angle)
            dir2 = np.array([np.cos(rad), np.sin(rad)])
            p2 = np.array(center.as_point()) + dir2 * 10
            # cv2.line(img, recoordinate(center.as_point()), recoordinate(p2), thickness=2, color=(255, 0, 0))
def draw_physical_line_clusters(output_img, lines: List[List[PhysicalLine]]):
    """Paint every physical-line cluster with its own colour.

    Clusters with fewer than two lines are skipped; the pixels of each line
    in a cluster are set directly on the image array.
    """
    lines = list(filter(lambda c: len(c) > 1, lines))
    # Hue stepping per cluster (see draw_recognized_digits).
    segment_size = 1.0 / (len(lines) + 1)
    segment_inc = segment_size + 0.5
    h = 0
    for line_cluster in lines:
        c = colorsys.hls_to_rgb(h % 1.0, 0.5, 1.0)
        color = (c[0] * 255, c[1] * 255, c[2] * 255)
        for line in line_cluster:
            for point in line.points:
                output_img[point[0], point[1]] = color
            # points = list(line.points)
            # cv2.line(output_img, recoordinate(points[0]), recoordinate(points[-1]), thickness=1,
            #          color=color)
        h += segment_inc
def draw_join_points(output_img, lines: List[List[PhysicalLine]], net: DigitNet2):
    """Traverse each cluster's joint graph and draw the join points.

    Starting from the first line of every cluster, a breadth-first walk over
    ``line.joints`` draws the two half-segments meeting at each join point
    and highlights the join point itself.  Debug information is printed for
    every visited line.
    """
    # Hue stepping per cluster (colour currently only used for the
    # join-point highlight; the connecting lines are fixed red).
    segment_size = 1.0 / (len(lines) + 1)
    segment_inc = segment_size + 0.5
    h = 0
    for line_cluster in lines:
        c = colorsys.hls_to_rgb(h % 1.0, 0.5, 1.0)
        color = (c[0] * 255, c[1] * 255, c[2] * 255)
        seed = line_cluster[0]
        processed = set()
        worklist = [seed]
        # Breadth-first traversal over the joint graph of this cluster.
        while worklist:
            line = worklist.pop(0)
            if line in processed:
                continue
            processed.add(line)
            print("--------------")
            print(line)
            for joint in line.joints:
                print(joint)
                neighbour = joint.target_line
                p1, join_point, p2 = net.get_joint_points(line, neighbour)
                cv2.line(output_img, recoordinate(p1), recoordinate(join_point), thickness=1,
                         color=(0, 0, 255))
                cv2.line(output_img, recoordinate(join_point), recoordinate(p2), thickness=1,
                         color=(0, 0, 255))
                highlight_point(output_img, join_point, color)
                worklist.append(neighbour)
        h += segment_inc
def draw_digit_hypotheses(output_img, digits: List[DigitHypothesis], net: DigitNet2):
    """Highlight the vertexes of every digit hypothesis, one colour per digit.

    *net* is accepted for signature symmetry with the other drawing helpers
    but is not used here.
    """
    # Hue stepping per digit (see draw_recognized_digits).
    segment_size = 1.0 / (len(digits) + 1)
    segment_inc = segment_size + 0.5
    h = 0
    for digit in digits:
        c = colorsys.hls_to_rgb(h % 1.0, 0.5, 1.0)
        color = (c[0] * 255, c[1] * 255, c[2] * 255)
        for vertex in digit.vertexes:
            highlight_point(output_img, vertex, color)
        h += segment_inc
```
#### File: web/recognition_processor/local_recognition_worker.py
```python
import base64
import pickle
import traceback
from time import sleep
import time
from lcd_digit_recognizer.web.recognition_processor.networking.socket_client import SocketClient
from lcd_digit_recognizer.web.recognition_processor.remote_processor_pool import RemoteProcessorPool
from recognize_seven_segment.hackathon_api import recognize_number
class LocalRecognitionWorker(object):
    """Worker that pulls recognition jobs from a socket server and answers them.

    Connects to the job-dispatching server, reads JSON jobs containing a
    base64/pickle-encoded image, runs the seven-segment recogniser on it and
    sends the result (or the formatted exception) back as JSON.
    """
    def __init__(self, host: str, port: str):
        # Server address to connect to.  NOTE(review): ``port`` is annotated
        # ``str`` but callers pass ``RemoteProcessorPool.port`` — confirm the
        # expected type against SocketClient.connect.
        self._host = host
        self._port = port
    def blocking_run(self):
        """Run forever: (re)connect, accept one job at a time, reply.

        Never returns; the connection is re-established whenever the client
        reports it is no longer connected.
        """
        client = None
        while True:
            if client is None:
                print("connecting SocketClient")
                client = SocketClient()
                client.connect(self._host, self._port)
            job = client.read_next_json()
            if job is not None:
                print("job accepted")
                start = time.time()
                response = self.run_recognition(job)
                client.send_json(response)
                end = time.time()
                print(f"response sent after {end - start:.2f} ")
            if not client.is_connected:
                # Drop the dead client; a new one is created next iteration.
                client = None
            sleep(1)
    def run_recognition(self, job):
        """Decode the job's image, run recognition and build the response dict.

        Recogniser exceptions are caught and returned in the metadata (with
        ``number`` set to None) instead of crashing the worker loop.
        """
        # SECURITY: ``pickle.loads`` on data received over the network
        # executes arbitrary code if the peer is untrusted — acceptable only
        # because worker and server are assumed to be under the same control.
        pickle_bytes = base64.b64decode(job["image_data"].encode('ascii'))
        image = pickle.loads(pickle_bytes)
        sid = job["sid"]
        image_id = job["image_id"]
        try:
            number, metadata = recognize_number.recognize_number(image)
        except Exception as e:
            print(f"RECOGNIZER EXCEPTION: {e}")
            formatted_exception = traceback.format_exc()
            print(formatted_exception)
            number = None
            metadata = {"exception": formatted_exception}
        metadata["input_id"] = job["input_id"]
        result = {
            "sid": sid,
            "image_id": image_id,
            "number": number,
            "metadata": metadata
        }
        return result
if __name__ == "__main__":
    # Guard the worker start-up so importing this module no longer begins
    # an infinite blocking loop as a side effect.
    worker = LocalRecognitionWorker("127.0.0.1", RemoteProcessorPool.port)
    worker.blocking_run()
```
#### File: recognize_seven_segment/experiments/evaluation.py
```python
from typing import Dict, Callable, Tuple
import cv2
import numpy as np
from tqdm import tqdm
from recognize_seven_segment.detectors.detect_digits import detect_digits
from recognize_seven_segment.utils.input_output import list_image_paths, load_image
def get_label(image_path: str) -> str:
    """Return the ground-truth label stored next to *image_path*.

    The label is expected in a ``.txt`` file with the same stem as the
    image (e.g. ``foo.jpg`` -> ``foo.txt``).
    """
    # rsplit on the last dot generalizes the original ".jpg"-only suffix
    # handling to any image extension (.png, .jpeg, ...).
    label_path = image_path.rsplit(".", 1)[0] + ".txt"
    with open(label_path, "r") as file:
        # strip() removes the trailing newline that readline() keeps;
        # without it the label could never compare equal to a predicted
        # label string.
        return file.readline().strip()
def evaluate(image_dir: str, predict_function: Callable[[np.ndarray], Tuple[str, Dict]]) -> Dict[str, float]:
    """Run `predict_function` over every image in `image_dir`.

    Writes annotated prediction images to /tmp/annotations/ and returns a
    dict with the number of images the detector was certain about, how
    many of those matched the ground truth, and the total image count.
    """
    paths = list_image_paths(image_dir)
    certain = 0
    certain_and_correct = 0

    for path in tqdm(paths):
        image = load_image(path)
        truth = get_label(path)
        prediction, metadata = predict_function(image)
        if prediction is None:
            # Detector abstained; this image is not counted as classified.
            continue
        certain += 1
        cv2.imwrite(f"/tmp/annotations/{path.split('/')[-1]}",
                    metadata["annotated_image"])
        if truth == prediction:
            certain_and_correct += 1

    return {"certain_and_correct": certain_and_correct,
            "total_certain": certain,
            "total": len(paths)}
def main():
    """Evaluate the digit detector on the images in /tmp/images/."""
    metrics = evaluate("/tmp/images/", detect_digits)
    print(metrics)


if __name__ == "__main__":
    main()
```
#### File: recognize_seven_segment/utils/preprocess.py
```python
import numpy as np
# def preprocess(image: np.ndarray, verbose: bool = False) -> np.ndarray:
# """
# Basic preprocessing of the input image.
# """
# rows, cols, channels = image.shape
# if verbose:
# print(f"Original rows: {rows}; Original columns: {cols}")
#
# if cols > rows:
# image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
#
# image = cv2.resize(image, (432, 768))
# return image
def preprocess(image: np.ndarray) -> np.ndarray:
    """Identity preprocessing: return the input image unchanged.

    Kept as a hook so callers have a single place to reintroduce the
    rotation/resize step shown in the commented-out variant above.
    """
    return image
```
#### File: recognize_seven_segment/utils/recognize_single_digit.py
```python
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
# This is how we code segments by numbers 0 to 6
#
# 00000
# 1 2
# 1 2
# 33333
# 4 5
# 4 5
# 66666
#
# Map a 7-character lit-segment pattern ("1" = segment on, ordered by the
# segment indices in the diagram above) to the digit it represents.
segment_to_digit = {
    "0010010": 1,
    "1011101": 2,
    "1011011": 3,
    "0111010": 4,
    "1101011": 5,
    "1101111": 6,
    "1110010": 7,  # 7 variant with the upper-left segment also lit
    "1010010": 7,  # canonical 7: top bar plus both right segments
    "1111111": 8,
    "1111011": 9,
    "1110111": 0   # 0: every segment except the middle bar
}
def generate_mask(row, col, orientation, shape=None):
    """Render a soft elliptical mask for one seven-segment bar.

    Parameters
    ----------
    row, col : int
        Center of the ellipse inside the mask.
    orientation : str
        "horizontal" or "vertical"; selects the ellipse aspect ratio.
    shape : tuple, optional
        (rows, cols) of the mask; defaults to (70, 40).

    Returns
    -------
    Array of floats in [0, 1] (blurred ellipse scaled down from 255).

    Raises
    ------
    ValueError
        If `orientation` is neither "horizontal" nor "vertical".
    """
    if shape is None:
        shape = (70, 40)
    if orientation == "horizontal":
        ellipse_shape = (6, 4)
    elif orientation == "vertical":
        ellipse_shape = (4, 6)
    else:
        # Previously an unknown orientation crashed later with a NameError
        # on ellipse_shape; fail fast with a clear message instead.
        raise ValueError(
            "orientation must be 'horizontal' or 'vertical', got %r"
            % (orientation,))
    mask = np.zeros(shape)
    mask = cv2.ellipse(mask, (col, row), ellipse_shape, 0, 0, 360, (255, 255, 255), 10)
    mask = cv2.blur(mask, (10, 10))
    return mask / 255
# Pre-rendered soft masks, one per segment of a 70x40 digit crop; indices
# follow the segment numbering in the diagram above (0 = top bar, 3 =
# middle bar, 6 = bottom bar).
mask_0 = generate_mask(5, 20, "horizontal")
mask_1 = generate_mask(20, 5, "vertical")
mask_2 = generate_mask(20, 35, "vertical")
mask_3 = generate_mask(34, 20, "horizontal")
mask_4 = generate_mask(52, 5, "vertical")
mask_5 = generate_mask(52, 35, "vertical")
mask_6 = generate_mask(65, 20, "horizontal")

masks = [mask_0, mask_1, mask_2, mask_3, mask_4, mask_5, mask_6]
def recognize_single_digit(image: np.ndarray, threshold: float = 100) -> int:
    """Classify a cropped seven-segment digit image.

    Parameters
    ----------
    image : np.ndarray
        BGR color image or an already-grayscale 2d array.
    threshold : float
        Minimum mask response for a segment to count as lit.

    Returns
    -------
    The recognized digit (0-9), or None when the lit-segment pattern
    matches no entry of `segment_to_digit`.
    """
    # Accept either color or grayscale input.  The original wrapped
    # cvtColor in a bare try/except, which also hid unrelated errors;
    # testing the rank of the array is explicit and equally permissive.
    if image.ndim == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image
    resized = cv2.resize(gray, (40, 70))
    # Binarize: pixels darker than the mean are treated as lit segments.
    resized = resized < np.mean(resized)
    # Score each segment mask and threshold into an on/off pattern string.
    scores = [np.sum(mask * resized) for mask in masks]
    pattern = "".join("1" if score > threshold else "0" for score in scores)
    return segment_to_digit.get(pattern)
if __name__ == "__main__":
    # NOTE(review): this points at a directory, not a file -- Image.open
    # will raise on it; presumably a concrete image filename was intended.
    image_path = "/tmp/images/"
    image = Image.open(image_path)
    image = np.array(image)
    digit = recognize_single_digit(image, 100)
    print(f"Recognized digit {digit}")
    # Visualize the seven segment masks for debugging.
    for i in [0, 1, 2, 3, 4, 5, 6]:
        plt.imshow(masks[i])
        plt.title(f"Mask {i}")
        plt.show()
``` |
{
"source": "josef-pkt/statsmodels",
"score": 2
} |
#### File: statsmodels/genmod/bayes_mixed_glm.py
```python
from __future__ import division
import numpy as np
from scipy.optimize import minimize
from scipy import sparse
from statsmodels.iolib import summary2
from statsmodels.genmod import families
import pandas as pd
import warnings
import patsy
# Gauss-Legendre weights
# (weight, node) pairs of the 10-point Gauss-Legendre rule on (-1, 1).
# The variational fitting code rescales the nodes by `rng` to integrate
# smooth functions against a standard Gaussian density.
glw = [[0.2955242247147529, -0.1488743389816312],
       [0.2955242247147529, 0.1488743389816312],
       [0.2692667193099963, -0.4333953941292472],
       [0.2692667193099963, 0.4333953941292472],
       [0.2190863625159820, -0.6794095682990244],
       [0.2190863625159820, 0.6794095682990244],
       [0.1494513491505806, -0.8650633666889845],
       [0.1494513491505806, 0.8650633666889845],
       [0.0666713443086881, -0.9739065285171717],
       [0.0666713443086881, 0.9739065285171717]]
_init_doc = r"""
Fit a generalized linear mixed model using Bayesian methods.
The class implements the Laplace approximation to the posterior
distribution (`fit_map`) and a variational Bayes approximation to
the posterior (`fit_vb`). See the two fit method docstrings for
more information about the fitting approaches.
Parameters
----------
endog : array-like
Vector of response values.
exog_fe : array-like
Array of covariates for the fixed effects part of the mean
structure.
exog_vc : array-like
Array of covariates for the random part of the model. A
scipy.sparse array may be provided, or else the passed
array will be converted to sparse internally.
ident : array-like
Array of labels showing which random terms (columns of
`exog_vc`) have a common variance.
vc_p : float
Prior standard deviation for variance component parameters
(the prior standard deviation of log(s) is vc_p, where s is
the standard deviation of a random effect).
fe_p : float
Prior standard deviation for fixed effects parameters.
family : statsmodels.genmod.families instance
The GLM family.
fep_names : list of strings
The names of the fixed effects parameters (corresponding to
columns of exog_fe). If None, default names are constructed.
vcp_names : list of strings
The names of the variance component parameters (corresponding
to distinct labels in ident). If None, default names are
constructed.
vc_names : list of strings
The names of the random effect realizations.
Returns
-------
MixedGLMResults object
Notes
-----
There are three types of values in the posterior distribution:
fixed effects parameters (fep), corresponding to the columns of
`exog_fe`, random effects realizations (vc), corresponding to the
columns of `exog_vc`, and the standard deviations of the random
effects realizations (vcp), corresponding to the unique labels in
`ident`.
All random effects are modeled as being independent Gaussian
values (given the variance parameters). Every column of `exog_vc`
has a distinct realized random effect that is used to form the
linear predictors. The elements of `ident` determine the distinct
random effect variance parameters. Two random effect realizations
that have the same value in `ident` are constrained to have the
same variance. When fitting with a formula, `ident` is
constructed internally (each element of `vc_formulas` yields a
distinct label in `ident`).
The random effect standard deviation parameters (vcp) have
log-normal prior distributions with mean 0 and standard deviation
`vcp_p`.
Note that for some families, e.g. Binomial, the posterior mode may
be difficult to find numerically if `vcp_p` is set to too large of
a value. Setting `vcp_p` to 0.5 seems to work well.
The prior for the fixed effects parameters is Gaussian with mean 0
and standard deviation `fe_p`.
Examples
--------{example}
References
----------
Introduction to generalized linear mixed models:
https://stats.idre.ucla.edu/other/mult-pkg/introduction-to-generalized-linear-mixed-models
SAS documentation:
https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_intromix_a0000000215.htm
An assessment of estimation methods for generalized linear mixed
models with binary outcomes
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3866838/
"""
_logit_example = """
A binomial (logistic) random effects model with random intercepts
for villages and random slopes for each year within each village:
>>> data['year_cen'] = data['Year'] - data.Year.mean()
>>> random = ['0 + C(Village)', '0 + C(Village)*year_cen']
>>> model = BinomialBayesMixedGLM.from_formula('y ~ year_cen',
random, data)
>>> result = model.fit()
"""
_poisson_example = """
A Poisson random effects model with random intercepts for villages
and random slopes for each year within each village:
>>> data['year_cen'] = data['Year'] - data.Year.mean()
>>> random = ['0 + C(Village)', '0 + C(Village)*year_cen']
>>> model = PoissonBayesMixedGLM.from_formula('y ~ year_cen',
random, data)
>>> result = model.fit()
"""
class _BayesMixedGLM(object):
def __init__(self, endog, exog_fe, exog_vc, ident, family,
vcp_p=1, fe_p=2, fep_names=None,
vcp_names=None, vc_names=None):
if len(ident) != exog_vc.shape[1]:
msg = "len(ident) should match the number of columns of exog_vc"
raise ValueError(msg)
# Get the fixed effects parameter names
if fep_names is None:
if hasattr(exog_fe, "columns"):
fep_names = exog_fe.columns.tolist()
else:
fep_names = ["FE_%d" % (k + 1)
for k in range(exog_fe.shape[1])]
self.fep_names = fep_names
# Get the variance parameter names
if vcp_names is None:
vcp_names = ["VC_%d" % (k + 1)
for k in range(int(max(ident)) + 1)]
else:
if len(vcp_names) != len(set(ident)):
msg = "The lengths of vcp_names and ident should be the same"
raise ValueError(msg)
self.vcp_names = vcp_names
# Get the variance component realization (random effect)
# names.
self.vc_names = vc_names
self.endog = np.asarray(endog)
self.exog_fe = np.asarray(exog_fe)
if sparse.issparse(exog_vc):
self.exog_vc = exog_vc
else:
self.exog_vc = sparse.csr_matrix(exog_vc)
self.ident = ident.astype(np.int)
self.family = family
self.vcp_p = float(vcp_p)
self.fe_p = float(fe_p)
# Number of fixed effects parameters
if self.exog_fe is None:
self.k_fep = 0
else:
self.k_fep = exog_fe.shape[1]
# Number of variance component structure parameters and
# variance component realizations.
if self.exog_vc is None:
self.k_vc = 0
self.k_vcp = 0
else:
self.k_vc = exog_vc.shape[1]
self.k_vcp = max(self.ident) + 1
# power would be better but not available in older scipy
self.exog_vc2 = self.exog_vc.multiply(self.exog_vc)
def _unpack(self, vec):
ii = 0
# Fixed effects parameters
fep = vec[:ii+self.k_fep]
ii += self.k_fep
# Variance component structure parameters (standard
# deviations). These are on the log scale. The standard
# deviation for random effect j is exp(vcp[ident[j]]).
vcp = vec[ii:ii+self.k_vcp]
ii += self.k_vcp
# Random effect realizations
vc = vec[ii:]
return fep, vcp, vc
    def logposterior(self, params):
        """
        The overall log-density: log p(y, fe, vc, vcp).

        This differs by an additive constant from the log posterior
        log p(fe, vc, vcp | y).

        Parameters
        ----------
        params : array-like
            Packed parameter vector (fixed effects, log-scale variance
            parameters, random effect realizations); see `_unpack`.
        """
        fep, vcp, vc = self._unpack(params)

        # Contributions from p(y | vc)
        lp = 0
        if self.k_fep > 0:
            lp += np.dot(self.exog_fe, fep)
        if self.k_vc > 0:
            lp += self.exog_vc.dot(vc)

        mu = self.family.link.inverse(lp)
        ll = self.family.loglike(self.endog, mu)

        if self.k_vc > 0:
            # Contribution from p(vc | vcp): Gaussian log-density of the
            # random effects given their (log-scale) standard deviations.
            vcp0 = vcp[self.ident]
            s = np.exp(vcp0)
            ll -= 0.5 * np.sum(vc**2 / s**2) + np.sum(vcp0)

            # Prior for vc parameters
            ll -= 0.5 * np.sum(vcp**2 / self.vcp_p**2)

        # Contributions from p(fep)
        if self.k_fep > 0:
            ll -= 0.5 * np.sum(fep**2 / self.fe_p**2)

        return ll
    def logposterior_grad(self, params):
        """
        The gradient of the log posterior.

        Returns the packed gradient with respect to (fep, vcp, vc), in
        the same layout as `params`.
        """
        fep, vcp, vc = self._unpack(params)

        lp = 0
        if self.k_fep > 0:
            lp += np.dot(self.exog_fe, fep)
        if self.k_vc > 0:
            lp += self.exog_vc.dot(vc)

        mu = self.family.link.inverse(lp)

        # Per-observation score factor: d loglike / d(linear predictor).
        score_factor = (self.endog - mu) / self.family.link.deriv(mu)
        score_factor /= self.family.variance(mu)

        # te holds the gradient pieces for (fep, vcp, vc).
        te = [None, None, None]

        # Contributions from p(y | x, z, vc)
        if self.k_fep > 0:
            te[0] = np.dot(score_factor, self.exog_fe)
        if self.k_vc > 0:
            te[2] = self.exog_vc.transpose().dot(score_factor)

        if self.k_vc > 0:
            # Contributions from p(vc | vcp); differentiates
            # -0.5 * sum(vc**2 / s**2) - sum(vcp0) with s = exp(vcp0).
            vcp0 = vcp[self.ident]
            s = np.exp(vcp0)
            u = vc**2 / s**2 - 1
            te[1] = np.bincount(self.ident, weights=u)
            te[2] -= vc / s**2

            # Contributions from p(vcp); differentiates
            # -0.5 * sum(vcp**2 / vcp_p**2).
            te[1] -= vcp / self.vcp_p**2

        # Contributions from p(fep)
        if self.k_fep > 0:
            te[0] -= fep / self.fe_p**2

        # Drop pieces for absent parameter groups and pack.
        te = [x for x in te if x is not None]

        return np.concatenate(te)
def _get_start(self):
start_fep = np.zeros(self.k_fep)
start_vcp = np.ones(self.k_vcp)
start_vc = np.random.normal(size=self.k_vc)
start = np.concatenate((start_fep, start_vcp, start_vc))
return start
    @classmethod
    def from_formula(cls, formula, vc_formulas, data, family=None,
                     vcp_p=1, fe_p=2, vc_names=None):
        """
        Fit a BayesMixedGLM using a formula.

        Parameters
        ----------
        formula : string
            Formula for the endog and fixed effects terms (use ~ to separate
            dependent and independent expressions).
        vc_formulas : dictionary
            vc_formulas[name] is a one-sided formula that creates one
            collection of random effects with a common variance
            parameter. If using a categorical expression to produce
            variance components, note that generally `0 + ...` should
            be used so that an intercept is not included.
        data : data frame
            The data to which the formulas are applied.
        family : genmod.families instance
            A GLM family.
        vcp_p : float
            The prior standard deviation for the logarithms of the standard
            deviations of the random effects.
        fe_p : float
            The prior standard deviation for the fixed effects parameters.
        vc_names : list
            Names of random effects realizations.  NOTE(review): this
            argument is currently overwritten below by the design-matrix
            column names -- confirm whether it should take precedence.
        """
        endog, exog_fe = patsy.dmatrices(formula, data,
                                         return_type='dataframe')

        # Build one block of random-effect columns per formula; all columns
        # coming from the same formula share one `ident` label (and hence a
        # common variance parameter).
        ident = []
        exog_vc = []
        vcp_names = []
        j = 0
        for na, fml in vc_formulas.items():
            mat = patsy.dmatrix(fml, data, return_type='dataframe')
            exog_vc.append(mat)
            vcp_names.append(na)
            ident.append(j * np.ones(mat.shape[1]))
            j += 1
        exog_vc = pd.concat(exog_vc, axis=1)
        vc_names = exog_vc.columns.tolist()

        ident = np.concatenate(ident)

        endog = np.squeeze(np.asarray(endog))
        fep_names = exog_fe.columns.tolist()
        exog_fe = np.asarray(exog_fe)
        exog_vc = sparse.csr_matrix(np.asarray(exog_vc))

        # Note: constructs the base class directly rather than `cls`;
        # family-specific subclasses wrap this in their own from_formula.
        mod = _BayesMixedGLM(endog, exog_fe, exog_vc, ident, family,
                             vcp_p, fe_p, fep_names=fep_names,
                             vcp_names=vcp_names, vc_names=vc_names)

        return mod
    def fit_map(self, method="BFGS", minim_opts=None):
        """
        Construct the Laplace approximation to the posterior
        distribution.

        Parameters
        ----------
        method : string
            Optimization method for finding the posterior mode.
        minim_opts : dict-like
            Options passed to scipy.minimize.

        Returns
        -------
        BayesMixedGLMResults instance.
        """

        # Minimize the negative log posterior to locate the mode.
        def fun(params):
            return -self.logposterior(params)

        def grad(params):
            return -self.logposterior_grad(params)

        start = self._get_start()

        r = minimize(fun, start, method=method, jac=grad, options=minim_opts)
        if not r.success:
            msg = ("Laplace fitting did not converge, |gradient|=%.6f" %
                   np.sqrt(np.sum(r.jac**2)))
            warnings.warn(msg)

        from statsmodels.tools.numdiff import approx_fprime

        # The Laplace covariance is the inverse Hessian of the negative log
        # posterior at the mode; the Hessian is obtained by numerically
        # differentiating the analytic gradient.
        hess = approx_fprime(r.x, grad)
        hess_inv = np.linalg.inv(hess)

        return BayesMixedGLMResults(self, r.x, hess_inv, optim_retvals=r)
class _VariationalBayesMixedGLM(object):
    """
    A mixin providing generic (not family-specific) methods for
    variational Bayes mean field fitting.
    """

    # Integration range (from -rng to +rng). The integrals are with
    # respect to a standard Gaussian distribution so (-5, 5) will be
    # sufficient in many cases.
    rng = 5

    # When True, fit_vb prints the gradient norm at each evaluation.
    verbose = False

    # Returns the mean and variance of the linear predictor under the
    # given distribution parameters.
    def _lp_stats(self, fep_mean, fep_sd, vc_mean, vc_sd):
        # Mean and variance of a linear combination of independent
        # Gaussian coefficients.
        tm = np.dot(self.exog_fe, fep_mean)
        tv = np.dot(self.exog_fe**2, fep_sd**2)
        tm += self.exog_vc.dot(vc_mean)
        tv += self.exog_vc2.dot(vc_sd**2)

        return tm, tv
    def vb_elbo_base(self, h, tm, fep_mean, vcp_mean, vc_mean,
                     fep_sd, vcp_sd, vc_sd):
        """
        Returns the evidence lower bound (ELBO) for the model.

        This function calculates the family-specific ELBO function
        based on information provided from a subclass.

        Parameters
        ----------
        h : function mapping 1d vector to 1d vector
            The contribution of the model to the ELBO function can be
            expressed as y_i*lp_i + Eh_i(z), where y_i and lp_i are
            the response and linear predictor for observation i, and z
            is a standard normal random variable.  This formulation
            can be achieved for any GLM with a canonical link
            function.
        """

        # p(y | vc) contributions: approximate E[h(z)] by Gauss-Legendre
        # quadrature over (-rng, rng) against the standard normal density.
        iv = 0
        for w in glw:
            z = self.rng * w[1]
            iv += w[0] * h(z) * np.exp(-z**2 / 2)
        iv /= np.sqrt(2*np.pi)
        iv *= self.rng
        iv += self.endog * tm
        iv = iv.sum()

        # p(vc | vcp) * p(vcp) * p(fep) contributions
        iv += self._elbo_common(fep_mean, fep_sd, vcp_mean, vcp_sd,
                                vc_mean, vc_sd)

        # Add the entropy terms of the factored Gaussian variational
        # density (sums of log standard deviations).
        r = (iv + np.sum(np.log(fep_sd)) + np.sum(np.log(vcp_sd)) +
             np.sum(np.log(vc_sd)))

        return r
    def vb_elbo_grad_base(self, h, tm, tv, fep_mean, vcp_mean, vc_mean,
                          fep_sd, vcp_sd, vc_sd):
        """
        Return the gradient of the ELBO function.

        See vb_elbo_base for parameters.
        """

        fep_mean_grad = 0.
        fep_sd_grad = 0.
        vcp_mean_grad = 0.
        vcp_sd_grad = 0.
        vc_mean_grad = 0.
        vc_sd_grad = 0.

        # p(y | vc) contributions: differentiate the quadrature
        # approximation of E[h(z)] with respect to the variational means
        # and standard deviations.
        for w in glw:
            z = self.rng * w[1]
            u = h(z) * np.exp(-z**2 / 2) / np.sqrt(2*np.pi)
            r = u / np.sqrt(tv)
            fep_mean_grad += w[0] * np.dot(u, self.exog_fe)
            vc_mean_grad += w[0] * self.exog_vc.transpose().dot(u)
            fep_sd_grad += w[0] * z * np.dot(r, self.exog_fe**2 * fep_sd)
            v = self.exog_vc2.multiply(vc_sd).transpose().dot(r)
            v = np.squeeze(np.asarray(v))
            vc_sd_grad += w[0] * z * v

        fep_mean_grad *= self.rng
        vc_mean_grad *= self.rng
        fep_sd_grad *= self.rng
        vc_sd_grad *= self.rng

        # Gradient of the y*lp term of the ELBO.
        fep_mean_grad += np.dot(self.endog, self.exog_fe)
        vc_mean_grad += self.exog_vc.transpose().dot(self.endog)

        # Gradients of the shared prior terms.
        (fep_mean_grad_i, fep_sd_grad_i, vcp_mean_grad_i, vcp_sd_grad_i,
         vc_mean_grad_i, vc_sd_grad_i) = self._elbo_grad_common(
            fep_mean, fep_sd, vcp_mean, vcp_sd, vc_mean, vc_sd)

        fep_mean_grad += fep_mean_grad_i
        fep_sd_grad += fep_sd_grad_i
        vcp_mean_grad += vcp_mean_grad_i
        vcp_sd_grad += vcp_sd_grad_i
        vc_mean_grad += vc_mean_grad_i
        vc_sd_grad += vc_sd_grad_i

        # Gradients of the entropy terms (sums of log sd's).
        fep_sd_grad += 1 / fep_sd
        vcp_sd_grad += 1 / vcp_sd
        vc_sd_grad += 1 / vc_sd

        mean_grad = np.concatenate((fep_mean_grad, vcp_mean_grad,
                                    vc_mean_grad))
        sd_grad = np.concatenate((fep_sd_grad, vcp_sd_grad, vc_sd_grad))

        if self.verbose:
            print("|G|=%f" % np.sqrt(np.sum(mean_grad**2) +
                                     np.sum(sd_grad**2)))

        return mean_grad, sd_grad
    def fit_vb(self, mean=None, sd=None, minim_opts=None, verbose=False):
        """
        Fit a model using the variational Bayes mean field approximation.

        Parameters:
        -----------
        mean : array-like
            Starting value for VB mean vector
        sd : array-like
            Starting value for VB standard deviation vector
        minim_opts : dict-like
            Options passed to scipy.minimize
        verbose : bool
            If True, print the gradient norm to the screen each time
            it is calculated.

        Notes
        -----
        The goal is to find a factored Gaussian approximation
        q1*q2*... to the posterior distribution, approximately
        minimizing the KL divergence from the factored approximation
        to the actual posterior.  The KL divergence, or ELBO function
        has the form

            E* log p(y, fe, vcp, vc) - E* log q

        where E* is expectation with respect to the product of qj.

        References
        ----------
        Blei, Kucukelbir, McAuliffe (2017).  Variational Inference: A
        review for Statisticians
        https://arxiv.org/pdf/1601.00670.pdf
        """

        self.verbose = verbose

        n = self.k_fep + self.k_vcp + self.k_vc
        ml = self.k_fep + self.k_vcp + self.k_vc
        if mean is None:
            m = np.zeros(n)
        else:
            if len(mean) != ml:
                raise ValueError("mean has incorrect length, %d != %d" %
                                 (len(mean), ml))
            m = mean.copy()
        if sd is None:
            s = -0.5 + 0.1 * np.random.normal(size=n)
        else:
            if len(sd) != ml:
                raise ValueError("sd has incorrect length, %d != %d" %
                                 (len(sd), ml))
            # s is parameterized on the log-scale internally when
            # optimizing the ELBO function (this is transparent to the
            # caller)
            s = np.log(sd)

        # Don't allow the variance parameter starting mean values to
        # be too small.
        i1, i2 = self.k_fep, self.k_fep + self.k_vcp
        m[i1:i2] = np.where(m[i1:i2] < -1, -1, m[i1:i2])

        # Don't allow the posterior standard deviation starting values
        # to be too small.
        s = np.where(s < -1, -1, s)

        # The optimization variable concatenates the means with the
        # log-scale standard deviations; maximize the ELBO by minimizing
        # its negation.
        def elbo(x):
            n = len(x) // 2
            return -self.vb_elbo(x[:n], np.exp(x[n:]))

        def elbo_grad(x):
            n = len(x) // 2
            gm, gs = self.vb_elbo_grad(x[:n], np.exp(x[n:]))
            # Chain rule for the log-scale sd parameterization.
            gs *= np.exp(x[n:])
            return -np.concatenate((gm, gs))

        start = np.concatenate((m, s))
        mm = minimize(elbo, start, jac=elbo_grad, method="bfgs",
                      options=minim_opts)
        if not mm.success:
            warnings.warn("VB fitting did not converge")

        # Unpack the (mean, log-sd) solution; variances are exp(2*log sd).
        n = len(mm.x) // 2
        return BayesMixedGLMResults(self, mm.x[0:n], np.exp(2*mm.x[n:]), mm)
    # Handle terms in the ELBO that are common to all models.
    def _elbo_common(self, fep_mean, fep_sd, vcp_mean, vcp_sd, vc_mean, vc_sd):
        """Expected log-prior terms shared by all families:
        E[log p(vc | vcp)] + E[log p(vcp)] + E[log p(fep)] under the
        factored Gaussian variational density."""

        iv = 0

        # p(vc | vcp) contributions; uses E[exp(-2*v)] = exp(2*(s**2 - m))
        # for v ~ N(m, s**2).
        m = vcp_mean[self.ident]
        s = vcp_sd[self.ident]
        iv -= np.sum((vc_mean**2 + vc_sd**2) * np.exp(2*(s**2 - m))) / 2
        iv -= np.sum(m)

        # p(vcp) contributions
        iv -= 0.5 * (vcp_mean**2 + vcp_sd**2).sum() / self.vcp_p**2

        # p(b) contributions
        iv -= 0.5 * (fep_mean**2 + fep_sd**2).sum() / self.fe_p**2

        return iv
    def _elbo_grad_common(self, fep_mean, fep_sd, vcp_mean, vcp_sd,
                          vc_mean, vc_sd):
        """Gradient of `_elbo_common` with respect to all six variational
        mean/sd vectors, returned in the same order as the arguments."""

        # p(vc | vcp) contributions; ve = E[exp(-2*v)] for v ~ N(m, s**2).
        m = vcp_mean[self.ident]
        s = vcp_sd[self.ident]
        u = vc_mean**2 + vc_sd**2
        ve = np.exp(2*(s**2 - m))
        dm = u * ve - 1
        ds = -2 * u * ve * s
        # Accumulate per-random-effect pieces onto their shared variance
        # parameters via the ident labels.
        vcp_mean_grad = np.bincount(self.ident, weights=dm)
        vcp_sd_grad = np.bincount(self.ident, weights=ds)

        vc_mean_grad = -vc_mean.copy() * ve
        vc_sd_grad = -vc_sd.copy() * ve

        # p(vcp) contributions
        vcp_mean_grad -= vcp_mean / self.vcp_p**2
        vcp_sd_grad -= vcp_sd / self.vcp_p**2

        # p(b) contributions
        fep_mean_grad = -fep_mean.copy() / self.fe_p**2
        fep_sd_grad = -fep_sd.copy() / self.fe_p**2

        return (fep_mean_grad, fep_sd_grad, vcp_mean_grad, vcp_sd_grad,
                vc_mean_grad, vc_sd_grad)
class BayesMixedGLMResults(object):
    """
    Attributes
    ----------
    fe_mean : array-like
        Posterior mean of the fixed effects coefficients.
    fe_sd : array-like
        Posterior standard deviation of the fixed effects coefficients
    vcp_mean : array-like
        Posterior mean of the logged variance component standard
        deviations.
    vcp_sd : array-like
        Posterior standard deviation of the logged variance component
        standard deviations.
    vc_mean : array-like
        Posterior mean of the random coefficients
    vc_sd : array-like
        Posterior standard deviation of the random coefficients
    """

    def __init__(self, model, params, cov_params,
                 optim_retvals=None):
        # params is the packed posterior mean; cov_params is either a full
        # covariance matrix (Laplace fit) or a vector of posterior
        # variances (VB fit).
        self.model = model
        self.params = params
        self.cov_params = cov_params
        self.optim_retvals = optim_retvals

        self.fe_mean, self.vcp_mean, self.vc_mean = (
            model._unpack(params))

        # Reduce either form of cov_params to a variance vector, then
        # unpack and convert to standard deviations.
        if cov_params.ndim == 2:
            cp = np.diag(cov_params)
        else:
            cp = cov_params
        self.fe_sd, self.vcp_sd, self.vc_sd = model._unpack(cp)
        self.fe_sd = np.sqrt(self.fe_sd)
        self.vcp_sd = np.sqrt(self.vcp_sd)
        self.vc_sd = np.sqrt(self.vc_sd)
    def summary(self):
        """Return a summary table of posterior means and SDs for the fixed
        effects ("F" rows) and variance component parameters ("R" rows).
        Random effect realizations are not shown; see `random_effects`."""

        df = pd.DataFrame()
        m = self.model.k_fep + self.model.k_vcp
        df["Type"] = (["F" for k in range(self.model.k_fep)] +
                      ["R" for k in range(self.model.k_vcp)])

        df["Post. Mean"] = self.params[0:m]

        if self.cov_params.ndim == 2:
            v = np.diag(self.cov_params)[0:m]
            df["Post. SD"] = np.sqrt(v)
        else:
            df["Post. SD"] = np.sqrt(self.cov_params[0:m])

        # Convert variance parameters to natural scale
        df["VC"] = np.exp(df["Post. Mean"])
        df["VC (LB)"] = np.exp(df["Post. Mean"] - 2*df["Post. SD"])
        df["VC (UB)"] = np.exp(df["Post. Mean"] + 2*df["Post. SD"])
        df["VC"] = ["%.3f" % x for x in df.VC]
        df["VC (LB)"] = ["%.3f" % x for x in df["VC (LB)"]]
        df["VC (UB)"] = ["%.3f" % x for x in df["VC (UB)"]]
        # The natural-scale columns only make sense for the variance
        # parameters; blank them on the fixed effects rows.
        df.loc[df.index < self.model.k_fep, "VC"] = ""
        df.loc[df.index < self.model.k_fep, "VC (LB)"] = ""
        df.loc[df.index < self.model.k_fep, "VC (UB)"] = ""
        df.index = self.model.fep_names + self.model.vcp_names

        summ = summary2.Summary()
        summ.add_title(self.model.family.__class__.__name__ +
                       " Mixed GLM Results")
        summ.add_df(df)

        return summ
    def random_effects(self, term=None):
        """
        Posterior mean and standard deviation of random effects.

        Parameters
        ----------
        term : int or None
            If None, results for all random effects are returned. If
            an integer, returns results for a given set of random
            effects. The value of `term` refers to an element of the
            `ident` vector, or to a position in the `vc_formulas`
            list.

        Returns
        -------
        Data frame of posterior means and posterior standard
        deviations of random effects.
        """
        z = self.vc_mean
        s = self.vc_sd
        na = self.model.vc_names

        if term is not None:
            # NOTE(review): `term` is looked up in vcp_names, so in
            # practice a variance-component *name* works here -- confirm
            # the intended type against the docstring.
            termix = self.model.vcp_names.index(term)
            ii = np.flatnonzero(self.model.ident == termix)
            z = z[ii]
            s = s[ii]
            na = [na[i] for i in ii]

        x = pd.DataFrame({"Mean": z, "SD": s})

        if na is not None:
            x.index = na

        return x
class BinomialBayesMixedGLM(_VariationalBayesMixedGLM, _BayesMixedGLM):

    __doc__ = _init_doc.format(example=_logit_example)

    def __init__(self, endog, exog_fe, exog_vc, ident, vcp_p=1,
                 fe_p=2, fep_names=None, vcp_names=None,
                 vc_names=None):
        super(BinomialBayesMixedGLM, self).__init__(
            endog=endog, exog_fe=exog_fe, exog_vc=exog_vc,
            ident=ident, vcp_p=vcp_p, fe_p=fe_p,
            family=families.Binomial(),
            fep_names=fep_names, vcp_names=vcp_names,
            vc_names=vc_names)

    @classmethod
    def from_formula(cls, formula, vc_formulas, data, vcp_p=1, fe_p=2,
                     vc_names=None):
        """Create a BinomialBayesMixedGLM from formulas; see
        _BayesMixedGLM.from_formula for parameter details."""
        fam = families.Binomial()
        x = _BayesMixedGLM.from_formula(
            formula, vc_formulas, data, family=fam, vcp_p=vcp_p, fe_p=fe_p,
            vc_names=vc_names)
        return BinomialBayesMixedGLM(
            endog=x.endog, exog_fe=x.exog_fe, exog_vc=x.exog_vc,
            ident=x.ident, vcp_p=x.vcp_p, fe_p=x.fe_p,
            fep_names=x.fep_names, vcp_names=x.vcp_names,
            vc_names=x.vc_names)

    def vb_elbo(self, vb_mean, vb_sd):
        """
        Returns the evidence lower bound (ELBO) for the model.
        """
        fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
        fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
        tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)

        # For the canonical logit link, the non-linear term of the ELBO
        # integrand is -log(1 + exp(lp)); the y*lp part is handled by
        # vb_elbo_base.
        def h(z):
            return -np.log(1 + np.exp(tm + np.sqrt(tv)*z))

        return self.vb_elbo_base(
            h, tm, fep_mean, vcp_mean, vc_mean, fep_sd, vcp_sd, vc_sd)

    def vb_elbo_grad(self, vb_mean, vb_sd):
        """
        Returns the gradient of the model's evidence lower bound (ELBO).
        """
        fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
        fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
        tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)

        # Derivative of h above: the negative logistic mean function.
        def h(z):
            u = tm + np.sqrt(tv)*z
            x = np.exp(u) / (1 + np.exp(u))
            return -x

        return self.vb_elbo_grad_base(
            h, tm, tv, fep_mean, vcp_mean, vc_mean, fep_sd, vcp_sd, vc_sd)
class PoissonBayesMixedGLM(_VariationalBayesMixedGLM, _BayesMixedGLM):

    __doc__ = _init_doc.format(example=_poisson_example)

    def __init__(self, endog, exog_fe, exog_vc, ident, vcp_p=1,
                 fe_p=2, fep_names=None, vcp_names=None,
                 vc_names=None):
        # vc_names was previously missing from this signature although
        # from_formula passed it, raising TypeError; accept and forward
        # it, matching BinomialBayesMixedGLM.
        super(PoissonBayesMixedGLM, self).__init__(
            endog=endog, exog_fe=exog_fe, exog_vc=exog_vc,
            ident=ident, vcp_p=vcp_p, fe_p=fe_p,
            family=families.Poisson(),
            fep_names=fep_names, vcp_names=vcp_names,
            vc_names=vc_names)

    @classmethod
    def from_formula(cls, formula, vc_formulas, data, vcp_p=1, fe_p=2,
                     vcp_names=None, vc_names=None):
        """Create a PoissonBayesMixedGLM from formulas; see
        _BayesMixedGLM.from_formula for parameter details.

        Notes
        -----
        `vcp_names` is accepted for backward compatibility but ignored:
        the variance-component names come from the keys of `vc_formulas`.
        (It was previously forwarded to _BayesMixedGLM.from_formula,
        which does not accept it, raising TypeError.)
        """
        fam = families.Poisson()
        x = _BayesMixedGLM.from_formula(
            formula, vc_formulas, data, family=fam, vcp_p=vcp_p, fe_p=fe_p,
            vc_names=vc_names)
        return PoissonBayesMixedGLM(
            endog=x.endog, exog_fe=x.exog_fe, exog_vc=x.exog_vc,
            ident=x.ident, vcp_p=x.vcp_p, fe_p=x.fe_p,
            fep_names=x.fep_names, vcp_names=x.vcp_names,
            vc_names=x.vc_names)

    def vb_elbo(self, vb_mean, vb_sd):
        """
        Returns the evidence lower bound (ELBO) for the model.
        """
        fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
        fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
        tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)

        # For the canonical log link, the non-linear term of the ELBO
        # integrand is -exp(lp); the y*lp part is handled by vb_elbo_base.
        def h(z):
            return -np.exp(tm + np.sqrt(tv)*z)

        return self.vb_elbo_base(
            h, tm, fep_mean, vcp_mean, vc_mean, fep_sd, vcp_sd, vc_sd)

    def vb_elbo_grad(self, vb_mean, vb_sd):
        """
        Returns the gradient of the model's evidence lower bound (ELBO).
        """
        fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
        fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
        tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)

        # h is its own derivative factor for the exponential mean function.
        def h(z):
            y = -np.exp(tm + np.sqrt(tv)*z)
            return y

        return self.vb_elbo_grad_base(
            h, tm, tv, fep_mean, vcp_mean, vc_mean, fep_sd, vcp_sd, vc_sd)
``` |
{
"source": "JoseFranciscoSanchezGutierrez/m1t1b",
"score": 3
} |
#### File: JoseFranciscoSanchezGutierrez/m1t1b/test_calcular.py
```python
import calcular
class TestCalcular:
    """Unit tests for the basic arithmetic in the calcular module."""

    def test_suma(self):
        a, b = 10, 5
        assert calcular.suma(a, b) == a + b

    def test_resta(self):
        a, b = 10, 5
        assert calcular.resta(a, b) == a - b
``` |
{
"source": "josefrank/codewars",
"score": 5
} |
#### File: codewars/Python/1_even_or_odd.py
```python
def even_or_odd(number):
    """Return "Even" for even integers and "Odd" for odd ones."""
    return "Odd" if number % 2 else "Even"
``` |
{
"source": "JosefRichmond21/SatelliteConstellationCreator",
"score": 3
} |
#### File: SatelliteConstellationCreator/satellite_constellation/utils.py
```python
def mod(x, y):
    """
    Elementwise (mappable) modulo of two equal-length sequences.

    :param x: Sequence of dividends
    :param y: Sequence of divisors
    :return: list of x[i] % y[i]
    """
    return [dividend % divisor for dividend, divisor in zip(x, y)]
# Radii of solar-system bodies, keyed by lowercase name.  Values match the
# bodies' approximate mean radii in kilometres (e.g. Earth = 6371 km).
heavenly_body_radius = {
    "earth": 6371,
    "luna": 1737,
    "mars": 3390,
    "venus": 6052,
    "mercury": 2440,
    "sol": 695700,
    "jupiter": 69911,
    "saturn": 58232,
    "uranus": 25362,
    "neptune": 24622,
    "pluto": 1188,
}
``` |
{
"source": "joseftsch/powerstats",
"score": 3
} |
#### File: joseftsch/powerstats/fronius.py
```python
import configparser
import requests
import sys
import mysql.connector
from datetime import date
from influxdb import InfluxDBClient
import logging
def main():
    """Poll the Fronius inverter once and fan the readings out to every
    sink enabled in /config.ini (stdout, MySQL, InfluxDB)."""
    logging.basicConfig(format='%(asctime)s %(module)s %(levelname)s %(funcName)s %(message)s', level=logging.INFO)
    logging.info("Fronius Power gathering startup!")

    config = configparser.ConfigParser()
    try:
        config.read("/config.ini")
        # config.read silently ignores a missing file; a near-empty parser
        # means the config was absent or unusable.
        if len(config) < 2:
            raise Exception
    except Exception as e:
        logging.error("Failed to read config file", exc_info=True)
        sys.exit()

    url = config['general']['url']

    # get pv data
    res = {}
    data = GetData(url)
    if not data or not isinstance(data, dict):
        logging.error("We did not receive a valid response from Inverter")
        sys.exit()
    try:
        # P_PV can be null (e.g. no production); map falsy values to 0 W.
        raw_current_pv_watt = data['Body']['Data']['Site']['P_PV']
        if raw_current_pv_watt:
            res['current_pv_watt'] = int(raw_current_pv_watt)
        else:
            res['current_pv_watt'] = 0
        res['current_consumption_from_grid_watt'] = float(data['Body']['Data']['Site']['P_Grid'])
        res['current_consumption_house_watt'] = float(data['Body']['Data']['Site']['P_Load'])
        res['energy_pv_today_wh'] = float(data['Body']['Data']['Site']['E_Day'])
        res['energy_pv_year_wh'] = float(data['Body']['Data']['Site']['E_Year'])
        res['energy_pv_total_wh'] = float(data['Body']['Data']['Site']['E_Total'])
        res['autonomy_percent'] = int(data['Body']['Data']['Site']['rel_Autonomy'])
        # rel_SelfConsumption can also be null; map falsy values to 0.
        raw_selfconsumption_percent = data['Body']['Data']['Site']['rel_SelfConsumption']
        if raw_selfconsumption_percent:
            res['selfconsumption_percent'] = int(raw_selfconsumption_percent)
        else:
            res['selfconsumption_percent'] = 0
    except Exception as e:
        logging.error("Unable to assign values ... maybe element missing. Exception: {}".format(e), exc_info=True)
        sys.exit()

    # verify that every value in the dict is an int or a float before
    # handing it to the storage backends
    try:
        for k, v in res.items():
            if not isinstance(v, int) and not isinstance(v, float):
                logging.error("{} is not an integer or float. value: {}".format(str(k), str(v)))
                sys.exit()
    except Exception as e:
        logging.error("Something went wrong. Exception: {}".format(e), exc_info=True)
        sys.exit()

    # print data to stdout
    if 'stdout' in config.sections():
        logging.info("Print to Stdout")
        logging.info(data)
        logging.info(res)

    # insert data into mysql
    if 'mysql' in config.sections():
        logging.info("Insert into MySQL")
        status = MySQLInsert(res, config)
        if not status:
            logging.error("Something went wrong during MySQL insert")
            sys.exit()

    # insert data into influxdb
    if 'influxdb' in config.sections():
        logging.info("Insert into InfluxDB")
        status = InfluxDBInsert(res, config)
        if not status:
            logging.error("Something went wrong during influxdb insert")
            sys.exit()

    logging.info("Fronius Power gathering shutdown!")
def GetData(url: str) -> dict:
    """Fetch the inverter's JSON payload from *url*.

    Uses a 3 second timeout; on any network/HTTP/decoding failure the
    error is logged and the process exits (this is a one-shot script).
    """
    try:
        response = requests.get(url, timeout=3)
        response.raise_for_status()
        return response.json()
    except Exception as err:
        logging.error("Unable to request data. Exception: {}".format(str(err)), exc_info=True)
        sys.exit()
def InfluxDBInsert(res:dict,config:configparser)->bool:
    """Write the metrics in *res* to InfluxDB as one "power" measurement.

    Connection parameters are read from the [influxdb] section of *config*.
    Returns True on success, False if the write (or connect) fails.
    """
    datalist = []
    data = {}
    data["measurement"] = "power"
    data["fields"] = {}
    # Copy every metric into the point's field set.
    for k,v in res.items():
        data["fields"][k] = v
    # NOTE(review): indentation was lost in this source; appending once after
    # the loop (one point carrying all fields) is the reconstruction used
    # here — confirm against the original intent.
    datalist.append(data.copy())
    client = InfluxDBClient(
        config['influxdb']['influxdbhost'],
        config['influxdb']['influxdbport'],
        config['influxdb']['influxdbuser'],
        config['influxdb']['influxdbpassword'],
        config['influxdb']['influxdbdb'])
    try:
        client.write_points(datalist)
        client.close()
    except Exception as e:
        logging.error("Error during InfluxDB connection. Exception: {}".format(str(e)), exc_info=True)
        return False
    return True
def MySQLInsert(res: dict, config: configparser) -> bool:
    """Insert the metrics in *res* into the month-partitioned MySQL table.

    The target table is "power_YYYYMM" for the current month; dict keys
    become column names and values are bound as parameters.
    Returns True on success, False on connection or insert failure.
    """
    mysqltable = "power_{}{:02d}".format(date.today().year, date.today().month)
    placeholder = ", ".join(["%s"] * len(res))
    stmt = "INSERT INTO {} ({}) values ({});".format(mysqltable, ",".join(res.keys()), placeholder)
    # establish mysql connection
    try:
        mydb = mysql.connector.connect(
            host=config['mysql']['mysqlhost'],
            user=config['mysql']['mysqluser'],
            # fixed: key was a scrubbed '<PASSWORD>' placeholder; sibling keys
            # follow the mysql<name> pattern, so the password key is mysqlpassword
            password=config['mysql']['mysqlpassword'],
            database=config['mysql']['mysqldb'])
        mycursor = mydb.cursor()
    except mysql.connector.Error as e:
        logging.error("Error during MySQL connection. Exception: {}".format(str(e)), exc_info=True)
        return False
    # insert into db
    try:
        mycursor.execute(stmt, list(res.values()))
        mydb.commit()
    except Exception as e:
        logging.error("Error during insert of data into MySQL. Exception: {}".format(str(e)), exc_info=True)
        return False
    finally:
        # fixed: close the connection on failure too (it previously leaked
        # when execute/commit raised)
        mydb.close()
    return True
# Script entry point: run one gathering/insert cycle per invocation.
if __name__ == "__main__":
    main()
``` |
{
"source": "JosefUtbult/CampaignGenerator",
"score": 3
} |
#### File: CampaignGenerator/src/markdown_preprocessor.py
```python
import json
import xml.etree.ElementTree as etree
import markdown as markdown
from markdown.treeprocessors import Treeprocessor
from markdown.blockprocessors import BlockProcessor
from markdown.postprocessors import Postprocessor
from markdown.extensions import Extension
import re
# Adds an id attribute to headings ending in the tag "[ID_NAME]"
# Example:
# ## <NAME> [kar:AnnaOlofsson]
class HeadingReferences(Treeprocessor):
    """Tree processor that turns a trailing "[id]" marker on a heading into
    an HTML id attribute.

    Example: "## Anna Olofsson [kar:AnnaOlofsson]" becomes an <h2> with
    id="kar:AnnaOlofsson" and the marker stripped from the visible text.
    """
    # Matches everything before the first '[' then captures the bracket body.
    RE_FENCE = r'[^\[]*\[(.*)\]'

    def run(self, root):
        return self.add_link(root)

    def add_link(self, parent):
        """Recursively scan the element tree and rewrite tagged headings."""
        # Fix: headings whose content lives in child elements have text=None,
        # which previously crashed re.match with a TypeError.
        if parent.tag in ['h1', 'h2', 'h3', 'h4', 'h5'] and parent.text:
            res = re.match(self.RE_FENCE, parent.text)
            if res:
                # NOTE(review): this replaces (not updates) any existing
                # attributes on the heading element.
                parent.attrib = {"id": res.group(1)}
                parent.text = parent.text.split('[')[0]
        for child in parent:
            self.add_link(child)
# Pulls blocks encapsulated by "!!! TYPE" from the markdown, parse the type
# and handle it in the specified GameSystem object
# Example:
# !!! Character
# Stuff
# !!!
class MarkdownPreprocessor(BlockProcessor):
    """Block processor that extracts fenced blocks whose type has a handler
    registered in the game system's ``function_map`` and replaces them with
    the handler's rendered HTML.
    """
    RE_CHARACTER = r''
    # Check for three exclamation marks and a block specifier
    # NOTE(review): despite the "!!!" wording above, this regex actually
    # matches triple-backtick fences ("```Type ... ```") — confirm which
    # syntax is intended.
    RE_FENCE = r'^ *`{3,}(\S*)[\n]((.|\n)*)`{3,}'
    # Detects whether the fence body starts with a JSON object.
    RE_CHECK_JSON = r'^ *`{3,}[\S]*\s*(\{)'
    RE_FIND_STRING = r'"([^"]*)"'

    def __init__(self, parser, game_system_class):
        super().__init__(parser)
        # Object whose function_map maps lowercase block types to handlers
        # that return HTML strings.
        self.game_system_class = game_system_class

    def test(self, parent, block):
        # Only claim fences whose type has a registered handler.
        res = re.match(self.RE_FENCE, block)
        if res and len(res.groups()) and res.group(1).lower() in self.game_system_class.function_map:
            return res
        return None

    def run(self, parent, blocks):
        block = blocks.pop(0)
        res = re.match(self.RE_FENCE, block)
        if res and len(res.groups()) > 2:
            type = res.group(1)
            content = res.group(2)
            if type.lower() in self.game_system_class.function_map:
                try:
                    # JSON-shaped content is parsed as a dict; plain text is
                    # wrapped under a "content" key before dispatch.
                    if re.match(self.RE_CHECK_JSON, block):
                        parsed_content = json.loads(content.replace('\n', ''))
                    else:
                        parsed_content = {"content": content}
                    parsed_content = self.game_system_class.function_map[type.lower()](parsed_content)
                except Exception as e:
                    # Handler/parse failures are rendered inline in red rather
                    # than aborting the whole document.
                    parsed_content = f'<p style="color: red"><b>Unable to parse content: {e}</b></p>'
            else:
                parsed_content = parse_markdown(block)
            # NOTE(review): etree.fromstring requires the handler output to be
            # a single well-formed XML element — multi-root HTML will raise.
            child = etree.fromstring(parsed_content)
            parent.append(child)
# Remove br tags that are generated by some reason by markdown
class MarkdownPostprocessor(Postprocessor):
    """Final pass over the rendered HTML: drop stray "<br />" tags and hand
    the result to the game system's stylize hook."""

    def __init__(self, game_system_class):
        self.game_system_class = game_system_class

    def run(self, text):
        without_breaks = text.replace("<br />\n", "")
        return self.game_system_class.stylize(without_breaks)
class CampainGeneratorExtension(Extension):
    """Markdown extension wiring up the campaign-generator processors
    (heading id rewriting, fenced game blocks, final styling pass)."""

    def __init__(self, game_system_class):
        # Fix: run the base Extension initializer so its config machinery is
        # set up; the original override skipped it.
        super().__init__()
        self.game_system_class = game_system_class

    def extendMarkdown(self, md):
        """Register the tree, block and post processors on *md* with the
        priorities the pipeline expects."""
        md.treeprocessors.register(HeadingReferences(md.parser), 'HeadingReferences', 100)
        md.parser.blockprocessors.register(MarkdownPreprocessor(md.parser, self.game_system_class),
                                           'MarkdownPreprocessor', 101)
        md.postprocessors.register(MarkdownPostprocessor(self.game_system_class), 'MarkdownPostProcessor', float('inf'))
# Module-level cache of the active game system: parse_markdown remembers the
# last system passed in, so recursive/nested calls need not re-supply it.
system = None
def parse_markdown(raw, _system=None):
    """Render *raw* markdown to HTML using the campaign extension.

    If *_system* is given it becomes the cached game system for this and
    all later calls; otherwise the previously cached one is used.
    """
    global system
    if _system:
        system = _system
    return markdown.markdown(raw, extensions=[CampainGeneratorExtension(system), 'tables', 'toc'])
``` |
{
"source": "josefwaller/PythonCatanLibrary",
"score": 3
} |
#### File: PythonCatanLibrary/examples/board_renderer.py
```python
from pycatan.board import Board
from pycatan.hex_type import HexType
from pycatan.game import Game
from blessings import Terminal
import math
# Render an board object in ascii in the command prompt
class BoardRenderer:
    """Renders a Catan Board as ASCII art in the terminal.

    *center* is an [x, y] screen offset; player pieces are colour-coded by
    player number via the terminal colour list below.
    """
    def __init__(self, board, center):
        self.board = board
        self.center = center
        self.terminal = Terminal()
        # Different colors to use for the 4 players
        self.player_colors = [
            self.terminal.red,
            self.terminal.cyan,
            self.terminal.green,
            self.terminal.yellow
        ]

    def render(self):
        """Clear the screen and draw every hex, road and occupied point."""
        # Clear screen
        print(self.terminal.clear())
        # Render hexes
        for r in self.board.hexes:
            for h in r:
                self.render_hex(h)
        # Render roads
        for r in self.board.roads:
            self.render_road(r)
        # Render points
        for r in self.board.points:
            for p in r:
                self.render_point(p)
        # Reset cursor position
        print(self.terminal.position(0, 0))

    def render_hex(self, hex_obj):
        """Draw one hex (type letter + token number) at its board position."""
        # the lines needed to draw each hex
        hex_lines = [
            "___",
            "/%s%s\\" % (BoardRenderer.get_hex_type_string(hex_obj.type), str(hex_obj.token_num).rjust(2) if hex_obj.token_num else " "),
            "\\___/"
        ]
        # Get the x, y coordinates to render the hex
        coords = self.get_render_coords(hex_obj.position[0], hex_obj.position[1])
        # Draw each hex's lines
        for line_index in range(len(hex_lines)):
            # Shift the first line over by 1
            x_offset = 1 if line_index == 0 else 0
            # Get position
            position = self.terminal.move(self.center[1] + line_index + coords[1], x_offset + self.center[0] + coords[0])
            # Print the line
            print(position + hex_lines[line_index])

    # Draw a point on the hex
    def render_point(self, point_obj):
        """Draw a coloured dot for a settlement/city on *point_obj*, if any."""
        # Get the building
        building = point_obj.building
        # Check it exists
        if building != None:
            # Check the point's coordinates
            coords = self.get_point_coords(point_obj.position[0], point_obj.position[1])
            # Draw a dot there
            position = self.terminal.move(self.center[1] + coords[1], self.center[0] + coords[0])
            # Get the owner of the point
            owner = building.owner
            print(self.player_colors[owner] + position + "." + self.terminal.normal)

    # Render a road onto the board
    def render_road(self, road_obj):
        """Draw a road as "___", "/" or "\\" between its two endpoints,
        coloured by owner."""
        # Position to draw the road
        pos = [0, 0]
        # String to draw representing the road
        # Should be either "\", "/" or "___"
        road_str = ""
        # Get the points
        point_one_pos = road_obj.point_one
        point_two_pos = road_obj.point_two
        # Get their coordinates
        p_one_coords = self.get_point_coords(point_one_pos[0], point_one_pos[1])
        p_two_coords = self.get_point_coords(point_two_pos[0], point_two_pos[1])
        # If they're on the same line
        if p_one_coords[1] == p_two_coords[1]:
            # Just draw a line between them
            pos = [min(p_one_coords[0], p_two_coords[0]), p_one_coords[1]]
            road_str = "___"
        else:
            # Pick the diagonal direction from the endpoints' relative order.
            if p_one_coords[0] < p_two_coords[0]:
                if p_one_coords[1] < p_two_coords[1]:
                    road_str = "\\"
                else:
                    road_str = "/"
            else:
                if p_one_coords[1] < p_two_coords[1]:
                    road_str = "/"
                else:
                    road_str = "\\"
            pos = [min(p_one_coords[0], p_two_coords[0]) + 1, max(p_two_coords[1], p_one_coords[1])]
        # Get position
        render_pos = self.terminal.move(pos[1] + self.center[1], pos[0] + self.center[0])
        # Print the road
        print(self.player_colors[road_obj.owner] + render_pos + road_str)

    # Get the x, y coordinates for a hex from a row and index
    def get_render_coords(self, row, index):
        """Map a hex's (row, index) board position to screen x, y offsets."""
        # Initial coords
        x = 0
        y = 0
        # Each row is futher left than the previous, so decrease x based on row
        x -= 4 * row
        # Each row is also half a hex further down than the previous one
        y += 1 * row
        # Each index moves the hex down and to the right half a hex each
        x += 4 * index
        y += 1 * index
        # If the row is in the bottom half, it should move the hex down and to the left
        length = len(self.board.hexes)
        if row > length / 2:
            # Move if one hex to the right for every row between its row and the halfway row
            x += 4 * math.ceil(row - length / 2)
            # Move it one hex down for every row between its row and the halfway row
            y += 1 + math.floor(row - length / 2)
        # Return coords
        return [x, y]

    # Get the x, y coordinates for a point from a row and index
    def get_point_coords(self, row, index):
        """Map a point's (row, index) board position to screen x, y offsets.

        Top and bottom halves of the board use different offset formulas.
        """
        # Initial coords
        x = 1
        y = 0
        # Do different positioning if the row is in the top/bottom half of the board
        half_length = math.floor(len(self.board.points) / 2)
        if row < half_length:
            # Each index moves the point over two
            x += 2 * index
            # Each second index moves the point down one
            y += 1 * math.floor(index / 2)
            # Each row moves the point down and to the left
            x -= 4 * row
            y += 1 * row
        # If the row is in the bottom half, the point should be moved down and to the right
        if row >= half_length:
            diff = row - half_length
            # Move the point to the first position in the bottom row
            x -= 4 * half_length - 2
            y += half_length
            # Move down for each row
            y += 2 * diff
            # Move down and to the right for each index
            y += math.ceil(index / 2)
            x += 2 * index
        # Return point
        return [x, y]

    # Get a 1 letter long string representation on a certain hex type
    @staticmethod
    def get_hex_type_string(hex_type):
        """Return the single-letter label used when drawing a hex of
        *hex_type*; raises on unknown types."""
        if hex_type == HexType.HILLS:
            return "H"
        elif hex_type == HexType.MOUNTAINS:
            return "M"
        elif hex_type == HexType.PASTURE:
            return "P"
        elif hex_type == HexType.FOREST:
            return "F"
        elif hex_type == HexType.FIELDS:
            # Since F is already used, use W for "wheat"
            return "W"
        elif hex_type == HexType.DESERT:
            return "D"
        else:
            raise Exception("Unknown HexType %s passed to get_hex_type_string" % hex_type)
# Demo: build a game, place a few starting pieces and render the board once.
if __name__ == "__main__":
    g = Game()
    br = BoardRenderer(g.board, [50, 10])
    # Add some settlements
    g.add_settlement(player=0, r=0, i=0, is_starting=True)
    g.add_settlement(player=1, r=2, i=3, is_starting=True)
    g.add_settlement(player=2, r=4, i=1, is_starting=True)
    # Add some roads
    g.add_road(player=0, start=[0, 0], end=[0, 1], is_starting=True)
    g.add_road(player=1, start=[2, 3], end=[2, 2], is_starting=True)
    g.add_road(player=2, start=[4, 1], end=[4, 0], is_starting=True)
    br.render()
```
#### File: PythonCatanLibrary/pycatan/default_board.py
```python
from pycatan.board import Board
from pycatan.tile import Tile
from pycatan.point import Point
from pycatan.tile_type import TileType
from pycatan.harbor import Harbor, HarborType
import math
import random
# The default, tileagonal board filled with random tiles and tokens
# The default, hexagonal board filled with random tiles and tokens
class DefaultBoard(Board):
    """The standard hexagonal Catan board: 19 tiles in a 3-4-5-4-3 layout,
    randomly shuffled tiles/tokens, and nine shuffled harbors placed around
    the coast."""
    def __init__(self, game):
        super(DefaultBoard, self).__init__(game)
        # Set tiles
        tile_deck = Board.get_shuffled_tile_deck()
        token_deck = Board.get_shuffled_tile_nums()
        temp_tiles = []
        for r in range(5):
            temp_tiles.append([])
            for i in range([3, 4, 5, 4, 3][r]):
                # Add a tile
                new_tile = Tile(type=tile_deck.pop(), token_num=None, position=[r, i], points=[])
                temp_tiles[-1].append(new_tile)
                # The desert gets no token and starts with the robber
                if new_tile.type == TileType.Desert:
                    self.robber = [r, i]
                else:
                    new_tile.token_num = token_deck.pop()
        self.tiles = tuple(map(lambda x: tuple(x), temp_tiles))
        # Add points
        temp_points = []
        for r in range(6):
            temp_points.append([])
            for i in range([7, 9, 11, 11, 9, 7][r]):
                point = Point(tiles=[], position=[r, i])
                temp_points[-1].append(point)
                # Set point/tile relations
                for pos in DefaultBoard.get_tile_indexes_for_point(r, i):
                    point.tiles.append(self.tiles[pos[0]][pos[1]])
                    self.tiles[pos[0]][pos[1]].points.append(point)
        self.points = tuple(map(lambda x: tuple(x), temp_points))
        # Set the connected points for each point
        # Must be done after initializing each point so that the point object exists
        for r in self.points:
            for p in r:
                p.connected_points = self.get_connected_points(p.position[0], p.position[1])
        # adds a harbor for each points in the pattern 2 3 2 2 3 2 etc
        outside_points = DefaultBoard.get_outside_points()
        # the pattern of spaces between harbors
        pattern = [1, 2, 1]
        # the current index of pattern
        index = 0
        # the different types of harbors
        harbor_types = [
            HarborType.Wood,
            HarborType.Brick,
            HarborType.Ore,
            HarborType.Wheat,
            HarborType.Sheep,
            HarborType.Any,
            HarborType.Any,
            HarborType.Any,
            HarborType.Any
        ]
        # Shuffles the harbors
        random.shuffle(harbor_types)
        # Run loop until harbor_types is empty
        while harbor_types:
            # Create a new harbor
            p_one = outside_points.pop()
            p_two = outside_points.pop()
            harbor = Harbor(
                point_one = self.points[p_one[0]][p_one[1]],
                point_two = self.points[p_two[0]][p_two[1]],
                type = harbor_types.pop())
            # Add it to harbors
            self.harbors.append(harbor)
            # Remove the unused points from outside_points
            for _ in range(pattern[index % len(pattern)]):
                outside_points.pop()
            # Use next pattern value for number of points inbetween next time
            index += 1
        # puts the robber on the desert tile to start
        # NOTE(review): temp_tiles holds Tile objects, so counting
        # TileType.Desert here likely never matches (and the robber was
        # already placed when the desert tile was created above) — this loop
        # looks like dead code; verify Tile equality semantics.
        for r in range(len(temp_tiles)):
            # checks if this row has the desert
            if temp_tiles[r].count(TileType.Desert) > 0:
                # places the robber
                self.robber = [r, temp_tiles[r].index(TileType.Desert)]

    # Returns the indexes of the tiles connected to a certain points
    # on the default, hexagonal Catan board
    @staticmethod
    def get_tile_indexes_for_point(r, i):
        """Return [row, index] pairs of the tiles adjacent to point (r, i)."""
        # the indexes of the tiles
        tile_indexes = []
        # Row widths of points on a hexagonal board (used only for lengths)
        points = [
            [None] * 7,
            [None] * 9,
            [None] * 11,
            [None] * 11,
            [None] * 9,
            [None] * 7
        ]
        # gets the adjacent tiles differently depending on whether the point is in the top or the bottom
        if r < len(points) / 2:
            # gets the tiles below the point ------------------
            # adds the tiles to the right
            if i < len(points[r]) - 1:
                tile_indexes.append([r, math.floor(i / 2)])
            # if the index is even, the number is between two tiles
            if i % 2 == 0 and i > 0:
                tile_indexes.append([r, math.floor(i / 2) - 1])
            # gets the tiles above the point ------------------
            if r > 0:
                # gets the tile to the right
                if i > 0 and i < len(points[r]) - 2:
                    tile_indexes.append([r - 1, math.floor((i - 1) / 2)])
                # gets the tile to the left
                if i % 2 == 1 and i < len(points[r]) - 1 and i > 1:
                    tile_indexes.append([r - 1, math.floor((i - 1) / 2) - 1])
        else:
            # adds the below -------------
            if r < len(points) - 1:
                # gets the tile to the right or directly below
                if i < len(points[r]) - 2 and i > 0:
                    tile_indexes.append([r, math.floor((i - 1) / 2)])
                # gets the tile to the left
                if i % 2 == 1 and i > 1 and i < len(points[r]):
                    tile_indexes.append([r, math.floor((i - 1) / 2 - 1)])
            # gets the tiles above ------------
            # gets the tile above and to the right or directly above
            if i < len(points[r]) - 1:
                tile_indexes.append([r - 1, math.floor(i / 2)])
            # gets the tile to the left
            if i > 1 and i % 2 == 0:
                tile_indexes.append([r - 1, math.floor((i - 1) / 2)])
        return tile_indexes

    # gets the points that are connected to the point given
    def get_connected_points(self, r, i):
        """Return the Point objects adjacent to point (r, i)."""
        to_return = []
        # Get the point to the left and the right
        if i > 0:
            to_return.append(self.points[r][i - 1])
        if i < len(self.points[r]) - 1:
            to_return.append(self.points[r][i + 1])
        # Get the point above and below
        # First, if the point is in the center two rows, the connected point
        # is either directly above/below this point
        if r == 2 and i % 2 == 0:
            to_return.append(self.points[r + 1][i])
        elif r == 3 and i % 2 == 0:
            to_return.append(self.points[r - 1][i])
        # If the point is not in the 2 center rows, the point will have an offset
        elif r < len(self.points) / 2:
            if i % 2 == 0:
                to_return.append(self.points[r + 1][i + 1])
            elif r > 0 and i > 0:
                to_return.append(self.points[r - 1][i - 1])
        else:
            if i % 2 == 0:
                to_return.append(self.points[r - 1][i + 1])
            elif r < len(self.points) - 1 and i > 0:
                to_return.append(self.points[r + 1][i - 1])
        return to_return

    # Get the points along the outside of the board, in clockwise order
    @staticmethod
    def get_outside_points():
        """Return [row, index] pairs of all coastal points, clockwise from
        the top-left corner."""
        # The lengths of each row of points on the board
        row_lengths = [
            7,
            9,
            11,
            11,
            9,
            7
        ]
        # The points on the bottom
        bottom = list(map(lambda x: [len(row_lengths) - 1, x], range(row_lengths[-1])))
        # The points on the top
        top = list(map(lambda x: [0, x], range(row_lengths[0])))
        # adds all the points on the right and left
        right = []
        left = []
        for r in range(1, len(row_lengths) - 1):
            # Get the last two and first two points on this row
            last_two = list(map(lambda x: [r, x], range(row_lengths[r])[-2:]))
            first_two = list(map(lambda x: [r, x], reversed(range(2))))
            # If the points are on the bottom half of the board, reverse them
            if r > (len(row_lengths) - 1) / 2:
                last_two = list(reversed(last_two))
                first_two = list(reversed(first_two))
            # Add points to right and left
            right.extend(last_two)
            left.extend(first_two)
        # Put different sides of points in order
        # bottom and left are reversed since we want to count those points in reverse order
        # to make sure we go in clockwise order
        outside_points = []
        outside_points.extend(top)
        outside_points.extend(right)
        outside_points.extend(reversed(bottom))
        outside_points.extend(reversed(left))
        # Return them
        return outside_points
```
#### File: PythonCatanLibrary/pycatan/harbor.py
```python
from enum import Enum
from pycatan.card import ResCard
# The different types of harbors found throughout the game
class HarborType(Enum):
    """Harbor trade-ratio categories.

    Wood through Ore are the resource-specific 2:1 harbors; Any is the
    generic 3:1 harbor.
    """
    # resource-specific 2:1 harbors
    Wood = 0
    Sheep = 1
    Brick = 2
    Wheat = 3
    Ore = 4
    # generic 3:1 harbor
    Any = 5
# represents a catan harbor
class Harbor:
    """A trading harbor anchored to two adjacent board points."""

    def __init__(self, point_one, point_two, type):
        # the trade ratio category (a HarborType)
        self.type = type
        # the two points that grant access to this harbor
        self.point_one = point_one
        self.point_two = point_two

    def __repr__(self):
        return "Harbor %s, %s Type %s" % (self.point_one, self.point_two, self.type)

    def get_points(self):
        """Return both access points as a list."""
        return [self.point_one, self.point_two]

    # returns a string representation of the type
    # Ex: 3:1, 2:1S, 2:1Wh
    def get_type(self):
        """Return a short ratio label for this harbor's type (None if the
        type is unrecognised, matching the original fall-through)."""
        labels = {
            HarborType.Wood: "2:1W",
            HarborType.Sheep: "2:1S",
            HarborType.Brick: "2:1B",
            HarborType.Wheat: "2:1Wh",
            HarborType.Ore: "2:1O",
            HarborType.Any: "3:1",
        }
        return labels.get(self.type)

    @staticmethod
    def get_card_from_harbor_type(h_type):
        """Map a 2:1 harbor type to its resource card; None for the 3:1
        harbor; raises for anything else."""
        if h_type == HarborType.Any:
            return None
        resource_for_type = {
            HarborType.Wood: ResCard.Wood,
            HarborType.Brick: ResCard.Brick,
            HarborType.Wheat: ResCard.Wheat,
            HarborType.Ore: ResCard.Ore,
            HarborType.Sheep: ResCard.Sheep,
        }
        if h_type not in resource_for_type:
            raise Exception("Harbor has invalid type %s" % h_type)
        return resource_for_type[h_type]
```
#### File: PythonCatanLibrary/pycatan/player.py
```python
from pycatan.building import Building
from pycatan.statuses import Statuses
from pycatan.card import ResCard, DevCard
import math
# The player class for
class Player:
    """A single Catan player.

    Tracks resource cards, development cards, played knights, victory points
    and road-network state, and implements the build/trade rules that return
    Statuses values.
    """
    def __init__(self, game, num):
        # the game the player belongs to
        self.game = game
        # the player number for this player
        self.num = num
        # the starting roads for this player
        # used to determine the longest road
        self.starting_roads = []
        # the number of victory points
        self.victory_points = 0
        # the resource cards the player has (ResCard values)
        self.cards = []
        # the development cards this player has
        self.dev_cards = []
        # the number of knight cards the player has played
        self.knight_cards = 0
        # the longest road segment this player has
        self.longest_road_length = 0

    def build_settlement(self, point, is_starting=False):
        """Build a settlement on *point*.

        Starting settlements are free and may be isolated; otherwise the
        player must pay Wood/Brick/Sheep/Wheat and connect via an owned
        road. Returns a Statuses value.
        """
        if not is_starting:
            # makes sure the player has the cards to build a settlement
            cards_needed = [
                ResCard.Wood,
                ResCard.Brick,
                ResCard.Sheep,
                ResCard.Wheat
            ]
            # checks the player has the cards
            if not self.has_cards(cards_needed):
                return Statuses.ERR_CARDS
            # checks it is connected to a road owned by the player
            connected_by_road = False
            for r in self.game.board.roads:
                # checks if the road is connected and owned by this player
                if r.point_one is point or r.point_two is point:
                    if r.owner == self.num:
                        connected_by_road = True
            if not connected_by_road:
                return Statuses.ERR_ISOLATED
        # checks that a building does not already exist there
        if point.building != None:
            return Statuses.ERR_BLOCKED
        # checks all other settlements are at least 2 away
        for p in point.connected_points:
            # checks if the neighbouring point is occupied
            if p.building != None:
                return Statuses.ERR_BLOCKED
        if not is_starting:
            # removes the cards
            self.remove_cards(cards_needed)
        # adds the settlement
        self.game.board.add_building(Building(
            owner = self.num,
            type = Building.BUILDING_SETTLEMENT,
            point_one = point),
            point = point)
        # adds a victory point
        self.victory_points += 1
        return Statuses.ALL_GOOD

    def has_cards(self, cards):
        """Return True if the player holds every card in *cards*, counting
        duplicates correctly."""
        # work on a copy and delete matches, so that asking for two of the
        # same card is only satisfied by two actual copies
        cards_dup = self.cards[:]
        for c in cards:
            if cards_dup.count(c) == 0:
                return False
            else:
                index = cards_dup.index(c)
                del cards_dup[index]
        return True

    def add_cards(self, cards):
        """Add every card in *cards* to the player's hand."""
        for c in cards:
            self.cards.append(c)

    def remove_cards(self, cards):
        """Remove *cards* from the hand; no-op (with ERR_CARDS) if any card
        is missing."""
        # makes sure it has all the cards before deleting any
        if not self.has_cards(cards):
            return Statuses.ERR_CARDS
        else:
            # removes the cards
            for c in cards:
                index = self.cards.index(c)
                del self.cards[index]

    def add_dev_card(self, dev_card):
        """Add a development card to the player's hand."""
        self.dev_cards.append(dev_card)

    def remove_dev_card(self, card):
        """Remove one copy of *card*; ERR_CARDS if the player has none."""
        for i in range(len(self.dev_cards)):
            if self.dev_cards[i] == card:
                del self.dev_cards[i]
                return Statuses.ALL_GOOD
        # error if the player does not have the card
        return Statuses.ERR_CARDS

    def road_location_is_valid(self, start, end):
        """Check that a road from *start* to *end* is adjacent, unoccupied
        and connected to this player's network. Returns a Statuses value."""
        # checks the two points are adjacent on the board
        connected = False
        for p in start.connected_points:
            if end == p:
                connected = True
                break
        if not connected:
            return Statuses.ERR_NOT_CON
        # checks a road does not already exist with these points
        for road in self.game.board.roads:
            if road.point_one == start or road.point_two == start:
                if road.point_one == end or road.point_two == end:
                    return Statuses.ERR_BLOCKED
        # check this player has a settlement on one of these points or a connecting road
        is_connected = False
        if start.building != None:
            # checks if this player owns the settlement/city
            if start.building.owner == self.num:
                is_connected = True
        # does the same for the other point
        elif end.building != None:
            if end.building.owner == self.num:
                is_connected = True
        # then checks if there is a road connecting them
        # NOTE(review): this loop accepts roads regardless of owner —
        # confirm whether connecting via an opponent's road is intended.
        points = [start, end]
        for r in self.game.board.roads:
            for p in points:
                if r.point_one == p or r.point_two == p:
                    # checks that there is not another player's settlement
                    # here, so that the new road is not built through it
                    if p.building == None:
                        is_connected = True
                    # if there is a settlement/city there, the road can be
                    # built if this player owns it
                    elif p.building.owner == self.num:
                        is_connected = True
        if not is_connected:
            return Statuses.ERR_ISOLATED
        return Statuses.ALL_GOOD

    def build_road(self, start, end, is_starting=False):
        """Build a road between *start* and *end*; starting roads are free.
        Returns a Statuses value."""
        # checks the location is valid
        location_status = self.road_location_is_valid(start=start, end=end)
        if not location_status == Statuses.ALL_GOOD:
            return location_status
        # if the road is being created on the starting turn, the player does
        # not need to have the cards
        if not is_starting:
            # checks that it has the proper cards
            cards_needed = [
                ResCard.Wood,
                ResCard.Brick
            ]
            if not self.has_cards(cards_needed):
                return Statuses.ERR_CARDS
            # removes the cards
            self.remove_cards(cards_needed)
        # adds the road
        road = Building(owner=self.num, type=Building.BUILDING_ROAD, point_one=start, point_two=end)
        (self.game).board.add_road(road)
        # update longest-road bookkeeping with the new segment
        self.get_longest_road(new_road=road)
        return Statuses.ALL_GOOD

    def get_connected_harbor_types(self):
        """Return the harbor types reachable from this player's buildings
        (each type listed once)."""
        # fixed: removed leftover debug print() calls from this method
        harbors = []
        all_harbors = self.game.board.harbors
        buildings = self.game.board.get_buildings()
        for b in buildings:
            # checks the building belongs to this player
            if b.owner == self.num:
                # checks if the building is connected to any harbors
                for h in all_harbors:
                    if h.point_one is b.point or h.point_two is b.point:
                        # adds the type, avoiding duplicates
                        if harbors.count(h.type) == 0:
                            harbors.append(h.type)
        return harbors

    def get_longest_road(self, new_road):
        """Recompute this player's longest road segment containing
        *new_road*; call whenever a new road is built (the length can only
        change then)."""
        # gets the roads that belong to this player, minus the new one
        roads = self.get_roads()
        del roads[roads.index(new_road)]
        # walk outward from the new road
        self.check_connected_roads(road=new_road, all_roads=roads, length=1)

    def check_connected_roads(self, road, all_roads, length):
        """Depth-first walk over *all_roads* from *road*, recording the
        longest chain found in self.longest_road_length."""
        # explore from both endpoints
        points = [
            road.point_one,
            road.point_two
        ]
        for p in points:
            # gets the connected roads
            connected = self.get_connected_roads(point=p, roads=all_roads)
            if len(connected) == 0:
                # dead end: record the chain if it is the longest so far
                if length > self.longest_road_length:
                    self.longest_road_length = length
            else:
                # recurse into each unused connected road
                for c in connected:
                    if all_roads.count(c) > 0:
                        # copies all usable roads and removes this one so it
                        # cannot be traversed twice
                        c_roads = all_roads[:]
                        del c_roads[c_roads.index(c)]
                        self.check_connected_roads(c, c_roads, length + 1)

    def get_connected_roads(self, point, roads):
        """Return the roads in *roads* that touch *point*."""
        con_roads = []
        for r in roads:
            if r.point_one == point or r.point_two == point:
                con_roads.append(r)
        return con_roads

    def get_roads(self):
        """Return all roads on the board owned by this player."""
        all_roads = (self.game).board.roads
        roads = []
        for r in all_roads:
            if r.owner == self.num:
                roads.append(r)
        return roads

    def has_dev_cards(self, cards):
        """Return True if the player holds every dev card in *cards*,
        counting duplicates correctly."""
        card_duplicate = self.dev_cards[:]
        for c in cards:
            if not card_duplicate.count(c) > 0:
                return False
            else:
                del card_duplicate[card_duplicate.index(c)]
        return True

    def get_VP(self, include_dev=False):
        """Return this player's victory points.

        If *include_dev* is False, VP development cards are excluded since
        other players cannot see them.
        """
        # gets the victory points from settlements and cities
        points = self.victory_points
        # adds VPs from longest road
        if self.game.longest_road_owner == self.num:
            points += 2
        # adds VPs from largest army
        if self.game.largest_army == self.num:
            points += 2
        # adds VPs from development cards
        if include_dev:
            for d in self.dev_cards:
                if d == DevCard.VP:
                    points += 1
        return points

    @staticmethod
    def print_cards(cards):
        """Debug helper: pretty-print a list of resource cards."""
        print("[")
        # fixed: iterate with enumerate — the old cards.index(c) placed the
        # trailing comma wrongly when the hand contained duplicate cards
        for i, c in enumerate(cards):
            card_name = ""
            if c == ResCard.Wood:
                card_name = "Wood"
            elif c == ResCard.Sheep:
                card_name = "Sheep"
            elif c == ResCard.Brick:
                card_name = "Brick"
            elif c == ResCard.Wheat:
                card_name = "Wheat"
            elif c == ResCard.Ore:
                card_name = "Ore"
            else:
                print("INVALID CARD %s" % c)
                continue
            if i < len(cards) - 1:
                card_name += ","
            print(" %s" % card_name)
        print("]")
```
#### File: PythonCatanLibrary/tests/test_game.py
```python
from pycatan.game import Game
from pycatan.building import Building
from pycatan.card import ResCard
from pycatan.statuses import Statuses
from pycatan.harbor import HarborType
import random
class TestGame:
def test_game_uses_three_players_by_default(self):
game = Game()
assert len(game.players) == 3
def test_game_starts_with_variable_players(self):
game = Game(num_of_players=5)
assert len(game.players) == 5
def test_adding_starting_settlements(self):
# Create game
g = Game();
# Make sure creating a starting settlement does not use any cards
g.players[0].add_cards([
ResCard.Wood,
ResCard.Brick,
ResCard.Sheep,
ResCard.Wheat
])
# Test adding a starting settlement, i.e. no cards needed
res = g.add_settlement(0, g.board.points[0][0], True)
assert res == Statuses.ALL_GOOD
assert g.board.points[0][0].building != None
assert g.board.points[0][0].building.type == Building.BUILDING_SETTLEMENT
assert g.board.points[0][0].building.point is g.board.points[0][0]
assert len(g.players[0].cards) == 4
# Test adding a settlement too close to another settlement
res = g.add_settlement(1, g.board.points[0][1], True)
assert res == Statuses.ERR_BLOCKED
# Test adding a settlement the correct distance away
res = g.add_settlement(2, g.board.points[0][2], True)
assert res == Statuses.ALL_GOOD
def test_adding_starting_roads(self):
# Create game
g = Game()
# Add starting settlement
g.add_settlement(0, g.board.points[0][0], True)
# Try adding a road
res = g.add_road(0, g.board.points[0][0], g.board.points[0][1], True)
assert res == Statuses.ALL_GOOD
res = g.add_road(0, g.board.points[1][1], g.board.points[0][0], True)
assert res == Statuses.ALL_GOOD
# Try adding a disconnected road
res = g.add_road(0, g.board.points[2][0], g.board.points[2][1], True)
assert res == Statuses.ERR_ISOLATED
# Try adding a road whose point's are not connected
res = g.add_road(0, g.board.points[0][0], g.board.points[5][5], True)
assert res == Statuses.ERR_NOT_CON
# Try adding a road connected to another player's settlement
g.add_settlement(1, g.board.points[2][2], True)
res = g.add_road(0, g.board.points[2][2], g.board.points[2][3], True)
assert res == Statuses.ERR_ISOLATED
# Test that player.add_settlement returns the proper value
def test_add_settlement(self):
g = Game()
# Try to add a settlement without the cards
g.add_settlement(0, g.board.points[0][0])
# Add cards to build a settlement
g.players[0].add_cards([
ResCard.Wood,
ResCard.Brick,
ResCard.Sheep,
ResCard.Wheat
])
# Try adding an isolated settlement
res = g.add_settlement(0, g.board.points[0][0])
assert res == Statuses.ERR_ISOLATED
assert g.board.points[0][0].building == None
# Add starting settlement and two roads to ensure there is an available position
assert g.add_settlement(0, g.board.points[0][2], True) == Statuses.ALL_GOOD
assert g.add_road(0, g.board.points[0][2], g.board.points[0][1], True) == Statuses.ALL_GOOD
assert g.add_road(0, g.board.points[0][0], g.board.points[0][1], True) == Statuses.ALL_GOOD
res = g.add_settlement(0, g.board.points[0][0])
assert res == Statuses.ALL_GOOD
assert g.board.points[0][0].building != None
assert g.board.points[0][0].building.type == Building.BUILDING_SETTLEMENT
# Test trading in cards either directly through the bank
def test_trade_in_cards_through_bank(self):
g = Game()
# Add 4 wood cards to player 0
g.players[0].add_cards([ResCard.Wood] * 4)
# Try to trade in for 1 wheat
res = g.trade_to_bank(player=0, cards=[ResCard.Wood] * 4, request=ResCard.Wheat)
assert res == Statuses.ALL_GOOD
assert not g.players[0].has_cards([ResCard.Wood])
assert g.players[0].has_cards([ResCard.Wheat])
# Try to trade in cards the player doesn't have
res = g.trade_to_bank(player=0, cards=[ResCard.Brick] * 4, request=ResCard.Ore)
assert res == Statuses.ERR_CARDS
assert not g.players[0].has_cards([ResCard.Ore])
# Try to trade in with less than 4 cards, but more than 0
g.players[0].add_cards([ResCard.Brick] * 3)
res = g.trade_to_bank(player=0, cards=[ResCard.Brick] * 4, request=ResCard.Sheep)
assert res == Statuses.ERR_CARDS
assert g.players[0].has_cards([ResCard.Brick] * 3)
assert not g.players[0].has_cards([ResCard.Sheep])
def test_trade_in_cards_through_harbor(self):
g = Game();
# Add Settlement next to the harbor on the top
res = g.add_settlement(0, g.board.points[0][2], is_starting=True)
assert res == Statuses.ALL_GOOD
# Make the harbor trade in ore for testing
for h in g.board.harbors:
if g.board.points[0][2] in h.get_points():
h.type = HarborType.Ore
print("found harbor lmao")
g.players[0].add_cards([ResCard.Ore] * 2)
# Try to use harbor
res = g.trade_to_bank(player=0, cards=[ResCard.Ore] * 2, request=ResCard.Wheat)
assert res == Statuses.ALL_GOOD
assert g.players[0].has_cards([ResCard.Wheat])
assert not g.players[0].has_cards([ResCard.Ore])
# Try to trade in to a harbor that the player does not have access to
g.players[0].add_cards([ResCard.Brick] * 2)
res = g.trade_to_bank(player=0, cards=[ResCard.Brick] * 2, request=ResCard.Sheep)
assert res == Statuses.ERR_HARBOR
assert g.players[0].has_cards([ResCard.Brick] * 2)
assert not g.players[0].has_cards([ResCard.Sheep])
# Try to trade without the proper cards
assert not g.players[0].has_cards([ResCard.Ore])
res = g.trade_to_bank(player=0, cards=[ResCard.Ore] * 2, request=ResCard.Sheep)
assert res == Statuses.ERR_CARDS
assert not g.players[0].has_cards([ResCard.Sheep])
# Try to trade with more cards than the player has
g.players[0].add_cards([ResCard.Ore])
res = g.trade_to_bank(player=0, cards=[ResCard.Ore] * 2, request=ResCard.Sheep)
assert res == Statuses.ERR_CARDS
assert not g.players[0].has_cards([ResCard.Sheep])
assert g.players[0].has_cards([ResCard.Ore])
def test_moving_robber(self):
random.seed(1)
g = Game()
# Move the robber
g.move_robber(g.board.tiles[0][0], None, None)
assert g.board.robber is g.board.tiles[0][0]
# Build a settlement at 1, 1
g.add_settlement(player=0, point=g.board.points[1][1], is_starting=True)
# Roll an 8
g.add_yield_for_roll(8)
# Ensure the player got nothing since the robber was there
assert len(g.players[0].cards) == 0
# Give the player a brick to steal
g.players[0].add_cards([ResCard.Brick])
# Move the robber to 1, 0 and steal the brick
g.move_robber(g.board.tiles[1][0], 1, 0)
# Make sure they stole the brick
assert g.players[1].has_cards([ResCard.Brick])
``` |
{
"source": "josegallo/python-basurilla",
"score": 4
} |
#### File: josegallo/python-basurilla/Basurilla.py
```python
import simplegui
import random
import math
# Canvas size
width = 1000
height = 700
# Players initial features
# NOTE(review): keys 5 and 7 both map to "s" — the second player's code
# below listens for "x" as down, so 7 probably should be "x"; confirm.
k = {0: "left", 1:"right", 2:"up", 3:"down", 4:"a", 5:"s", 6:"w", 7:"s"}
# Spawn points: player 1 on the left quarter, player 2 on the right quarter
player_pos = [(width / 2)*0.5, height / 2]
player_pos_2 = [(width / 2)*1.5, height / 2]
color_player = "White"
text = ""
radius = 20  # initial player radius
color = "White"
# Particule traits
colors =["Aqua","Blue","Fuchsia","Gray","Green","Lime", "Maroon", "Navy", "Olive", "Orange", \
         "Purple","Red","Silver", "Teal","White", "Yellow"]
particules = []  # live food balls; handlers below add/remove entries
remove = []  # NOTE(review): never used anywhere in this file
def dot(v, w):
    """Return the dot product of the 2D vectors *v* and *w*."""
    vx, vy = v[0], v[1]
    wx, wy = w[0], w[1]
    return vx * wx + vy * wy
def distance(p, q):
    """Return the Euclidean distance between 2D points *p* and *q*."""
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    return math.sqrt(dx * dx + dy * dy)
# classes
class RectangularDomain:
    """Axis-aligned rectangular playing field with a fixed border width."""
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.border = 2
    def inside(self, center, radius):
        """Return True when the circle (center, radius) is fully inside."""
        low = radius + self.border
        fits_horizontally = low < center[0] < (self.width - self.border - radius)
        fits_vertically = low < center[1] < (self.height - self.border - radius)
        return fits_horizontally and fits_vertically
    def normal(self, center):
        """Return a unit normal pointing inward from the nearest wall."""
        left_dist = center[0]
        right_dist = self.width - center[0]
        top_dist = center[1]
        bottom_dist = self.height - center[1]
        if left_dist < min(right_dist, top_dist, bottom_dist):
            return (1, 0)
        if right_dist < min(left_dist, top_dist, bottom_dist):
            return (-1, 0)
        if top_dist < min(bottom_dist, left_dist, right_dist):
            return (0, 1)
        return (0, -1)
    def recolocate(self, center, radius):
        """Clamp *center* so the circle sits back inside the field."""
        pad = radius + 2
        if center[1] < pad:
            center[1] = pad
        if center[1] > self.height - pad:
            center[1] = self.height - pad
        if center[0] < pad:
            center[0] = pad
        if center[0] > self.width - pad:
            center[0] = self.width - pad
        return [center[0], center[1]]
    def random_pos(self, radius):
        """Return a random [x, y] spawn point for a circle of this radius."""
        x = random.randrange(radius, self.width - radius - self.border)
        y = random.randrange(radius, self.height - radius - self.border)
        return [x, y]
    def draw(self, canvas):
        """Draw the rectangular boundary in red."""
        corners = [[0, 0], [self.width, 0],
                   [self.width, self.height], [0, self.height]]
        canvas.draw_polygon(corners, self.border * 2, "Red")
class CircularDomain:
    """Circular playing field centred at *center* with the given radius."""
    def __init__(self, center, radius):
        self.center = center
        self.radius = radius
        self.border = 2
    def inside(self, center, radius):
        """Return True when the circle (center, radius) is fully inside."""
        dx = center[0] - self.center[0]
        dy = center[1] - self.center[1]
        gap = self.radius - radius - self.border
        return math.sqrt(dx * dx + dy * dy) < gap
    def normal(self, center):
        """Return the outward unit radial direction at *center*."""
        dx = center[0] - self.center[0]
        dy = center[1] - self.center[1]
        length = math.sqrt(dx * dx + dy * dy)
        return [dx / length, dy / length]
    def recolocate(self, center, radius):
        """Pull *center* back inside along the radial direction."""
        overshoot = distance(center, self.center) - self.radius + radius + 2
        direction = self.normal(center)
        if (distance(center, self.center) + radius + 2) > self.radius:
            center[0] = center[0] - overshoot * direction[0]
            center[1] = center[1] - overshoot * direction[1]
        return [center[0], center[1]]
    def random_pos(self, radius):
        """Return a random [x, y] inside the circle, in polar coordinates."""
        r = random.random() * (self.radius - radius - self.border)
        theta = random.random() * 2 * math.pi
        return [r * math.cos(theta) + self.center[0],
                r * math.sin(theta) + self.center[1]]
    def draw(self, canvas):
        """Draw the circular boundary in red."""
        canvas.draw_circle(self.center, self.radius, self.border * 2, "Red")
class Player:
    """A player-controlled circle that eats the smaller food balls.

    Position, velocity, score and display name live on the instance;
    ``domain`` supplies the boundary geometry (rectangular or circular).
    """
    def __init__(self, radius, color, domain, init_pos):
        self.radius = radius
        self.color = color
        self.domain = domain
        # Spawn point depends on which side this player starts on
        if init_pos == "left":
            self.pos = player_pos
        if init_pos == "right":
            self.pos = player_pos_2
        self.vel = [0,0]
        self.score = 0
        self.name = ""
        # NOTE(review): duplicate assignment — `self.score` was already set
        # two lines above.
        self.score = 0
        self.w = ""  # winner banner text, set to "Winner!" by absorb_w
    # bounce: mirror the velocity about the boundary normal (elastic)
    def reflect(self):
        norm = self.domain.normal(self.pos)
        norm_length = dot(self.vel, norm)
        self.vel[0] = self.vel[0] - 2 * norm_length * norm[0]
        self.vel[1] = self.vel[1] - 2 * norm_length * norm[1]
    def recolo(self):
        # Snap the player back inside the domain after an overshoot
        rec = self.domain.recolocate(self.pos, self.radius)
        self.pos[0] = rec[0]
        self.pos[1] = rec[1]
    # update ball position; bounce and re-clamp when leaving the domain
    def update(self):
        self.pos[0] += self.vel[0]
        self.pos[1] += self.vel[1]
        if not self.domain.inside(self.pos, self.radius):
            self.reflect()
            self.recolo()
    # move ball with keywords: arrow keys; each press replaces the velocity
    def keydown1(self, key):
        v_h = 2
        v_v = 2
        if key == simplegui.KEY_MAP["left"]:
            self.vel[0] = - v_h
            self.vel[1] = 0
        elif key == simplegui.KEY_MAP["right"]:
            self.vel = [v_h,0]
        elif key == simplegui.KEY_MAP["down"]:
            self.vel[0] = 0
            self.vel[1] = v_v
        elif key == simplegui.KEY_MAP["up"]:
            self.vel[0] = 0
            self.vel[1] = - v_v
    # a/s/x/w controls for the second player.
    # NOTE(review): the module-level table `k` advertises "s"/"w" twice and
    # never "x" — confirm which binding is intended for "down".
    def keydown2(self, key):
        v_h = 2
        v_v = 2
        if key == simplegui.KEY_MAP["a"]:
            self.vel[0] = - v_h
            self.vel[1] = 0
        elif key == simplegui.KEY_MAP["s"]:
            self.vel = [v_h,0]
        elif key == simplegui.KEY_MAP["x"]:
            self.vel[0] = 0
            self.vel[1] = v_v
        elif key == simplegui.KEY_MAP["w"]:
            self.vel[0] = 0
            self.vel[1] = - v_v
    # def keyup(self, key):
    #     self.vel = [0,0]
    # # stop ball if key up
    # absorb balls: grow by area (radii add in quadrature), adopt the color
    def absorb(self, radius, position, color):
        d = distance(position,self.pos)
        if d <= (self.radius + radius):
            self.radius = math.sqrt(self.radius**2 + radius**2)
            self.color = color
    # once there are not balls the bigger player eats the other
    def absorb_w(self, radius, position, color):
        # NOTE(review): `global w` has no matching module-level `w`; only
        # `self.w` is assigned below — the global statement looks vestigial.
        global w
        d = distance(position,self.pos)
        if particules ==[]:
            if d <= (self.radius + radius):
                if self.radius > radius:
                    self.radius = math.sqrt(self.radius**2 + radius**2)
                    self.color = color
                    self.w = "Winner!"
                    self.vel = [0,0]
    # the player go out the domain: the loser shrinks and parks off-screen
    def go_out(self, radius, position):
        d = distance(position,self.pos)
        if particules ==[]:
            if d <= (self.radius + radius):
                if self.radius < radius:
                    self.radius = 0.01
                    self.vel = [0,0]
                    self.pos = [21000,21000]
    def draw(self, canvas, init_pos):
        # Body, centred name, and the (possibly empty) winner banner
        canvas.draw_circle(self.pos, self.radius, 2, "red", self.color)
        canvas.draw_text(self.name, (self.pos[0] - 10, self.pos[1] + 5 ), 20, "Black")
        canvas.draw_text(self.w, (self.pos[0] - 10, self.pos[1] + 25 ), 20, "Black")
        # Score line is anchored to this player's half of the canvas
        if init_pos == "left":
            canvas.draw_text(("Score " + str (self.name) + " " + str(self.score) + \
                             " " + "Radius = " + str(round(self.radius,1))),\
                             (width *0.125, 100), 20, "white")
        if init_pos == "right":
            canvas.draw_text(("Score " + str (self.name) + " " + str(self.score) + \
                             " " + "Radius = " + str(round(self.radius,1))),\
                             (width *0.625, 100), 20, "white")
class Ball:
    """A drifting food ball; players absorb it for score and size."""
    def __init__(self, number, radius, color, domain):
        self.radius = radius
        self.color = color
        self.domain = domain
        self.number = number  # creation index
        self.pos = self.domain.random_pos(self.radius)
        # Small random drift; both components positive at spawn
        self.vel = [random.random() + .1, random.random() + .1]
    def information(self):
        return self.number
    def position(self):
        return self.pos
    # bounce: mirror the velocity about the boundary normal (elastic)
    def reflect(self):
        norm = self.domain.normal(self.pos)
        norm_length = dot(self.vel, norm)
        self.vel[0] = self.vel[0] - 2 * norm_length * norm[0]
        self.vel[1] = self.vel[1] - 2 * norm_length * norm[1]
    # absorb: when a player overlaps, park the ball off-screen and credit
    # the matching player's score.
    def absorb(self, radius, position):
        d = distance(position, self.pos)
        if d + 1 <= (self.radius + radius):
            self.radius = 0.1
            self.vel = [0,0]
            self.pos = [-100,-100]
            # NOTE(review): the eater is identified by comparing positions;
            # this relies on the caller passing player1.pos / player2.pos
            # (the exact list objects) — confirm.
            if position == player1.pos:
                player1.score += 1
            if position == player2.pos:
                player2.score += 1
    # update ball position; bounce off the boundary
    def update(self):
        self.pos[0] += self.vel[0]
        self.pos[1] += self.vel[1]
        if not self.domain.inside(self.pos, self.radius):
            self.reflect()
    # draw: filled circle in the ball's color
    def draw(self, canvas):
        canvas.draw_circle(self.pos, self.radius, 1,
                           self.color, self.color)
# handlers and domains
def draw(canvas):
    """Frame draw handler: update and draw every ball and both players.

    Fix: the original removed items from `particules` while iterating the
    same list, which skips the element following each removal. Iterating
    over a snapshot makes every ball get processed each frame.
    """
    field.draw(canvas)
    for p in list(particules):
        p.draw(canvas)
        p.update()
        p.absorb(player1.radius, player1.pos)
        p.absorb(player2.radius, player2.pos)
        # Eaten balls are parked at (-100, -100); drop them from play
        if p.position() == [-100, -100]:
            particules.remove(p)
    player1.draw(canvas, "left")
    player1.update()
    player2.draw(canvas, "right")
    player2.update()
    # Let the players grow from any ball they currently overlap
    for p in particules:
        player1.absorb(p.radius, p.pos, p.color)
        player2.absorb(p.radius, p.pos, p.color)
    # End game: once the field is empty the bigger player eats the other
    if player1.radius > player2.radius:
        player1.absorb_w(player2.radius, player2.pos, player2.color)
        player2.go_out(player1.radius, player1.pos)
    if player2.radius > player1.radius:
        player2.absorb_w(player1.radius, player1.pos, player1.color)
        player1.go_out(player2.radius, player2.pos)
def input_handler1(text):
    """Store player 1's name and echo it in the side label."""
    player1.name = text
    inp1.set_text("")  # clear the input box after submission
    label1.set_text("Player Name 1 = " + text)
def input_handler2(text):
    """Store player 2's name and echo it in the side label."""
    player2.name = text
    inp2.set_text("")  # clear the input box after submission
    label2.set_text("Player Name 2 = " + text)
def input_handler3(text):
    """Create the requested number of random food balls on the field.

    Fixes: removed two leftover Python-2 debug prints and the unused `d`
    from the global statement.
    """
    global n_balls, particules
    n_balls = int(text)
    inp3.set_text("")  # clear the input box after submission
    label3.set_text("Number of balls = " + text)
    for i in range(n_balls):
        # Random radius in [5, 19] and a random color from the palette
        p = Ball(i, random.choice(range(5, 20)), random.choice(colors), field)
        particules.append(p)
# Default playing field shared by players, balls and the handlers below
field = RectangularDomain(width, height)
def keydown(key):
    """Forward key presses to both players' control schemes."""
    player1.keydown1(key)
    player2.keydown2(key)
#def keyup(key):
# player1.keyup(key)
# player2.keyup(key)
def button_handler3():
    # Start the simplegui frame (button labelled 'Start/Faster')
    frame.start()
def button_handler4():
    """Reset both players to their spawn state (ReStart button)."""
    spawns = ([(width / 2) * 0.5, height / 2],
              [(width / 2) * 1.5, height / 2])
    for plr, spawn in zip((player1, player2), spawns):
        plr.pos = spawn
        plr.radius = radius
        plr.vel = [0, 0]
        plr.color = "white"
        plr.score = 0
        plr.w = ""
def button_handler5():
    """Switch the playing field to a rectangular domain.

    Fix: the original assigned `Ball.domain` on the *class*, but every Ball
    stores its own `self.domain`, so existing balls never saw the change.
    Updating each live particle (and sharing one domain object) fixes that.
    """
    global field
    field = RectangularDomain(width, height)
    player1.domain = field
    player2.domain = field
    for p in particules:
        p.domain = field
def button_handler6():
    """Switch the playing field to a circular domain.

    Fix: the original assigned `Ball.domain` on the *class*, but every Ball
    stores its own `self.domain`, so existing balls never saw the change.
    Updating each live particle (and sharing one domain object) fixes that.
    """
    global field
    field = CircularDomain([width/2, height/2], height/2)
    player1.domain = field
    player2.domain = field
    for p in particules:
        p.domain = field
# Create the two players on the shared field
player1 = Player(radius, color_player, field, "left")
player2 = Player(radius, color_player, field, "right")
frame = simplegui.create_frame("Basurilla", width, height)
# register even handlers
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
#frame.set_keyup_handler(keyup)
# NOTE(review): the frame is only started by the button handler below —
# nothing runs until 'Start/Faster' is pressed.
button3 = frame.add_button('Start/Faster', button_handler3)
inp1 = frame.add_input("Player Name 1 = ", input_handler1, 50)
label1 = frame.add_label('')
inp2 = frame.add_input("Player Name 2 = ", input_handler2, 50)
label2 = frame.add_label('')
inp3 = frame.add_input("Number of balls = ", input_handler3, 50)
label3 = frame.add_label('')
button4 = frame.add_button('ReStart', button_handler4)
##label5 = frame.add_label("Rectangular Domain")
#button5 = frame.add_button('Rectangular Domain', button_handler5)
#button6 = frame.add_button('Circular Domain', button_handler6)
``` |
{
"source": "josegcardenas/web-scraping-challenge",
"score": 3
} |
#### File: web-scraping-challenge/Mission_to_Mars/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
import time
# Shared result dict populated by scrape_all() and returned to the caller
data = {}
def scrape_all():
    """Scrape NASA/Mars sites and return a dict with all collected data.

    Keys populated in the module-level `data` dict: "news_title",
    "news_paragraph", "featured_image", "facts" (an HTML table string)
    and "hemispheres" (a list of {"img_url", "title"} dicts).
    Depends on live websites and a local Chrome install.
    """
    # Initiate headless driver for deployment
    # NOTE(review): headless=False contradicts the comment above — confirm
    # which is intended for deployment.
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=False)
    # Go to the NASA Mars News Site
    url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
    browser.visit(url)
    # Create a Beautiful Soup object
    soup = bs(browser.html, 'lxml')
    news_title = soup.find_all('div', class_ = 'content_title')
    news_articles = []
    # Keep only titles that are wrapped in a link with text
    for news in news_title:
        if (news.a):
            if (news.a.text):
                news_articles.append(news.a.text)
    # Print paragraph for the latest news article
    news_story = soup.find_all('div', class_ = 'article_teaser_body')
    news_paragraph = []
    for paragraph in news_story:
        if (paragraph.text):
            news_paragraph.append(paragraph.text)
    # Create variables for our latest news article and paragraph
    # NOTE(review): raises IndexError if the page layout changes and no
    # articles are matched.
    first_article = news_articles[0]
    news_p = news_paragraph[0]
    data["news_title"] = first_article
    data["news_paragraph"] = news_p
    # Visit the url for JPL Featured Space Image
    url_2 = 'https://www.jpl.nasa.gov/images?search=&category=Mars'
    browser.visit(url_2)
    # Create a Beautiful Soup object
    soup2 = bs(browser.html, 'lxml')
    # Find and append the links(href) for each image featured on the page
    article_images = soup2.find_all('a', class_="group  cursor-pointer block")
    image_links = []
    for image in article_images:
        image_links.append(image['href'])
    # Scrape through the first href and find the full sized image url
    soup2 = bs(browser.html, 'lxml')
    # Rebuild the scheme+host part of the current URL to resolve the
    # relative href found above
    domain_url = 'https://' + browser.url.replace('http://','').replace('https://','').split('/', 1)[0]
    browser.visit(domain_url + image_links[0])
    soup3 = bs(browser.html, 'lxml')
    img_url = soup3.find_all('div', class_ = "lg:w-auto w-full")
    img_href = []
    for i in img_url:
        if (i.a):
            if (i.a['href']):
                img_href.append(i.a['href'])
    featured_image_url = img_href[0]
    data["featured_image"] = featured_image_url
    # Visit the Mars Facts webpage
    url_3 = 'https://space-facts.com/mars/'
    browser.visit(url_3)
    # Create a Beautiful Soup object
    soup3 = bs(browser.html, 'lxml')
    # Scrape the table containing facts about the planet including Diameter, Mass, etc.
    mars_facts = pd.read_html(browser.html)
    table_df = mars_facts[0]
    # Use Pandas to convert the data to a HTML table string.
    table_df.columns = ["description", "value"]
    data["facts"] = table_df.to_html(index=False)
    # Bring in the USGS Astrogeology site for our web scrapping
    url_4 = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url_4)
    # Create a Beautiful Soup object
    soup4 = bs(browser.html, 'lxml')
    hemisphere_image_urls = []
    # Run a for loop to click through our hemisphere links in order to
    # append the titles & urls for the full resolution hemisphere images
    links = browser.find_by_css("a.product-item h3")
    for item in range(len(links)):
        hemisphere = {}
        browser.find_by_css("a.product-item h3")[item].click()
        # find urls for the full resolution hemisphere images
        # NOTE(review): find_link_by_text is deprecated in newer splinter
        # releases (links.find_by_text) — confirm the pinned version.
        aref_list = browser.find_link_by_text("Sample").first
        hemisphere["img_url"] = aref_list["href"]
        # find the titles for our hemisphere images
        hemisphere["title"] = browser.find_by_css("h2.title").text
        # append titles & urls for our hemisphere images
        hemisphere_image_urls.append(hemisphere)
        browser.back()
    data["hemispheres"] = hemisphere_image_urls
    browser.quit()
    return data
``` |
{
"source": "josegcpa/haemorasis",
"score": 3
} |
#### File: scripts/python/image_generator.py
```python
import numpy as np
import openslide
from openslide import OpenSlideError
from multiprocessing import Queue,Process
def image_generator(quality_csv_path,slide_path,
                    h=512,w=512,extra_padding=128):
    """Yield (tile, [x, y]) for quality-positive tiles of a whole slide.

    The CSV is expected to contain lines "OUT,<x>,<y>,...,<prob>"; tiles
    with prob >= 0.5 are read at level 0 with `extra_padding` pixels
    added on every side. Coordinates yielded are the padded origin.
    """
    OS = openslide.OpenSlide(slide_path)
    dims = OS.dimensions
    with open(quality_csv_path) as o:
        lines = [x.strip() for x in o.readlines() if 'OUT,' in x]
    positives = []
    negatives = []  # collected for symmetry but never used below
    for line in lines:
        data = line.split(',')
        if float(data[-1]) >= 0.5:
            positives.append([int(data[1]),int(data[2])])
        else:
            negatives.append([int(data[1]),int(data[2])])
    for x,y in positives:
        try:
            # Shift the origin so the padded window stays inside the slide
            x = x - extra_padding
            y = y - extra_padding
            x = np.maximum(0,x)
            y = np.maximum(0,y)
            if x + h + (2*extra_padding) > dims[0]:
                x = dims[0] - h - (extra_padding*2)
            if y + w + (2*extra_padding) > dims[1]:
                y = dims[1] - w - (extra_padding*2)
            image = OS.read_region(
                (x,y),0,
                (h+(extra_padding*2),w+(extra_padding*2)))
            image = np.array(image)[:,:,:3]  # drop the alpha channel
            yield image,[x,y]
        except OpenSlideError as error:
            # NOTE(review): on a read error the slide handle is reopened
            # but the failing tile is skipped, not retried — confirm this
            # best-effort behaviour is intended.
            OS = openslide.OpenSlide(slide_path)
def image_generator_slide(slide_path,
                          height=512,width=512):
    """Yield (tile, "x,y") for every non-overlapping tile of a slide.

    Sweeps the level-0 plane in steps of `height` x `width`; coordinates
    are returned as a comma-joined string.
    """
    OS = openslide.OpenSlide(slide_path)
    dim = OS.dimensions
    for x in range(0,dim[0],height):
        for y in range(0,dim[1],width):
            try:
                im = OS.read_region((x,y),0,(height,width))
                im = np.array(im)
                im = im[:,:,:3]  # drop the alpha channel
                yield im,'{},{}'.format(x,y)
            except OpenSlideError as error:
                # NOTE(review): reopens the slide but skips the failing
                # tile — confirm this best-effort behaviour is intended.
                OS = openslide.OpenSlide(slide_path)
class ImageGeneratorWithQueue:
    """Runs a tile generator in a background process behind a bounded queue.

    The worker puts (image, coords) tuples on the queue, followed by a
    terminating ``None`` sentinel; :meth:`generate` yields items until the
    sentinel arrives.
    """
    def __init__(self,slide_path,csv_path,
                 extra_padding=128,maxsize=1,
                 height=512,width=512):
        self.maxsize = maxsize
        self.csv_path = csv_path
        self.slide_path = slide_path
        self.extra_padding = extra_padding
        self.height = height
        self.width = width
        self.q = Queue(self.maxsize)
        self.p = Process(
            target=self.image_generator_w_q,
            args=(self.q,csv_path,slide_path,extra_padding))

    def image_generator_w_q(self,q,csv_path,slide_path,extra_padding):
        """Worker body: stream tiles into the queue, then the sentinel."""
        # With a quality CSV, only quality-positive tiles are produced;
        # otherwise the whole slide is swept.
        if csv_path is not None:
            im_gen = image_generator(
                quality_csv_path=csv_path,
                slide_path=slide_path,
                extra_padding=extra_padding)
        else:
            im_gen = image_generator_slide(
                slide_path,
                self.height,self.width)
        for element in im_gen:
            q.put(element)
        q.put(None)  # sentinel: tells the consumer the stream is finished

    def start(self):
        """Start the background producer process."""
        # Fix: the original set `self.daemon = True`, a plain attribute on
        # this wrapper object; the flag must be set on the Process itself
        # (before start()) so the worker does not outlive the parent.
        self.p.daemon = True
        self.p.start()

    def generate(self):
        """Yield (image, coords) items until the producer signals the end."""
        while True:
            item = self.q.get()
            if item is not None:
                yield item
            else:
                # Give the producer up to 2 minutes to shut down cleanly
                self.p.join(120)
                break
```
#### File: scripts/python/quality_control.py
```python
import argparse
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from glob import glob
from quality_net_utilities import *
from image_generator import *
# All slides are processed as 3-channel RGB
n_channels = 3

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Predicts which tiles are of good quality in WBS.')
    parser.add_argument('--slide_path',dest='slide_path',
                        action='store',type=str,default=None,
                        help="Path to slide.")
    # NOTE(review): the help strings for the two size flags look
    # copy-pasted from another script ("file extension") — confirm.
    parser.add_argument('--input_height',dest = 'input_height',
                        action = 'store',type = int,default = 512,
                        help = 'The file extension for all images.')
    parser.add_argument('--input_width',dest = 'input_width',
                        action = 'store',type = int,default = 512,
                        help = 'The file extension for all images.')
    parser.add_argument('--checkpoint_path',dest = 'checkpoint_path',
                        action = 'store',type = str,default = 'summaries',
                        help = 'Path to checkpoint.')
    parser.add_argument('--batch_size',dest = 'batch_size',
                        action = 'store',type = int,default = 4,
                        help = 'Size of mini batch.')
    args = parser.parse_args()

    # Restore the trained quality network from its checkpoint
    quality_net = keras.models.load_model(args.checkpoint_path)

    def generator():
        # Stream slide tiles, scaled from [0,255] to [0,1]
        G = image_generator_slide(
            args.slide_path,args.input_height,args.input_width)
        for image,coords in G:
            image = image / 255.
            yield image,coords

    output_types = (tf.float32,tf.string)
    output_shapes = (
        [args.input_height,args.input_width,n_channels],[])
    tf_dataset = tf.data.Dataset.from_generator(
        generator,output_types=output_types,output_shapes=output_shapes)
    tf_dataset = tf_dataset.batch(args.batch_size,drop_remainder=False)
    tf_dataset = tf_dataset.prefetch(5)

    # Emit one CSV-ish line per tile: OUT,<x>,<y>,<label>,<probability>
    for image,coords in tqdm(tf_dataset):
        prediction = quality_net(image)
        for c,p in zip(coords.numpy(),prediction.numpy()):
            print('OUT,{},{},{}'.format(c.decode(),int(p>0.5),float(p)))
```
#### File: scripts/python/tf_da.py
```python
from math import pi
import tensorflow as tf
import cv2
from albumentations import ElasticTransform
class ImageAugmenter:
    """Bundles the full augmentation pipeline for an image plus optional masks.

    Geometric operations (elastic transform, rotations/flips) are applied
    to image and masks alike; photometric operations (color, blur, noise,
    salt-and-pepper, JPEG quality) touch only the image. All parameters
    simply configure the helper functions defined below in this module.
    """
    def __init__(self,
                 brightness_max_delta=16. / 255.,
                 saturation_lower=0.8,
                 saturation_upper=1.2,
                 hue_max_delta=0.2,
                 contrast_lower=0.8,
                 contrast_upper=1.2,
                 salt_prob=0.01,
                 pepper_prob=0.01,
                 noise_stddev=0.05,
                 blur_probability=0.1,
                 blur_size=3,
                 blur_mean=0.,
                 blur_std=0.05,
                 discrete_rotation=True,
                 min_jpeg_quality=30,
                 max_jpeg_quality=70,
                 elastic_transform_sigma=10,
                 elastic_transform_alpha_affine=0,
                 elastic_transform_p=0.7):
        self.brightness_max_delta = brightness_max_delta
        self.saturation_lower = saturation_lower
        self.saturation_upper = saturation_upper
        self.hue_max_delta = hue_max_delta
        self.contrast_lower = contrast_lower
        self.contrast_upper = contrast_upper
        self.salt_prob = salt_prob
        self.pepper_prob = pepper_prob
        self.noise_stddev = noise_stddev
        self.blur_probability = blur_probability
        self.blur_size = blur_size
        self.blur_mean = blur_mean
        self.blur_std = blur_std
        self.discrete_rotation = discrete_rotation
        self.min_jpeg_quality = min_jpeg_quality
        self.max_jpeg_quality = max_jpeg_quality
        self.elastic_transform_sigma = elastic_transform_sigma
        self.elastic_transform_alpha_affine = elastic_transform_alpha_affine
        self.elastic_transform_p = elastic_transform_p

    def __str__(self):
        return "ImageAugmenter class"

    def augment(self,image,*masks):
        """Apply the augmentation stack to `image` (and, where geometric,
        to `masks`). Returns the image alone when no masks are given,
        otherwise a tuple (image, mask_1, ..., mask_n)."""
        image = tf.image.convert_image_dtype(image,tf.float32)
        masks = [tf.image.convert_image_dtype(m,tf.float32) for m in masks]
        if self.elastic_transform_p > 0:
            image,masks = elastic_transform(
                image,*masks,
                sigma=self.elastic_transform_sigma,
                alpha_affine=self.elastic_transform_alpha_affine,
                p=self.elastic_transform_p)
        image_shape = image.get_shape().as_list()
        image = random_color_transformations(image,
                                             self.brightness_max_delta,
                                             self.saturation_lower,
                                             self.saturation_upper,
                                             self.hue_max_delta,
                                             self.contrast_lower,
                                             self.contrast_upper)
        image = gaussian_blur(image,
                              self.blur_probability,
                              self.blur_size,
                              self.blur_mean,
                              self.blur_std)
        image = salt_and_pepper(image,
                                self.salt_prob,
                                self.pepper_prob)
        image = gaussian_noise(image,self.noise_stddev)
        image,masks = random_rotation(
            image,*masks,
            discrete_rotation=self.discrete_rotation)
        if self.min_jpeg_quality - self.max_jpeg_quality != 0:
            image = random_jpeg_quality(image,
                                        self.min_jpeg_quality,
                                        self.max_jpeg_quality)
        # Re-assert the static shape lost by some of the random ops
        image = tf.reshape(image,image_shape)
        if len(masks) == 0:
            return image
        else:
            # Fix: the original `return image,(*masks)` is a SyntaxError —
            # a parenthesised starred expression is invalid on its own.
            return (image, *masks)
def random_color_transformations(
    image,
    brightness_max_delta,
    saturation_lower,
    saturation_upper,
    hue_max_delta,
    contrast_lower,
    contrast_upper
    ):
    """
    Function to randomly alter an image's brightness, saturation, hue and
    contrast, applying the four ops in one of three randomly-chosen orders.
    Each op is a no-op when its parameters are zero / an empty range.

    Parameters:
    * image - three channel image (H,W,3), float values in [0,1]
    * brightness_max_delta - max delta for tf.image.random_brightness
    * saturation_lower/upper - range for tf.image.random_saturation
    * hue_max_delta - max delta for tf.image.random_hue
    * contrast_lower/upper - range for tf.image.random_contrast

    NOTE(review): TF1-style API (tf.variable_scope, tf.random_uniform);
    also `with A and B:` only enters the name_scope (the `and` expression
    evaluates to its right operand) — confirm that is intended.
    """
    if brightness_max_delta != 0:
        brightness = lambda x: tf.image.random_brightness(x,
                                                          brightness_max_delta)
    else:
        brightness = lambda x: x
    if saturation_lower - saturation_upper != 0:
        saturation = lambda x: tf.image.random_saturation(x,
                                                          saturation_lower,
                                                          saturation_upper)
    else:
        saturation = lambda x: x
    if hue_max_delta != 0:
        hue = lambda x: tf.image.random_hue(x,hue_max_delta)
    else:
        hue = lambda x: x
    if contrast_lower - contrast_upper != 0:
        contrast = lambda x: tf.image.random_contrast(x,
                                                      contrast_lower,
                                                      contrast_upper)
    else:
        contrast = lambda x: x
    # Three fixed orderings of the four ops
    def distort_colors_0(image):
        image = brightness(image)
        image = saturation(image)
        image = hue(image)
        image = contrast(image)
        return image
    def distort_colors_1(image):
        image = saturation(image)
        image = brightness(image)
        image = contrast(image)
        image = hue(image)
        return image
    def distort_colors_2(image):
        image = contrast(image)
        image = hue(image)
        image = brightness(image)
        image = saturation(image)
        return image
    def distort_colors(image,color_ordering):
        # Graph-mode dispatch on the sampled ordering; ordering 3 is a no-op
        image = tf.cond(
            tf.equal(color_ordering,0),
            lambda: distort_colors_0(image),
            lambda: tf.cond(
                tf.equal(color_ordering,1),
                lambda: distort_colors_1(image),
                lambda: tf.cond(tf.equal(color_ordering,2),
                                lambda: distort_colors_2(image),
                                lambda: image)
            )
        )
        return image
    with tf.variable_scope('RandomColor') and tf.name_scope('RandomColor'):
        color_ordering = tf.random_uniform([],0,4,tf.int32)
        image = distort_colors(image,color_ordering)
        image = tf.clip_by_value(image,0.,1.)
    return image
def salt_and_pepper(
    image,
    salt_prob=0.01,
    pepper_prob=0.01
    ):
    """Randomly sets pixels to 1 (salt) or 0 (pepper).

    Parameters:
    * image - (H,W) or (H,W,C) float image with values in [0,1]
    * salt_prob - per-pixel probability of being forced to 1
    * pepper_prob - per-pixel probability of being forced to 0

    Fixes vs. the original:
    * the mask fired where uniform > p, i.e. with probability 1-p, so
      almost every pixel was corrupted; the comparison is now `< p`.
    * the mask was expand_dims'ed twice for 3-D images ((H,W,1,1)), which
      cannot broadcast against (H,W,C); it is now expanded exactly once.
    """
    with tf.variable_scope('SaltAndPepper') and tf.name_scope('SaltAndPepper'):
        def get_mask(h,w,p):
            # 1 where the pixel is corrupted (probability p), else 0
            return tf.where(
                tf.random.uniform(shape=(h,w),minval=0.,maxval=1.) < p,
                tf.ones((h,w)),
                tf.zeros((h,w)))
        image_shape = tf.shape(image)
        image_shape_list = image.get_shape().as_list()
        salt_mask = get_mask(image_shape[0],image_shape[1],salt_prob)
        pepper_mask = get_mask(image_shape[0],image_shape[1],pepper_prob)
        if len(image_shape_list) == 3:
            # single channel axis so the (H,W,1) masks broadcast over C
            salt_mask = tf.expand_dims(salt_mask,axis=2)
            pepper_mask = tf.expand_dims(pepper_mask,axis=2)
        image = tf.where(salt_mask == 1.,
                         tf.ones_like(image),
                         image)
        image = tf.where(pepper_mask == 1.,
                         tf.zeros_like(image),
                         image)
        return image
def gaussian_noise(
    image,
    stddev=0.05
    ):
    """Adds zero-mean Gaussian noise and clips the result back to [0,1].

    Parameters:
    * image - float image with values in [0,1]
    * stddev - standard deviation of the added noise

    Fix: the original ignored the `stddev` argument and hard-coded 0.05.
    """
    with tf.variable_scope('GaussianNoise') and tf.name_scope('GaussianNoise'):
        image = image + tf.random.normal(tf.shape(image),stddev=stddev)
        image = tf.clip_by_value(image,0.,1.)
    return image
def random_rotation(
    image,
    *masks,
    discrete_rotation=True
    ):
    """Randomly flips (and optionally rotates by multiples of 90 degrees)
    an image together with its masks, so they stay aligned.

    Returns (image, masks) where masks is a list.

    Fix: the up/down flip was gated on `flip_lr_prob`, so a vertical flip
    could only ever happen together with a horizontal one; it now uses its
    own `flip_ud_prob` draw.
    """
    with tf.variable_scope('RandomRot') and tf.name_scope('RandomRot'):
        flip_lr_prob = tf.random.uniform([]) > 0.5
        flip_ud_prob = tf.random.uniform([]) > 0.5
        image = tf.cond(flip_lr_prob,
                        lambda: tf.image.flip_left_right(image),
                        lambda: image)
        masks = [
            tf.cond(flip_lr_prob,
                    lambda: tf.image.flip_left_right(m),
                    lambda: m) for m in masks
        ]
        image = tf.cond(flip_ud_prob,
                        lambda: tf.image.flip_up_down(image),
                        lambda: image)
        masks = [
            tf.cond(flip_ud_prob,
                    lambda: tf.image.flip_up_down(m),
                    lambda: m) for m in masks
        ]
        if discrete_rotation == True:
            # Rotate image and masks by the same random multiple of 90°
            rot90_prob = tf.random.uniform([]) > 0.5
            rot90_angle = tf.random.uniform([],minval=0,maxval=4,
                                            dtype=tf.int32)
            image = tf.cond(rot90_prob,
                            lambda: tf.image.rot90(image,rot90_angle),
                            lambda: image)
            masks = [
                tf.cond(rot90_prob,
                        lambda: tf.image.rot90(m,rot90_angle),
                        lambda: m) for m in masks
            ]
    return image, masks
def gaussian_blur(
    image,
    blur_probability=0.1,
    size=1,
    mean=0.0,
    std=0.05):
    """
    Function to randomly apply a gaussian blur on an image. Based on https://stackoverflow.com/questions/52012657/how-to-make-a-2d-gaussian-filter-in-tensorflow/52012658

    Parameters:
    * image - three channel image (H,W,3)
    * blur_probability - probability for bluring
    * size - kernel half-size (window is 2*size+1)
    * mean - distribution mean
    * std - distribution std

    Fixes vs. the original:
    * the final `tf.reshape(image, ...)` returned the *unblurred* image,
      discarding the conv result entirely;
    * the filter was stacked to shape (k,k,3,3), which sums across input
      channels; a depthwise convolution with a (k,k,3,1) kernel blurs each
      channel independently, which is what a gaussian blur should do.
    """
    def gaussian_kernel(size,mean,std):
        """Makes 2D gaussian Kernel for convolution."""
        d = tf.distributions.Normal(float(mean), float(std))
        vals = d.prob(tf.range(start=-size,limit=size+1,dtype = tf.float32))
        gauss_kernel = tf.einsum('i,j->ij',vals,vals)
        return gauss_kernel / tf.reduce_sum(gauss_kernel)
    with tf.variable_scope('GaussianBlur') and tf.name_scope('GaussianBlur'):
        image_shape = image.get_shape().as_list()
        gaussian_filter = gaussian_kernel(size,mean,std)
        # (k,k) -> (k,k,3,1): one identical blur kernel per channel
        gaussian_filter = tf.stack([gaussian_filter for _ in range(3)],axis=-1)
        gaussian_filter = tf.expand_dims(gaussian_filter,axis=-1)
        transformed_image = tf.cond(
            tf.random.uniform([],0.,1.) < blur_probability,
            lambda: tf.squeeze(
                tf.nn.depthwise_conv2d(tf.expand_dims(image,axis=0),
                                       gaussian_filter,
                                       strides=[1,1,1,1],padding="SAME"),
                axis=0),
            lambda: image
        )
        transformed_image = tf.reshape(transformed_image,image_shape)
        return transformed_image
def random_jpeg_quality(image,
                        min_jpeg_quality=30,
                        max_jpeg_quality=70):
    """
    Function to randomly alter JPEG quality (thin wrapper around
    tf.image.random_jpeg_quality, which re-encodes the image at a quality
    sampled from the given range).

    Parameters:
    * image - three channel image (H,W,3)
    * min_jpeg_quality - minimum JPEG quality
    * max_jpeg_quality - maximum JPEG quality
    """
    return tf.image.random_jpeg_quality(image,
                                        min_jpeg_quality,
                                        max_jpeg_quality)
def elastic_transform(image,*masks,sigma=10,alpha_affine=10,p=0.7):
    """
    Applies elastic distortion (elastic transform) to images and their
    respective masks, keeping them aligned. Requires albumentations.

    Returns (image, masks) where masks is a list of tensors with their
    original static shapes restored.

    Parameters:
    * image - three channel image (H,W,3)
    * masks - masks to be augmented with the image
    * sigma, alpha_affine, p - parameters for the ElasticTransform class

    NOTE(review): `alpha` is hard-coded to 100 below (the `sigma`/`p`
    arguments are honoured) — confirm that is intended. Also uses the
    TF1-only `tf.py_func` bridge.
    """
    def unpack_et(image,masks):
        # albumentations returns a dict; flatten it back to a list
        out = et(image=image,masks=masks)
        image,masks = out['image'],out['masks']
        out = [image,*masks]
        return out
    et = ElasticTransform(sigma=sigma,
                          alpha=100,
                          alpha_affine=alpha_affine,
                          p=p)
    # py_func erases static shapes, so remember and re-apply them
    shapes = [x.get_shape().as_list() for x in [image,*masks]]
    out = tf.py_func(
        lambda x,*y: unpack_et(image=x,masks=y),
        [image,*masks],
        Tout=[tf.float32,*[tf.float32 for _ in masks]])
    out = [tf.reshape(out[i],shapes[i]) for i in range(len(out))]
    image = out[0]
    masks = out[1:]
    return image, masks
``` |
{
"source": "josegcpa/quality-net",
"score": 2
} |
#### File: josegcpa/quality-net/quality_net_utilities.py
```python
import os
import numpy as np
import h5py
from PIL import Image
from glob import glob
import tensorflow as tf
from tensorflow import keras
def quality_net_model(defined_model,h,w):
    """Wrap a feature backbone with a Dense(512)+sigmoid quality head.

    `defined_model` maps an (h, w, 3) image batch to a feature vector;
    the returned Model outputs a single quality probability per image.
    """
    head = keras.Sequential([
        keras.layers.Dense(512,activation='relu'),
        keras.layers.Dense(1,'sigmoid'),
    ])
    image_input = tf.keras.Input(shape=(h, w, 3))
    features = defined_model(image_input)
    score = head(features)
    return tf.keras.Model(image_input, score)
class ColourAugmentation(keras.layers.Layer):
    """Keras layer applying random photometric augmentations to an image.

    Brightness, contrast, hue and saturation are each applied in a random
    order, independently with probability `probability`; JPEG-quality
    degradation is applied last with the same probability. Output values
    are clipped to [0, 1].
    """
    def __init__(self,
                 brightness_delta,
                 contrast_lower,contrast_upper,
                 hue_delta,
                 saturation_lower,saturation_upper,
                 min_jpeg_quality,max_jpeg_quality,
                 probability=0.1):
        super(ColourAugmentation,self).__init__()
        self.probability = probability
        self.brightness_delta = brightness_delta
        self.contrast_lower = contrast_lower
        self.contrast_upper = contrast_upper
        self.hue_delta = hue_delta
        self.saturation_lower = saturation_lower
        self.saturation_upper = saturation_upper
        self.min_jpeg_quality = min_jpeg_quality
        self.max_jpeg_quality = max_jpeg_quality

    def brightness(self,x):
        return tf.image.random_brightness(
            x,self.brightness_delta)

    def contrast(self,x):
        return tf.image.random_contrast(
            x,self.contrast_lower,self.contrast_upper)

    def hue(self,x):
        return tf.image.random_hue(
            x,self.hue_delta)

    def saturation(self,x):
        return tf.image.random_saturation(
            x,self.saturation_lower,self.saturation_upper)

    def jpeg_quality(self,x):
        # No-op when the configured quality range is empty
        if (self.max_jpeg_quality - self.min_jpeg_quality) > 0:
            return tf.image.random_jpeg_quality(
                x,self.min_jpeg_quality,self.max_jpeg_quality)
        else:
            return x

    def call(self,x):
        # NOTE(review): np.random draws happen at trace time under
        # tf.function — confirm this layer is only used eagerly.
        fn_list = [self.brightness,self.contrast,
                   self.hue,self.saturation]
        np.random.shuffle(fn_list)
        for fn in fn_list:
            if np.random.uniform() < self.probability:
                x = fn(x)
        if np.random.uniform() < self.probability:
            # Fix: the original called bare `jpeg_quality(x)` — a NameError
            # at runtime, since it is a method of this class.
            x = self.jpeg_quality(x)
        x = tf.clip_by_value(x,0,1)
        return x
class Flipper(keras.layers.Layer):
    """Randomly mirrors images horizontally and/or vertically."""
    def __init__(self,probability=0.1):
        super(Flipper,self).__init__()
        self.probability = probability  # chance of each flip, drawn independently
    def call(self,x):
        # left-right first, then up-down, matching independent coin flips
        for flip_fn in (tf.image.flip_left_right, tf.image.flip_up_down):
            if np.random.uniform() < self.probability:
                x = flip_fn(x)
        return x
class ImageCallBack(keras.callbacks.Callback):
    """Keras callback that logs example images and scalar metrics to
    TensorBoard every `save_every_n` training batches."""
    def __init__(self,save_every_n,tf_dataset,log_dir):
        super(ImageCallBack, self).__init__()
        self.save_every_n = save_every_n  # logging period, in batches
        self.tf_dataset = iter(tf_dataset)  # iterator: each log consumes a fresh batch
        self.log_dir = log_dir
        self.writer = tf.summary.create_file_writer(self.log_dir)
        self.count = 0  # batches seen so far; also used as the summary step
    def on_train_batch_end(self, batch, logs=None):
        # Every save_every_n batches, pull a batch and log images + metrics.
        if self.count % self.save_every_n == 0:
            batch = next(self.tf_dataset)
            # NOTE(review): names suggest (input, target) pairs; y_augmented is
            # fed to the model and both are logged as images — confirm the
            # dataset actually yields image-shaped targets.
            y_augmented,y_true = batch
            prediction = self.model.predict(y_augmented)
            with self.writer.as_default():
                tf.summary.image("0:InputImage",y_augmented,self.count)
                tf.summary.image("1:GroundTruth",y_true,self.count)
                tf.summary.image("2:Prediction",prediction,self.count)
                tf.summary.scalar("Loss",logs['loss'],self.count)
                tf.summary.scalar("MAE",logs['mean_absolute_error'],self.count)
        self.count += 1
class DataGenerator:
    """Yields (image, class) pairs from an HDF5 file.

    Each top-level HDF5 key is expected to hold an 'image' dataset with at
    least 3 channels and a scalar 'class' dataset.
    """
    def __init__(self,hdf5_path,shuffle=True,transform=None):
        self.hdf5_path = hdf5_path
        self.h5 = h5py.File(self.hdf5_path,'r')  # kept open for the object's lifetime
        self.shuffle = shuffle  # shuffle key order on every generate() call
        self.transform = transform  # optional callable applied to the image tensor
        self.all_keys = list(self.h5.keys())
        self.n_images = len(self.all_keys)
    def generate(self,with_path=False):
        """Generator over all entries; optionally also yields the HDF5 key."""
        image_idx = [x for x in range(self.n_images)]
        if self.shuffle == True:
            np.random.shuffle(image_idx)
        for idx in image_idx:
            P = self.all_keys[idx]
            x = self.h5[P]['image'][:,:,:3]  # keep RGB only, drop extra channels
            c = [float(self.h5[P]['class'][()])]
            x = tf.convert_to_tensor(x) / 255  # scale to [0, 1]
            if self.transform is not None:
                x = self.transform(x)
            if with_path == True:
                yield x,c,[P]
            else:
                yield x,c
class LargeImage:
    def __init__(self,image,tile_size=[512,512],
                 output_channels=3,offset=0):
        """
        Class facilitating the prediction for large images by
        performing all the necessary operations - tiling and
        reconstructing the output.

        Parameters:
        * image - array of shape (H, W, C)
        * tile_size - (height, width) of each tile
        * output_channels - channels of the reconstructed output
        * offset - overlap (in pixels) between consecutive tiles
        """
        self.image = image
        self.tile_size = tile_size
        self.output_channels = output_channels
        self.offset = offset
        self.h = self.tile_size[0]
        self.w = self.tile_size[1]
        self.sh = self.image.shape[:2]
        # accumulators for overlap-averaged reconstruction
        self.output = np.zeros([self.sh[0],self.sh[1],self.output_channels])
        self.denominator = np.zeros([self.sh[0],self.sh[1],1])
    def tile_image(self):
        """Yields (tile, ((x1, x2), (y1, y2))) pairs covering the whole image."""
        for x in range(0,self.sh[0]+self.offset,self.h):
            # FIX: clamp to 0 — with offset > 0 the first start was negative,
            # yielding an empty slice and leaving the top/left border
            # uncovered (division by zero in return_output).
            x = max(0, x - self.offset)
            if x + self.tile_size[0] > self.sh[0]:
                x = self.sh[0] - self.tile_size[0]
            for y in range(0,self.sh[1]+self.offset,self.w):
                y = max(0, y - self.offset)
                if y + self.tile_size[1] > self.sh[1]:
                    y = self.sh[1] - self.tile_size[1]
                x_1,x_2 = x, x+self.h
                y_1,y_2 = y, y+self.w
                yield self.image[x_1:x_2,y_1:y_2,:],((x_1,x_2),(y_1,y_2))
    def update_output(self,image,coords):
        """Accumulates a predicted tile (and its count) into the canvas."""
        (x_1,x_2),(y_1,y_2) = coords
        self.output[x_1:x_2,y_1:y_2,:] += image
        self.denominator[x_1:x_2,y_1:y_2,:] += 1
    def return_output(self):
        """Returns the reconstruction, averaging overlapping regions."""
        return self.output/self.denominator
class Accuracy(keras.metrics.Accuracy):
    """Accuracy that thresholds probabilities at 0.5 before scoring."""
    def update_state(self, y_true, y_pred, sample_weight=None):
        # binarise: values above 0.5 count as the positive class
        binarised = tf.cast(y_pred > 0.5, y_pred.dtype)
        return super().update_state(y_true, binarised, sample_weight)
class Precision(tf.keras.metrics.Precision):
    """Precision that thresholds probabilities at 0.5 before scoring."""
    def update_state(self, y_true, y_pred, sample_weight=None):
        # binarise: values above 0.5 count as the positive class
        binarised = tf.cast(y_pred > 0.5, y_pred.dtype)
        return super().update_state(y_true, binarised, sample_weight)
class Recall(tf.keras.metrics.Recall):
    """Recall (sensitivity) that thresholds probabilities at 0.5 before scoring."""
    def update_state(self, y_true, y_pred, sample_weight=None):
        # binarise: values above 0.5 count as the positive class
        binarised = tf.cast(y_pred > 0.5, y_pred.dtype)
        return super().update_state(y_true, binarised, sample_weight)
``` |
{
"source": "josegcpa/u-net-tf2",
"score": 3
} |
#### File: josegcpa/u-net-tf2/tf_da.py
```python
from math import pi
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import cv2
from albumentations import ElasticTransform
"""
Set of functions used to perform data augmentation on tf.
"""
class ElasticTransformWrapper:
    """Callable wrapper around albumentations' ElasticTransform that flattens
    the result dict into [image, mask1, mask2, ...]."""
    def __init__(self,sigma,alpha,alpha_affine,p):
        self.sigma = sigma
        self.alpha = alpha
        self.alpha_affine = alpha_affine
        self.p = p
        self.et = ElasticTransform(
            sigma=self.sigma,alpha=self.alpha,
            alpha_affine=self.alpha_affine,p=self.p)
    def __call__(self,image,*masks):
        result = self.et(image=image,masks=masks)
        return [result['image'],*result['masks']]
class ImageAugmenter:
    """Bundles the full augmentation pipeline: elastic distortion, colour
    jitter, gaussian blur, additive noise, flips/rotations and JPEG-quality
    degradation.

    Set elastic_transform_p to 0 to disable elastic distortion, and equal
    min/max JPEG qualities to disable JPEG degradation.
    """
    def __init__(self,
                 brightness_max_delta=16. / 255.,
                 saturation_lower=0.8,
                 saturation_upper=1.2,
                 hue_max_delta=0.2,
                 contrast_lower=0.8,
                 contrast_upper=1.2,
                 noise_stddev=0.05,
                 blur_probability=0.1,
                 blur_size=3,
                 blur_mean=0.,
                 blur_std=0.05,
                 discrete_rotation=True,
                 min_jpeg_quality=30,
                 max_jpeg_quality=70,
                 elastic_transform_sigma=10,
                 elastic_transform_alpha_affine=0,
                 elastic_transform_p=0.7):
        self.brightness_max_delta = brightness_max_delta
        self.saturation_lower = saturation_lower
        self.saturation_upper = saturation_upper
        self.hue_max_delta = hue_max_delta
        self.contrast_lower = contrast_lower
        self.contrast_upper = contrast_upper
        self.noise_stddev = noise_stddev
        self.blur_probability = blur_probability
        self.blur_size = blur_size
        self.blur_mean = blur_mean
        self.blur_std = blur_std
        self.discrete_rotation = discrete_rotation
        self.min_jpeg_quality = min_jpeg_quality
        self.max_jpeg_quality = max_jpeg_quality
        self.elastic_transform_sigma = elastic_transform_sigma
        self.elastic_transform_alpha_affine = elastic_transform_alpha_affine
        self.elastic_transform_p = elastic_transform_p
    def __str__(self):
        return "ImageAugmenter class"
    def augment(self,image,*masks):
        """Applies the whole pipeline to an image (and optional masks).

        Returns just the image when no masks are given, otherwise
        (image, tuple_of_masks)."""
        image = tf.image.convert_image_dtype(image,tf.float32)
        masks = [tf.image.convert_image_dtype(m,tf.float32) for m in masks]
        if self.elastic_transform_p > 0:
            image,masks = elastic_transform(
                image,*masks,
                sigma=self.elastic_transform_sigma,
                alpha_affine=self.elastic_transform_alpha_affine,
                p=self.elastic_transform_p)
        # capture static shape so it can be restored at the end
        image_shape = image.shape.as_list()
        image = random_color_transformations(image,
                                             self.brightness_max_delta,
                                             self.saturation_lower,
                                             self.saturation_upper,
                                             self.hue_max_delta,
                                             self.contrast_lower,
                                             self.contrast_upper)
        image = gaussian_blur(image,
                              self.blur_probability,self.blur_size,
                              self.blur_mean,self.blur_std)
        image = gaussian_noise(image,self.noise_stddev)
        image,masks = random_rotation(
            image,*masks,
            discrete_rotation=self.discrete_rotation)
        if self.min_jpeg_quality != self.max_jpeg_quality:
            image = random_jpeg_quality(image,
                                        self.min_jpeg_quality,
                                        self.max_jpeg_quality)
        image = tf.reshape(image,image_shape)
        if len(masks) == 0:
            return image
        # FIX: the original `return image,(*masks)` was a SyntaxError
        # (a bare parenthesized starred expression); return the image
        # together with a tuple of the augmented masks instead.
        return image, tuple(masks)
def random_color_transformations(
    image,
    brightness_max_delta,
    saturation_lower,saturation_upper,
    hue_max_delta,
    contrast_lower,contrast_upper):
    """
    Randomly alters an image's brightness, saturation, hue and contrast,
    applying the four operations in one of three randomly chosen orders.
    Parameters:
    * image - three channel image (H,W,3)
    * brightness_max_delta - max delta for tf.image.random_brightness (0 disables)
    * saturation_lower/saturation_upper - range for tf.image.random_saturation
      (equal bounds disable it)
    * hue_max_delta - max delta for tf.image.random_hue (0 disables)
    * contrast_lower/contrast_upper - range for tf.image.random_contrast
      (equal bounds disable it)
    """
    def brightness(x):
        if brightness_max_delta != 0:
            return tf.image.random_brightness(x,brightness_max_delta)
        return x
    def saturation(x):
        if saturation_lower != saturation_upper:
            # FIX: the original mistakenly called tf.image.random_brightness
            # here with the saturation bounds.
            return tf.image.random_saturation(
                x,saturation_lower,saturation_upper)
        return x
    def hue(x):
        if hue_max_delta != 0:
            return tf.image.random_hue(x,hue_max_delta)
        return x
    def contrast(x):
        if contrast_lower != contrast_upper:
            return tf.image.random_contrast(
                x,contrast_lower,contrast_upper)
        return x
    def distort_colors(x,color_ordering):
        # Three fixed orderings; drawing from [0, 4) leaves a 1-in-4 chance
        # of applying no distortion at all (ordering 3 is the identity),
        # matching the original behaviour.
        if color_ordering == 0:
            for fn in (brightness,saturation,hue,contrast):
                x = fn(x)
        elif color_ordering == 1:
            for fn in (saturation,brightness,contrast,hue):
                x = fn(x)
        elif color_ordering == 2:
            for fn in (contrast,hue,brightness,saturation):
                x = fn(x)
        return x
    color_ordering = tf.random.uniform([],0,4,tf.int32)
    image = distort_colors(image,color_ordering)
    image = tf.clip_by_value(image,0.,1.)
    return image
def salt_and_pepper(image,salt_prob=0.01,pepper_prob=0.01):
    """Applies salt-and-pepper noise: each pixel is independently set to 1
    (salt) with probability salt_prob and to 0 (pepper) with probability
    pepper_prob.

    FIX: the original mask was 1 where uniform > p, i.e. with probability
    (1 - p), so nearly the whole image was salted/peppered.
    """
    def get_mask(h,w,p):
        # mask is 1 with probability p, 0 otherwise
        return tf.expand_dims(
            tf.where(tf.random.uniform(shape=(h,w),minval=0.,maxval=1.) < p,
                     tf.ones((h,w)),
                     tf.zeros((h,w))),
            axis=2)
    image_shape = tf.shape(image)
    salt_mask = get_mask(image_shape[0],image_shape[1],salt_prob)
    pepper_mask = get_mask(image_shape[0],image_shape[1],pepper_prob)
    image = tf.where(salt_mask == 1.,tf.ones_like(image),image)
    image = tf.where(pepper_mask == 1.,tf.zeros_like(image),image)
    return image
def gaussian_noise(image,stddev=0.01):
    """Adds i.i.d. zero-mean Gaussian noise, then clips to [0, 1]."""
    noise = tf.random.normal(tf.shape(image),stddev=stddev)
    return tf.clip_by_value(image + noise,0.,1.)
def random_rotation(image,*masks,discrete_rotation=True):
    """Randomly flips (left-right, up-down) and, when discrete_rotation is
    set, rotates by a random multiple of 90 degrees; masks follow the image.

    NOTE(review): the tf random draws are consumed by Python `if`s, which
    assumes eager execution — confirm this is never traced into a graph.
    """
    do_flip_lr = tf.random.uniform([]) > 0.5
    do_flip_ud = tf.random.uniform([]) > 0.5
    if do_flip_lr == True:
        image = tf.image.flip_left_right(image)
        masks = [tf.image.flip_left_right(m) for m in masks]
    if do_flip_ud == True:
        image = tf.image.flip_up_down(image)
        masks = [tf.image.flip_up_down(m) for m in masks]
    if discrete_rotation == True:
        do_rotate = tf.random.uniform([]) > 0.5
        quarter_turns = tf.random.uniform([],minval=0,maxval=4,
                                          dtype=tf.int32)
        if do_rotate == True:
            image = tf.image.rot90(image,quarter_turns)
            masks = [tf.image.rot90(m,quarter_turns) for m in masks]
    return image, masks
def gaussian_blur(
    image,
    blur_probability=0.1,
    size=1,
    mean=0.0,
    std=0.05):
    """
    Randomly applies a per-channel gaussian blur to an image.
    Based on https://stackoverflow.com/questions/52012657/how-to-make-a-2d-gaussian-filter-in-tensorflow/52012658
    Parameters:
    * image - three channel image (H,W,3)
    * blur_probability - probability of blurring
    * size - kernel half-size (the kernel is (2*size+1) x (2*size+1))
    * mean - distribution mean
    * std - distribution std
    """
    def gaussian_kernel(size,mean,std):
        """Makes 2D gaussian Kernel for convolution."""
        d = tfp.distributions.Normal(float(mean), float(std))
        vals = d.prob(tf.range(start=-size,limit=size+1,dtype = tf.float32))
        gauss_kernel = tf.einsum('i,j->ij',vals,vals)
        return gauss_kernel / tf.reduce_sum(gauss_kernel)
    image_shape = image.shape.as_list()
    # Depthwise filter of shape (k, k, 3, 1): blurs each RGB channel
    # independently. (The original stacked the kernel to (k, k, 3, 3) and
    # ran conv2d, which summed all three input channels into every output
    # channel.)
    kernel = gaussian_kernel(size,mean,std)[:, :, tf.newaxis, tf.newaxis]
    kernel = tf.tile(kernel, [1, 1, 3, 1])
    if tf.random.uniform([]) < blur_probability:
        blurred = tf.nn.depthwise_conv2d(
            tf.expand_dims(image,axis=0),
            kernel,strides=[1,1,1,1],padding="SAME")
        # Drop the batch dimension and restore the static shape.
        # FIX: the original reshaped `image` (discarding the blur) and
        # returned an undefined variable when no blur was applied.
        return tf.reshape(blurred,image_shape)
    return image
def random_jpeg_quality(image,
                        min_jpeg_quality=30,
                        max_jpeg_quality=70):
    """
    Degrades an image with a random JPEG encode/decode round trip.
    Parameters:
    * image - three channel image (H,W,3)
    * min_jpeg_quality - minimum JPEG quality
    * max_jpeg_quality - maximum JPEG quality
    """
    return tf.image.random_jpeg_quality(
        image, min_jpeg_quality, max_jpeg_quality)
def elastic_transform(image,*masks,sigma=10,alpha_affine=10,p=0.7):
    """
    Applies elastic distortion (elastic transform) to images and their
    respective masks. Requires albumentations' ElasticTransform.
    Parameters:
    * image - three channel image (H,W,3)
    * masks - masks to be augmented with the image
    * sigma, alpha_affine, p - parameters for the ElasticTransform class
    """
    # NOTE: alpha is hard-coded to 100; only sigma/alpha_affine/p are exposed.
    ET = ElasticTransformWrapper(
        sigma=sigma,alpha=100,
        alpha_affine=alpha_affine,p=p
    )
    # Capture static shapes before numpy_function, which loses shape info.
    shapes = [x.shape.as_list() for x in [image,*masks]]
    types_out = [tf.float32,*[tf.float32 for _ in masks]]
    out = tf.numpy_function(ET,[image,*masks],Tout=types_out)
    # Restore the statically known shapes lost across the boundary.
    out = [tf.reshape(out[i],shapes[i]) for i in range(len(out))]
    image = out[0]
    masks = out[1:]
    return image, masks
``` |
{
"source": "josegg05/eRGWnet",
"score": 2
} |
#### File: josegg05/eRGWnet/engine.py
```python
import torch.optim as optim
from model import *
import util
class trainer():
    """Training/evaluation harness for (e)RGwnet: Adam optimiser, masked-MAE
    loss (missing targets encoded as 0) and gradient-norm clipping."""
    def __init__(self, scaler, in_dim, seq_length, num_nodes, nhid , dropout, lrate, wdecay, device, supports, gcn_bool, addaptadj, adjinit, blocks, eRec=False, retrain=False, checkpoint='', error_size=6):
        # eRec selects the error-recurrent variant of the model.
        if eRec:
            self.model = eRGwnet(device, num_nodes, dropout, supports=supports, gcn_bool=gcn_bool, addaptadj=addaptadj,
                                 adjinit=adjinit, in_dim=in_dim, out_dim=seq_length, residual_channels=nhid,
                                 dilation_channels=nhid, skip_channels=nhid * 8, end_channels=nhid * 16,
                                 blocks=blocks, error_size=error_size)
        else:
            self.model = gwnet(device, num_nodes, dropout, supports=supports, gcn_bool=gcn_bool, addaptadj=addaptadj,
                               adjinit=adjinit, in_dim=in_dim, out_dim=seq_length, residual_channels=nhid,
                               dilation_channels=nhid, skip_channels=nhid * 8, end_channels=nhid * 16,
                               blocks=blocks)
        self.model.to(device)
        # Optionally resume training from a saved checkpoint (weights only).
        if retrain:
            self.model.load_state_dict(torch.load(checkpoint, map_location=torch.device(device)))
        print(self.model)
        self.optimizer = optim.Adam(self.model.parameters(), lr=lrate, weight_decay=wdecay)
        self.loss = util.masked_mae  # ignores entries where the target is 0
        self.scaler = scaler  # used to de-normalise model outputs before scoring
        self.clip = 5  # max gradient norm; set to None to disable clipping
        self.eRec=eRec
    def train(self, input, real_val):
        """Runs one optimisation step; returns (loss, MAPE, RMSE) for the batch."""
        self.model.train()
        self.optimizer.zero_grad()
        # pad one step on the time axis for receptive-field alignment
        input = nn.functional.pad(input,(1,0,0,0))
        real = torch.unsqueeze(real_val, dim=-3)
        #print(f'input shape: {input.shape}')
        if self.eRec:
            output = self.model(input, real, self.scaler)
            # only the last recurrence step is supervised
            real = real[-1, :, :, :, :]
        else:
            output = self.model(input)
        output = output.transpose(1,3)
        # print(f'input shape: {input.shape}')
        # print(f'output shape: {output.shape}')
        # # output = [batch_size,12,num_nodes,1]
        # print(f'real_val shape: {real_val.shape}')
        # print(f'real shape: {real.shape}')
        predict = self.scaler.inverse_transform(output)
        loss = self.loss(predict, real, 0.0)
        loss.backward()
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()
        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(),mape,rmse
    def eval(self, input, real_val):
        """Scores one batch without optimisation; returns (loss, MAPE, RMSE).

        NOTE(review): no torch.no_grad() here, so gradients are still tracked
        during evaluation — confirm this is intended."""
        self.model.eval()
        input = nn.functional.pad(input,(1,0,0,0))
        real = torch.unsqueeze(real_val, dim=-3)
        if self.eRec:
            output = self.model(input, real, self.scaler)
            real = real[-1, :, :, :, :]
        else:
            output = self.model(input)
        output = output.transpose(1,3)
        #output = [batch_size,12,num_nodes,1]
        predict = self.scaler.inverse_transform(output)
        loss = self.loss(predict, real, 0.0)
        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(),mape,rmse
```
#### File: eRGWnet/scripts/generate_detectors_distance.py
```python
import argparse
import numpy as np
import os
import pandas as pd
import geopy.distance as geo
def generate_distance_file(args):
    """Computes pairwise geodesic distances (in metres) between all sensor
    locations and writes them as a from/to/cost CSV.

    Reads args.location_df_filename (columns: sensor_id, latitude, longitude)
    and writes args.output_dir/args.output_filename. O(n^2) in sensor count.
    """
    df = pd.read_csv(args.location_df_filename)
    # Collect rows in a list and build the frame once: DataFrame.append was
    # removed in pandas 2.0 and re-allocated the whole frame on every call.
    rows = []
    for index0, row0 in df.iterrows():
        cord0 = (df['latitude'][index0], df['longitude'][index0])
        for index1, row1 in df.iterrows():
            cord1 = (df['latitude'][index1], df['longitude'][index1])
            rows.append({'from': row0['sensor_id'],
                         'to': row1['sensor_id'],
                         'cost': geo.distance(cord0, cord1).m})
    distances = pd.DataFrame(rows, columns=['from', 'to', 'cost'])
    print(distances.head(30))
    distances.to_csv(args.output_dir + '/' + args.output_filename, index=False)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir", type=str, default="data/sensor_graph", help="Output directory.")
    parser.add_argument("--output_filename", type=str, default="distance_vegas_28.csv", help="Output filename.")
    parser.add_argument("--location_df_filename", type=str, default="data/sensor_graph/graph_sensor_locations.csv", help="Raw traffic readings.",)
    args = parser.parse_args()
    if os.path.exists(args.output_dir):
        reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip()
        # FIX: `exit` without parentheses was a no-op, so declining still
        # overwrote the output; `reply[0]` also raised IndexError on an
        # empty reply.
        if not reply.startswith('y'):
            raise SystemExit(0)
    else:
        os.makedirs(args.output_dir)
    generate_distance_file(args)
```
#### File: josegg05/eRGWnet/util.py
```python
import pickle
import numpy as np
import os
import scipy.sparse as sp
import torch
from scipy.sparse import linalg
from prettytable import PrettyTable
class DataLoader(object):
    """Mini-batch iterator over paired arrays (inputs xs, targets ys)."""
    def __init__(self, xs, ys, batch_size, pad_with_last_sample=True):
        """
        :param xs: input array, first axis is the sample axis
        :param ys: target array, aligned with xs
        :param batch_size: number of samples per batch
        :param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.
        """
        self.batch_size = batch_size
        self.current_ind = 0
        if pad_with_last_sample:
            num_padding = (batch_size - (len(xs) % batch_size)) % batch_size
            xs = np.concatenate([xs, np.repeat(xs[-1:], num_padding, axis=0)], axis=0)
            ys = np.concatenate([ys, np.repeat(ys[-1:], num_padding, axis=0)], axis=0)
        self.size = len(xs)
        self.num_batch = int(self.size // self.batch_size)
        self.xs = xs
        self.ys = ys
    def shuffle(self):
        """Applies one shared random permutation to xs and ys."""
        permutation = np.random.permutation(self.size)
        self.xs = self.xs[permutation]
        self.ys = self.ys[permutation]
    def get_iterator(self):
        """Returns a fresh generator over (x_batch, y_batch) pairs."""
        self.current_ind = 0
        def _wrapper():
            while self.current_ind < self.num_batch:
                start_ind = self.batch_size * self.current_ind
                end_ind = min(self.size, start_ind + self.batch_size)
                yield (self.xs[start_ind: end_ind, ...],
                       self.ys[start_ind: end_ind, ...])
                self.current_ind += 1
        return _wrapper()
class StandardScaler():
    """
    Z-score normalisation with a fixed, externally supplied mean and std.
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def transform(self, data):
        # centre, then scale
        return (data - self.mean) / self.std
    def inverse_transform(self, data):
        # undo transform(): scale back, then shift
        return data * self.std + self.mean
def sym_adj(adj):
    """Symmetrically normalize adjacency matrix: D^-1/2 A D^-1/2."""
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1))
    inv_sqrt_degree = np.power(degree, -0.5).flatten()
    # isolated nodes have zero degree -> inf after the power; zero them out
    inv_sqrt_degree[np.isinf(inv_sqrt_degree)] = 0.
    d_half_inv = sp.diags(inv_sqrt_degree)
    return adj.dot(d_half_inv).transpose().dot(d_half_inv).astype(np.float32).todense()
def asym_adj(adj):
    """Row-normalizes the adjacency matrix: D^-1 A (random-walk transition matrix)."""
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1)).flatten()
    inv_degree = np.power(degree, -1).flatten()
    # zero-degree rows produce inf; replace with 0 so they stay all-zero
    inv_degree[np.isinf(inv_degree)] = 0.
    d_inv = sp.diags(inv_degree)
    return d_inv.dot(adj).astype(np.float32).todense()
def calculate_normalized_laplacian(adj):
    """
    Computes the symmetric normalized Laplacian (returned sparse):
    # L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2
    # D = diag(A 1)
    :param adj: dense or sparse adjacency matrix
    :return: sparse normalized Laplacian
    """
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1))
    inv_sqrt_degree = np.power(degree, -0.5).flatten()
    inv_sqrt_degree[np.isinf(inv_sqrt_degree)] = 0.
    d_half_inv = sp.diags(inv_sqrt_degree)
    normalized_adj = adj.dot(d_half_inv).transpose().dot(d_half_inv).tocoo()
    return sp.eye(adj.shape[0]) - normalized_adj
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
    """Rescales the normalized Laplacian for Chebyshev filters: 2L/lambda_max - I."""
    if undirected:
        # symmetrise by taking the element-wise maximum of A and A^T
        adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
    L = calculate_normalized_laplacian(adj_mx)
    if lambda_max is None:
        # compute the largest eigenvalue on demand
        eigenvalues, _ = linalg.eigsh(L, 1, which='LM')
        lambda_max = eigenvalues[0]
    L = sp.csr_matrix(L)
    num_nodes, _ = L.shape
    identity = sp.identity(num_nodes, format='csr', dtype=L.dtype)
    scaled = (2 / lambda_max * L) - identity
    return scaled.astype(np.float32).todense()
def load_pickle(pickle_file):
    """Loads a pickle file, retrying with latin1 encoding for Python-2 artifacts."""
    try:
        with open(pickle_file, 'rb') as f:
            return pickle.load(f)
    except UnicodeDecodeError:
        # likely a Python-2 pickle; latin1 round-trips arbitrary bytes
        with open(pickle_file, 'rb') as f:
            return pickle.load(f, encoding='latin1')
    except Exception as e:
        print('Unable to load data ', pickle_file, ':', e)
        raise
def load_adj(pkl_filename, adjtype):
    """Loads the sensor-graph pickle and builds adjacency matrices of the
    requested type; returns (sensor_ids, sensor_id_to_ind, [matrices])."""
    sensor_ids, sensor_id_to_ind, adj_mx = load_pickle(pkl_filename)
    builders = {
        "scalap": lambda a: [calculate_scaled_laplacian(a)],
        "normlap": lambda a: [calculate_normalized_laplacian(a).astype(np.float32).todense()],
        "symnadj": lambda a: [sym_adj(a)],
        "transition": lambda a: [asym_adj(a)],
        "doubletransition": lambda a: [asym_adj(a), asym_adj(np.transpose(a))],
        "identity": lambda a: [np.diag(np.ones(a.shape[0])).astype(np.float32)],
    }
    if adjtype not in builders:
        # mirror the original failure mode: AssertionError on unknown type
        assert 0, "adj type not defined"
    adj = builders[adjtype](adj_mx)
    return sensor_ids, sensor_id_to_ind, adj
def load_dataset(dataset_dir, batch_size, valid_batch_size=None, test_batch_size=None,
                 eRec=False, eR_seq_size=12, suffix='', scaler=None):
    """Loads train/val/test .npz splits from dataset_dir, optionally building
    (and caching to disk) error-recurrent sliding windows of length
    eR_seq_size, then z-normalises feature 0 and wraps each split in a
    DataLoader. Returns a dict with the arrays, the three loaders and the
    scaler."""
    data = {}
    if eRec:
        for category in [f'train{suffix}', f'val{suffix}', f'test{suffix}']:
            # Reuse the cached windowed version when it already exists.
            if os.path.exists(os.path.join(dataset_dir, f'eR{eR_seq_size}_' + category + '.npz')):
                cat_data = np.load(os.path.join(dataset_dir, f'eR{eR_seq_size}_' + category + '.npz'))
                data['x_' + category] = cat_data['x']
                data['y_' + category] = cat_data['y']
                print(f'data x_{category} shape')
                print(data['x_' + category].shape)
                print(f'data y_{category} shape')
                print(data['y_' + category].shape)
            else:
                # Build sliding windows of eR_seq_size consecutive samples.
                cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
                data['x_' + category] = cat_data['x']
                data['y_' + category] = cat_data['y']
                print(f'data x_{category} shape')
                print(data['x_' + category].shape)
                print(f'data y_{category} shape')
                print(data['y_' + category].shape)
                data_size = data['x_' + category].shape[0]
                # The *_aux arrays start with one all-zero dummy row that is
                # stripped with [1:] at the end; *_temp accumulate 1000-sample
                # chunks that are flushed into the main arrays.
                data_x_aux = np.zeros((1, eR_seq_size, data['x_' + category].shape[1], data['x_' + category].shape[2], data['x_' + category].shape[3]))
                data_x_aux_temp = np.zeros((1, eR_seq_size, data['x_' + category].shape[1], data['x_' + category].shape[2],
                                            data['x_' + category].shape[3]))
                data_y_aux = np.zeros((1, eR_seq_size, data['y_' + category].shape[1], data['y_' + category].shape[2],
                                       data['y_' + category].shape[3]))
                data_y_aux_temp = np.zeros((1, eR_seq_size, data['y_' + category].shape[1], data['y_' + category].shape[2],
                                            data['y_' + category].shape[3]))
                for idx in range(data_size - eR_seq_size):
                    x = data['x_' + category][idx:idx+eR_seq_size]
                    x = np.expand_dims(x, axis=0)
                    y = data['y_' + category][idx:idx + eR_seq_size]
                    y = np.expand_dims(y, axis=0)
                    data_x_aux_temp = np.append(data_x_aux_temp, x, axis=0)
                    data_y_aux_temp = np.append(data_y_aux_temp, y, axis=0)
                    # flush the chunk buffers every 1000 windows to bound the
                    # cost of the repeated np.append re-allocations
                    if idx % 1000 == 0:
                        print(idx)
                        data_x_aux = np.append(data_x_aux, data_x_aux_temp[1:], axis=0)
                        data_y_aux = np.append(data_y_aux, data_y_aux_temp[1:], axis=0)
                        data_x_aux_temp = np.zeros(
                            (1, eR_seq_size, data['x_' + category].shape[1], data['x_' + category].shape[2],
                             data['x_' + category].shape[3]))
                        data_y_aux_temp = np.zeros(
                            (1, eR_seq_size, data['y_' + category].shape[1], data['y_' + category].shape[2],
                             data['y_' + category].shape[3]))
                # final flush of any remaining windows
                data_x_aux = np.append(data_x_aux, data_x_aux_temp[1:], axis=0)
                data_y_aux = np.append(data_y_aux, data_y_aux_temp[1:], axis=0)
                data['x_' + category] = data_x_aux[1:]
                data['y_' + category] = data_y_aux[1:]
                print(f'data x_{category} shape')
                print(data['x_' + category].shape)
                print(f'data y_{category} shape')
                print(data['y_' + category].shape)
                # cache the windowed arrays for the next run
                np.savez_compressed(os.path.join(dataset_dir, f'eR{eR_seq_size}_' + category + '.npz'), x=data['x_' + category], y=data['y_' + category])
    else:
        for category in [f'train{suffix}', f'val{suffix}', f'test{suffix}']:
            cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
            data['x_' + category] = cat_data['x']
            data['y_' + category] = cat_data['y']
            print(f'data x_{category} shape')
            print(data['x_' + category].shape)
            print(f'data y_{category} shape')
            print(data['y_' + category].shape)
    # Fit the scaler on the training split's feature 0 unless one is supplied.
    if scaler is None:
        scaler = StandardScaler(mean=data[f'x_train{suffix}'][..., 0].mean(),
                                std=data[f'x_train{suffix}'][..., 0].std())
    # Data format
    for category in [f'train{suffix}', f'val{suffix}', f'test{suffix}']:
        data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0])
    data['train_loader'] = DataLoader(data[f'x_train{suffix}'], data[f'y_train{suffix}'], batch_size)
    data['val_loader'] = DataLoader(data[f'x_val{suffix}'], data[f'y_val{suffix}'], valid_batch_size)
    data['test_loader'] = DataLoader(data[f'x_test{suffix}'], data[f'y_test{suffix}'], test_batch_size)
    data['scaler'] = scaler
    return data
def masked_mse(preds, labels, null_val=np.nan):
    """Mean squared error that ignores entries where labels == null_val
    (or are NaN when null_val is NaN)."""
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = labels != null_val
    mask = mask.float()
    # rescale so the mean over the mask is 1 (keeps the loss magnitude
    # comparable regardless of how many entries are masked)
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    sq_err = (preds - labels) ** 2
    sq_err = sq_err * mask
    sq_err = torch.where(torch.isnan(sq_err), torch.zeros_like(sq_err), sq_err)
    return torch.mean(sq_err)
def masked_rmse(preds, labels, null_val=np.nan):
    """Root of the masked mean squared error."""
    mse = masked_mse(preds=preds, labels=labels, null_val=null_val)
    return torch.sqrt(mse)
def masked_mae(preds, labels, null_val=np.nan):
    """Mean absolute error that ignores entries where labels == null_val
    (or are NaN when null_val is NaN)."""
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = labels != null_val
    mask = mask.float()
    # rescale so the mean over the mask is 1
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    abs_err = torch.abs(preds - labels)
    abs_err = abs_err * mask
    abs_err = torch.where(torch.isnan(abs_err), torch.zeros_like(abs_err), abs_err)
    return torch.mean(abs_err)
def masked_mape(preds, labels, null_val=np.nan):
    """Mean absolute percentage error ignoring null_val entries; masking
    zeros also protects the division by labels."""
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = labels != null_val
    mask = mask.float()
    # rescale so the mean over the mask is 1
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    rel_err = torch.abs(preds - labels) / labels
    rel_err = rel_err * mask
    rel_err = torch.where(torch.isnan(rel_err), torch.zeros_like(rel_err), rel_err)
    return torch.mean(rel_err)
def metric(pred, real):
    """Convenience wrapper returning (MAE, MAPE, RMSE) with null value 0."""
    return (masked_mae(pred, real, 0.0).item(),
            masked_mape(pred, real, 0.0).item(),
            masked_rmse(pred, real, 0.0).item())
def count_parameters(model):
    """Prints a per-module table of trainable parameter counts and returns the total."""
    table = PrettyTable(["Modules", "Parameters"])
    total_params = 0
    for name, parameter in model.named_parameters():
        if not parameter.requires_grad:
            continue  # frozen weights don't count
        param_count = parameter.numel()
        table.add_row([name, param_count])
        total_params += param_count
    print(table)
    print(f"Total Trainable Params: {total_params}")
    return total_params
def print_loss(loss_type, loss):
    """Prints (and returns) the mean of a loss tensor or numpy array."""
    mean_fn = torch.mean if type(loss) == torch.Tensor else np.mean
    out = mean_fn(loss)
    print(f'{loss_type} = {out}\n')
    return out
def print_loss_sensor(loss_type, loss):
    """Prints (and returns) the per-sensor mean of an unreduced loss."""
    # loss must be calculated with no reduction
    tab = PrettyTable()
    if len(loss.shape) > 2:
        dim = (0, 1)  # average over batch and time, keep the sensor axis
    else:
        dim = 0
    if type(loss) == torch.Tensor:
        out = torch.mean(loss, dim)
        # FIX: this branch used a hard-coded "mean_detectors" column header,
        # inconsistent with the loss_type label used by the numpy branch.
        tab.add_column(f"{loss_type}_detectors", out.numpy())
    else:
        out = np.mean(loss, dim)
        tab.add_column(f"{loss_type}_detectors", out)
    print(f'{loss_type} per sensor:')
    print(tab, '\n')
    return out
def print_loss_seq(loss_type, loss):
    """Prints (and returns) the loss averaged per sequence time-step."""
    # loss must be calculated with no reduction
    tab = PrettyTable()
    tab.field_names = [f"time_{i+1}" for i in range(loss.shape[1])]
    if type(loss) == torch.Tensor:
        per_step = torch.mean(loss, (0, 2))  # average over batch and sensors
        tab.add_row(per_step.numpy())
    else:
        per_step = np.mean(loss, (0, 2))
        tab.add_row(per_step)
    print(f'{loss_type} per sequence time-step:')
    print(tab, '\n')
    return per_step
def print_loss_sensor_seq(loss_type, loss):
    """Prints (and returns) the loss averaged per sensor per sequence time-step."""
    # loss must be calculated with no reduction
    tab = PrettyTable()
    tab.field_names = [f"time_{i+1}" for i in range(loss.shape[1])]
    if type(loss) == torch.Tensor:
        per_cell = torch.mean(loss, 0)  # average over the batch axis only
        tab.add_rows(per_cell.transpose(0, 1).numpy())
    else:
        per_cell = np.mean(loss, 0)
        tab.add_rows(per_cell.transpose(0, 1))
    print(f'{loss_type} per sensor per sequence time-step')
    print(tab, '\n')
    return per_cell
``` |
{
"source": "jose-gilberto/natural-selection",
"score": 4
} |
#### File: jose-gilberto/natural-selection/natural_selection.py
```python
import random
# Simple natural selection algorithm
bases = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ ') # Including the blank space
goal_organism = 'CONSEGUE MOISES'  # target string the population evolves towards
# Generates first organism of our natural selection.
# 'Genome' is all random letters
def abiogenesis():
    """Returns a random genome string the same length as the goal organism."""
    return ''.join(random.choice(bases) for _ in range(len(goal_organism)))
number_of_children = 100  # offspring produced per generation
def reproduce(parent_code, number_of_children):
    """Returns `number_of_children` mutated copies of parent_code."""
    # NOTE(review): uses the module-level mutation_rate global rather than a
    # parameter — confirm this is intended.
    return [mutate(parent_code, mutation_rate) for _ in range(number_of_children)]
mutation_rate = 0.05  # per-character replacement probability
def mutate(parent_code, mutation_rate):
    """Returns a copy of parent_code where each character is independently
    replaced by a random base with probability mutation_rate."""
    return ''.join(
        random.choice(bases) if random.random() < mutation_rate else charac
        for charac in parent_code)
def natural_selection(organisms):
    """Returns the organism that matches goal_organism in the most positions.

    Ties go to the later candidate (the original's >= comparison); an empty
    population returns ''.
    """
    fittest_organism = 0
    fittest_organism_code = ''
    for child in organisms:
        # FIX: the original compared child[x-1] to goal_organism[x-1] over
        # range(len(child)), which only worked by accident through negative
        # indexing; compare positions directly instead.
        fittness = sum(1 for a, b in zip(child, goal_organism) if a == b)
        if fittness >= fittest_organism:
            fittest_organism = fittness
            fittest_organism_code = child
    return fittest_organism_code
first_organism = abiogenesis()
surviving_organism = first_organism
generation = 1
organisms = [first_organism]
# Evolution loop: select the fittest organism each generation and breed the
# next population from it until the goal string is reached.
while surviving_organism != goal_organism:
    print('Generation: %s' % str(generation))
    surviving_organism = natural_selection(organisms)
    print('Fittest organisms genetic code: %s' % surviving_organism)
    organisms = reproduce(surviving_organism, number_of_children)
    generation += 1
    if generation > 1000: # safety valve: stop runaway runs after 1000 generations
        break
``` |
{
"source": "jose-gilberto/yardb",
"score": 3
} |
#### File: yardb/core/startup.py
```python
import os
from abc import ABC, abstractmethod
from yardb import __version__
class Subsystem(ABC):
    """
    Base interface for yardb subsystems. A subsystem can accept requests from
    the facade or from clients directly — to the subsystem the Facade is just
    another client, and it is not itself part of the subsystem.
    """
    @abstractmethod
    def start(self):
        """Boots the subsystem."""
        pass
# Facade pattern
class YardbFacade:
    """
    Simple entry point over yardb's complex subsystem logic (Facade pattern).
    Delegates client requests to the appropriate subsystem objects, manages
    their lifecycle, and shields callers from subsystem complexity.
    - text by refactoring guru with some adaptations.
    """
    def __init__(self) -> None:
        self.subsystems = [YardbFolderSystem()]
    def start(self):
        """Starts every registered subsystem, in registration order."""
        for subsystem in self.subsystems:
            subsystem.start()
class YardbFolderSystem(Subsystem):
    """Subsystem that bootstraps yardb's on-disk layout under ~/.yardb."""
    ROOT_PATH = os.path.expanduser('~/.yardb/')
    CONF_PATH = os.path.expanduser('~/.yardb/yardb.conf')
    # Object directories created for every fresh install.
    _SUBDIRS = (
        'base/public/tables', 'base/public/views', 'base/public/indexes',
        'base/template/tables', 'base/template/views', 'base/template/indexes',
    )
    def create_folders(self) -> None:
        """Creates the directory tree and config file; no-op when the root
        directory already exists."""
        if os.path.isdir(YardbFolderSystem.ROOT_PATH):
            return
        for subdir in YardbFolderSystem._SUBDIRS:
            os.makedirs(os.path.join(YardbFolderSystem.ROOT_PATH, subdir))
        # `with` closes the file; the original's explicit close() inside the
        # with-block was redundant, as was the trailing return.
        with open(YardbFolderSystem.CONF_PATH, 'w') as conf_file:
            conf_file.write(f'VERSION:{__version__}\n')
            conf_file.write('CLUSTER:main')
    def start(self):
        self.create_folders()
```
#### File: query/parser/parser.py
```python
from yardb.query.parser.ast.structures import TableStructure
from yardb.query.parser.ast.statements import SelectStatement
from yardb.query.parser.ast.operations import ProjectionOperation
from .lexer import Lexer, TokenKind
class Parser:
    """Recursive-descent parser building an AST from the lexer's token stream."""
    lexer: Lexer
    def __init__(self, lexer: Lexer) -> None:
        self.lexer = lexer
        # prime the lookahead with the first token
        self.current_token = self.lexer.get_next_token()
    def error(self):
        """Raises on an unexpected token."""
        raise Exception(f'ERR - Parser error : unknown token {self.current_token}.')
    def eat(self, token_kind: TokenKind):
        """Consumes the current token if it matches token_kind, else errors."""
        if self.current_token.kind == token_kind:
            self.current_token = self.lexer.get_next_token()
        else:
            self.error()
    def statement(self):
        """statement : SELECT select_statement SEMICOLON"""
        token = self.current_token
        if token.kind == TokenKind.SELECT:
            self.eat(TokenKind.SELECT)
            stmt = self.select_statement()
            self.eat(TokenKind.SEMICOLON)
            return stmt
        # FIX: the original silently fell through and returned None on any
        # non-SELECT token; report the unknown statement instead.
        self.error()
    def select_statement(self):
        """select_statement : ASTERISK FROM ID — currently only SELECT * FROM table."""
        token = self.current_token
        self.eat(TokenKind.ASTERISK)
        self.eat(TokenKind.FROM)
        table_name = self.current_token.value
        self.eat(TokenKind.ID)
        return SelectStatement(
            ProjectionOperation(
                TableStructure(table_name)
            )
        )
    def parser(self):
        """Parses and returns a single statement."""
        return self.statement()
``` |
{
"source": "Jose-G-Melo/Delivery-de-Pizza",
"score": 3
} |
#### File: Jose-G-Melo/Delivery-de-Pizza/home.py
```python
from tkinter import *
from tkinter.font import Font
from PIL import Image, ImageTk
import sys
class myApp(object):
    """
    Landing screen of the pizzeria delivery app: full-screen Tk window with a
    background image, title, login/register buttons and a credits label.
    """

    def __init__(self, **kw):
        self.window = Tk()
        self.window.title("<NAME>")
        self.window.iconbitmap('pizzariaLuigi.ico')
        # Size the window to the full screen resolution.
        self.window.geometry("{}x{}".format(self.window.winfo_screenwidth(), self.window.winfo_screenheight()))
        my_font = Font(family="Comic Sans MS", size=16, weight="bold")
        img = Image.open('backgroundPizza.jpg')
        # Fix: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter under its current name.
        img = img.resize((1380, 600), Image.LANCZOS)
        image = ImageTk.PhotoImage(img)
        # Keep a reference on the window so Tk's GC does not drop the image.
        self.window.img = image
        bg_image = Canvas(self.window, width=1380, height=600, bg="red")
        bg_image.create_image(0, -150, image=image, anchor="nw")
        bg_image.create_text(680, 300, text="<NAME>", font=Font(family="Comic Sans MS", size=60, weight="bold"), fill="#FFFFFF")
        bg_image.pack()
        # White panel covering the lower half of the screen.
        background_white = Label(self.window, background="#FFFFFF")
        background_white.place(x=0, y=430, relwidth=1, relheight=1)
        subtitle = Label(self.window, text="Sabor com qualidade!", font=my_font, background="#FFFFFF", foreground="#2D2119")
        subtitle.place(x=560, y=430)
        buttonLogin = Button(self.window, width=21, text="ENTRAR", relief=SOLID, font=my_font)
        buttonLogin.configure(background="#FFFFFF", foreground="#2D2119")
        buttonLogin.place(x=530, y=525)
        buttonCreateAccount = Button(self.window, width=21, text="CADASTRAR", relief=SOLID, font=my_font)
        buttonCreateAccount.configure(background="#2D2119", foreground="#FFFFFF")
        buttonCreateAccount.place(x=530, y=600)
        nameCreator = Label(self.window, text="Software criado e mantido por <NAME>", foreground="#2D2119", background="#FFFFFF")
        nameCreator.configure(font=Font(family="Comic Sans MS", size=10, weight="bold"))
        nameCreator.place(x=1000, y=680)

    def execute(self):
        """Run the Tk main event loop (blocks until the window closes)."""
        self.window.mainloop()
def main(args):
    """Build the application window, run its event loop and return 0."""
    app = myApp()
    app.execute()
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
``` |
{
"source": "josegob/microsoft-sec-api",
"score": 2
} |
#### File: api/helpers/CreateAzureAppService.py
```python
from .AzureSDKRepository import AzureSDKRepository
class CreateAzureAppService:
    """Use case: initialise the Azure application through the SDK repository."""

    def __init__(self, config):
        self.repository = AzureSDKRepository(config)

    def execute(self):
        """Initialise the Azure app and return the repository's result."""
        repo = self.repository
        return repo.init_azure_app()
```
#### File: api/helpers/CreateAzureTokenService.py
```python
from .CreateAzureAppService import CreateAzureAppService
from .AzureSDKRepository import AzureSDKRepository
class CreateAzureTokenService:
    """Use case: obtain an Azure token for the (freshly initialised) app."""

    def __init__(self, config):
        self.repository = AzureSDKRepository(config)
        self.create_azure_app_service = CreateAzureAppService(config)

    def execute(self):
        """Create the app, then exchange it for a token; None when app creation failed."""
        app = self.create_azure_app_service.execute()
        if app is None:
            return None
        return self.repository.create_token(app)
```
#### File: api/helpers/PatchAlertsByProviderService.py
```python
import json
from .GetAlertsByProviderService import GetAlertsByProviderService
class PatchAlertsByProviderService(GetAlertsByProviderService):
    """Service that applies a PATCH payload to a single provider alert."""

    def __init__(self, config, request_data):
        super().__init__(config)
        # Raw request body, forwarded verbatim to the provider API.
        self.request_body = request_data.body

    def execute(self, alert_id):
        """
        Patch *alert_id* with the stored body.

        Returns True on success, or None with ``error_message`` set on failure.
        """
        patch_alerts_response = self.azure_api_repository.patchAlerts(alert_id, self.request_body)
        # NOTE(review): a non-None repository response is treated as failure;
        # presumably patchAlerts returns an error object on failure and None
        # on success -- confirm against the repository implementation.
        if patch_alerts_response is not None:
            # Fix: the two adjacent literals previously concatenated without a
            # space, producing "...correctorthe credentials...".
            self.error_message = ("Error updating alert. Check either if the alert ID is correct "
                                  "or the credentials are correct.")
            return None
        return True
```
#### File: api/helpers/ValidateRequestBodyService.py
```python
from .ValidateSecretsBodyService import ValidateSecretsBodyService
from .CheckBodyParamsService import CheckBodyParamsService
from django.apps import apps
app_config = apps.get_app_config('api')
class ValidateRequestBodyService(ValidateSecretsBodyService):
    """Validates an incoming request body for the given HTTP method."""

    def __init__(self, request_body, method):
        super().__init__(request_body)
        self.method = method
        self.check_body_params_service = CheckBodyParamsService(
            self.request_body, self.method)

    def execute(self):
        """Return True when the body is valid; None (with error_message set) otherwise."""
        checker = self.check_body_params_service
        if not checker.execute():
            self.error_message = checker.error_message
            return None
        if self.method == 'GET':
            # GET requests carry the provider selector in the body.
            self.provider_name = self.request_body['provider']
        return True
```
#### File: apps/api/views.py
```python
from django.views.decorators.http import require_http_methods
from django.apps import apps
from .helpers.GetAlertsByProviderView import GetAlertsByProviderView
from .helpers.PatchAlertsByProviderView import PatchAlertsByProviderView
app_config = apps.get_app_config('api')
@require_http_methods(["GET"])
def get_alerts_by_provider_view(request):
    """HTTP endpoint: list alerts for the provider named in the request."""
    return GetAlertsByProviderView(request).execute()
@require_http_methods(["PATCH"])
def patch_alerts_by_provider_view(request, alert_id):
    """HTTP endpoint: patch the single alert identified by *alert_id*."""
    return PatchAlertsByProviderView(request).execute(alert_id)
``` |
{
"source": "josegomezr/graph_db",
"score": 3
} |
#### File: graph_db/graph_db/types.py
```python
class BaseDBDriver():
    """
    This will stub the most basic methods that a GraphDB driver must have.
    """
    # Class-level defaults; _settings gets a per-instance copy in __init__ so
    # drivers never share mutable state through the class attribute.
    _connected = False
    _settings = {}
    # Default verbosity. Fix: _debug() previously raised AttributeError unless
    # callers assigned `debug` manually.
    debug = False

    def __init__(self, dbapi):
        self.dbapi = dbapi
        self._settings = {}  # own copy: the class-level dict is shared

    def _debug(self, *args):
        """Print a trace line when ``self.debug`` is truthy."""
        if self.debug:
            print("[GraphDB #%x]:" % id(self), *args)

    def _debugOut(self, *args):
        """Trace an outgoing message."""
        self._debug("OUT --> ", *args)

    def _debugIn(self, *args):
        """Trace an incoming message."""
        self._debug("IN <-- ", *args)

    def connect(self):
        """
        Performs connection to the database service.
        connect() -> self
        """
        raise NotImplementedError('Not Implemented Yet')

    def query(self, sql):
        """
        Performs a query to the database.
        query( sql ) -> dict
        """
        raise NotImplementedError('Not Implemented Yet')

    def disconnect(self):
        """
        Performs disconnection and garbage collection for the driver.
        disconnect() -> self
        """
        raise NotImplementedError('Not Implemented Yet')
class BaseEdgeDriver(object):
    """
    Base driver for managing Edges: declares the CRUD/search operations that
    concrete drivers must implement.
    """

    def __init__(self, driver):
        self.driver = driver

    def create(self, eType, origin, destiny, data = {}):
        """
        create(eType, origin, destiny [, data]) -> dict

        Create an edge of kind *eType* from *origin* to *destiny*.
        """
        raise NotImplementedError('Not Implemented Yet')

    def update(self, eType, criteria = {}, data = {}):
        """
        update(eType [, criteria [, data]]) -> dict

        Update every edge matching *criteria*.
        """
        raise NotImplementedError('Not Implemented Yet')

    def delete(self, eType, criteria = {}):
        """
        delete(eType [, criteria]) -> dict

        Delete every edge matching *criteria*.
        """
        raise NotImplementedError('Not Implemented Yet')

    def find(self, eType, criteria = {}):
        """
        find(eType [, criteria]) -> list

        Look up edges matching *criteria*.
        """
        raise NotImplementedError('Not Implemented Yet')
class BaseVertexDriver(object):
    """
    Base driver for managing Vertexes: declares the CRUD/search operations
    that concrete drivers must implement.
    """

    def __init__(self, driver):
        self.driver = driver

    def create(self, vType, data = {}):
        """
        create(vType, [, data]) -> dict

        Create a vertex of kind *vType*.
        """
        raise NotImplementedError('Not Implemented Yet')

    def update(self, vType, criteria = {}, data = {}):
        """
        update(vType, criteria, data) -> dict

        Update every vertex matching *criteria*.
        """
        raise NotImplementedError('Not Implemented Yet')

    def delete(self, vType, criteria = {}):
        """
        delete(vType, criteria) -> dict

        Delete every vertex matching *criteria*.
        """
        raise NotImplementedError('Not Implemented Yet')

    def find(self, vType, criteria = None):
        """
        find(vType [, criteria]) -> list

        Look up vertexes matching *criteria*.
        """
        raise NotImplementedError('Not Implemented Yet')
``` |
{
"source": "josegomezr/pyqb",
"score": 3
} |
#### File: pyqb/pqb/expressions.py
```python
import re

# Strips every character that is not alphanumeric, underscore or '@'
# (case-insensitive) from identifiers before they are emitted as SQL.
REGEX_CLEANER = re.compile(r"[^a-z0-9_@]", re.I)


class AliasExpression(object):
    """
    SQL alias expression: renders `<field> AS <alias>`, or just `<field>`
    when no alias was provided.
    """

    def __init__(self, field_name, alias=None, *args, **kwargs):
        # Fix: super(self.__class__, ...) recurses infinitely when subclassed;
        # name the class explicitly.
        super(AliasExpression, self).__init__()
        if isinstance(field_name, list):
            field_name, alias = field_name
        lowered = field_name.lower()
        if ' as ' in lowered:
            # Fix: the membership test was case-insensitive but the split was
            # not, so 'x AS y' raised ValueError on unpacking. Split at the
            # located position instead.
            pos = lowered.index(' as ')
            self.field_name = field_name[:pos]
            self.alias = field_name[pos + 4:]
        else:
            self.field_name = field_name
            self.alias = alias

    def result(self):
        """Build and return the SQL fragment."""
        field = re.sub(REGEX_CLEANER, '', self.field_name)
        if self.alias:
            alias = re.sub(REGEX_CLEANER, '', self.alias)
            return "%s AS %s" % (field, alias)
        return field
class ConditionExpression(object):
    """
    SQL conditional expression, as used in WHERE and HAVING clauses:
    `<field> <operator> <value>`, optionally prefixed by a conjunction.
    """

    def __init__(self, field, value, *args, **kwargs):
        self.field = field
        self.value = value
        self.operator = kwargs.get('operator', '=')
        self.conjunction = kwargs.get('conjunction')

    def result(self):
        """Render the condition as an SQL fragment."""
        field = re.sub(REGEX_CLEANER, '', self.field)
        try:
            rendered = float(self.value)
        except TypeError:
            # Non-scalar value (e.g. list/tuple): render as a tuple literal.
            rendered = "(%s)" % ("', '".join(self.value))
        except ValueError:
            # Non-numeric string: escape backslashes and quotes, then quote it.
            escaped = str(self.value) \
                .replace("\\", r"\\") \
                .replace('"', r'\"') \
                .replace("'", r"\'")
            rendered = "'%s'" % escaped
        clause = "%s %s %s" % (field, self.operator, rendered)
        if self.conjunction:
            clause = "%s %s" % (self.conjunction, clause)
        return clause
class OrderByExpression(object):
    """
    SQL ordering expression: renders `<field> <ASC|DESC>`.

    Accepts a plain field name, a "field DIRECTION" string, or a
    [field, direction] list.
    """

    def __init__(self, field, orientation='ASC'):
        super(OrderByExpression, self).__init__()
        if isinstance(field, list):
            self.field, self.orientation = field[0:2]
        else:
            parts = field.split(' ')
            if len(parts) == 2 and parts[1].upper() in ('ASC', 'DESC'):
                # Inline direction, e.g. "name DESC".
                self.field, self.orientation = parts
            else:
                # Fix: the old check `'ASC' in field.upper()` matched fields
                # that merely *contain* asc/desc (e.g. "cascade") and then
                # crashed unpacking the split; require an explicit suffix.
                self.field = field
                self.orientation = orientation

    def result(self):
        """Build and return the SQL fragment."""
        return "%s %s" % (self.field, self.orientation)
``` |
{
"source": "josegonzalez/ebs-deploy",
"score": 2
} |
#### File: ebs_deploy/commands/zdt_deploy_command.py
```python
import time
from ebs_deploy import out, get, parse_env_config, parse_option_settings, upload_application_archive
def add_arguments(parser):
    """
    Registers the command-line arguments understood by the zero-downtime
    deploy command on the given argparse parser.
    """
    parser.add_argument('-e', '--environment', help='Environment name', required=True)
    parser.add_argument('-w', '--dont-wait', help='Skip waiting', action='store_true')
    parser.add_argument('-a', '--archive', help='Archive file', required=False)
    parser.add_argument('-d', '--directory', help='Directory', required=False)
    parser.add_argument('-l', '--version-label', help='Version label', required=False)
    parser.add_argument(
        '-t', '--termination-delay', type=int, required=False,
        help='Delay termination of old environment by this number of seconds')
def execute(helper, config, args):
    """
    Deploys to an environment

    Zero-downtime flow: create a brand-new environment beside the one
    currently serving the cname, deploy the new version there, swap the
    cnames once it is Ready/Green, then terminate the old environment.
    NOTE: uses `xrange`, so this module targets Python 2.
    """
    version_label = args.version_label
    archive = args.archive
    # get the environment configuration
    env_config = parse_env_config(config, args.environment)
    option_settings = parse_option_settings(env_config.get('option_settings', {}))
    cname_prefix = env_config.get('cname_prefix', None)
    # find existing environment name
    old_env_name = helper.environment_name_for_cname(cname_prefix)
    if old_env_name is None:
        raise Exception("Unable to find current environment with cname: " + cname_prefix)
    out("Current environment name is " + old_env_name)
    # find an available environment name
    out("Determining new environment name...")
    new_env_name = None
    if not helper.environment_exists(args.environment):
        new_env_name = args.environment
    else:
        # Probe numbered suffixes until a free name is found (max 10 tries).
        for i in xrange(10):
            temp_env_name = args.environment + '-' + str(i)
            if not helper.environment_exists(temp_env_name):
                new_env_name = temp_env_name
                break
    if new_env_name is None:
        raise Exception("Unable to determine new environment name")
    out("New environment name will be " + new_env_name)
    # find an available cname name
    out("Determining new environment cname...")
    new_env_cname = None
    for i in xrange(10):
        temp_cname = cname_prefix + '-' + str(i)
        if not helper.environment_name_for_cname(temp_cname):
            new_env_cname = temp_cname
            break
    if new_env_cname is None:
        raise Exception("Unable to determine new environment cname")
    out("New environment cname will be " + new_env_cname)
    # upload or build an archive
    version_label = upload_application_archive(
        helper, env_config, archive=args.archive, directory=args.directory, version_label=version_label)
    # create the new environment
    helper.create_environment(new_env_name,
                              solution_stack_name=env_config.get('solution_stack_name'),
                              cname_prefix=new_env_cname,
                              description=env_config.get('description', None),
                              option_settings=option_settings,
                              version_label=version_label,
                              tier_name=env_config.get('tier_name'),
                              tier_type=env_config.get('tier_type'),
                              tier_version=env_config.get('tier_version'))
    # Block until the replacement environment is fully healthy before swapping.
    helper.wait_for_environments(new_env_name, status='Ready', health='Green', include_deleted=False)
    # swap C-Names
    out("Swapping environment cnames")
    helper.swap_environment_cnames(old_env_name, new_env_name)
    helper.wait_for_environments([old_env_name, new_env_name], status='Ready', include_deleted=False)
    # delete the old environment
    if args.termination_delay:
        # Grace period so in-flight requests against the old cname can drain.
        out("Termination delay specified, sleeping for {} seconds...".format(args.termination_delay))
        time.sleep(args.termination_delay)
    helper.delete_environment(old_env_name)
    # delete unused
    helper.delete_unused_versions(versions_to_keep=int(get(config, 'app.versions_to_keep', 10)))
```
#### File: ebs-deploy/ebs_deploy/__init__.py
```python
from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection
from boto.beanstalk import connect_to_region
from boto.s3.key import Key
from datetime import datetime
from time import time, sleep
import zipfile
import os
import subprocess
import sys
import yaml
import re
def out(message):
    """Write *message* plus a newline to stdout and flush immediately."""
    stream = sys.stdout
    stream.write(message + "\n")
    stream.flush()
def merge_dict(dict1, dict2):
    """
    Recursively merge *dict1* into a copy of *dict2*.

    Keys only in *dict1* are added; nested dicts are merged recursively;
    lists are concatenated (dict1's items first); for any other clash the
    value from *dict2* wins.
    """
    merged = dict(dict2)
    for key, left in dict1.items():
        right = dict2.get(key)
        if right is None:
            merged[key] = left
        elif isinstance(left, dict) and isinstance(right, dict):
            merged[key] = merge_dict(left, right)
        elif isinstance(left, list) and isinstance(right, list):
            merged[key] = left + right
        else:
            merged[key] = right
    return merged
def get(vals, key, default_val=None):
    """
    Look up a dotted *key* path (e.g. 'app.environments.prod') in nested
    dicts, returning *default_val* when any segment is missing or None.
    """
    node = vals
    for part in key.split('.'):
        if not isinstance(node, dict):
            return default_val
        node = node.get(part, None)
        if node is None:
            return default_val
    return node
def parse_option_settings(option_settings):
    """
    Flatten the config-file form {namespace: {option: value}} into the
    [(namespace, option, value), ...] tuples boto expects.
    """
    return [
        (namespace, option, value)
        for namespace, params in option_settings.items()
        for option, value in params.items()
    ]
def parse_env_config(config, env_name):
    """
    Build the effective configuration for *env_name*: app-wide defaults from
    `app.all_environments` merged with the environment's own overrides.
    """
    defaults = get(config, 'app.all_environments', {})
    overrides = get(config, 'app.environments.' + str(env_name), {})
    return merge_dict(defaults, overrides)
def upload_application_archive(helper, env_config, archive=None, directory=None, version_label=None):
    """
    Resolve or build an application archive, upload it to S3 and register it
    as an application version.

    The archive comes from (in priority order): the explicit *archive* path,
    an external `archive.generate` command from the environment config, or a
    zip built from *directory* (default '.') honouring the config's
    include/exclude patterns. Returns the version label used.
    """
    if version_label is None:
        # Default to a timestamp-based label.
        version_label = datetime.now().strftime('%Y%m%d_%H%M%S')
    archive_file_name = None
    if archive:
        archive_file_name = os.path.basename(archive)
    # generate the archive externally
    if get(env_config, 'archive.generate'):
        cmd = get(env_config, 'archive.generate.cmd')
        output_file = get(env_config, 'archive.generate.output_file')
        use_shell = get(env_config, 'archive.generate.use_shell', True)
        exit_code = get(env_config, 'archive.generate.exit_code', 0)
        if not cmd or not output_file:
            raise Exception('Archive generation requires cmd and output_file at a minimum')
        # output_file may also be a regex used to locate the produced archive.
        # Fix: was a bare `except:` which swallowed every exception type.
        output_regex = None
        try:
            output_regex = re.compile(output_file)
        except re.error:
            output_regex = None
        result = subprocess.call(cmd, shell=use_shell)
        if result != exit_code:
            # Fix: error message typo 'execited' -> 'exited'.
            raise Exception('Generate command exited with code %s (expected %s)' % (result, exit_code))
        if output_file and os.path.exists(output_file):
            archive_file_name = os.path.basename(output_file)
        else:
            # Search the tree for a file matching the path suffix or regex.
            for root, dirs, files in os.walk(".", followlinks=True):
                for f in files:
                    fullpath = os.path.join(root, f)
                    if fullpath.endswith(output_file):
                        archive = fullpath
                        archive_file_name = os.path.basename(fullpath)
                        break
                    elif output_regex and output_regex.match(fullpath):
                        archive = fullpath
                        archive_file_name = os.path.basename(fullpath)
                        break
                if archive:
                    break
            if not archive or not archive_file_name:
                raise Exception('Unable to find expected output file matching: %s' % (output_file))
    # create the archive
    elif not archive:
        if not directory:
            directory = "."
        includes = get(env_config, 'archive.includes', [])
        excludes = get(env_config, 'archive.excludes', [])
        archive_files = get(env_config, 'archive.files', [])

        def _predicate(f):
            # Excludes always win; with includes present a file must match one.
            for exclude in excludes:
                if re.match(exclude, f):
                    return False
            if len(includes) > 0:
                for include in includes:
                    if re.match(include, f):
                        return True
                return False
            return True

        archive = create_archive(directory, str(version_label) + ".zip", config=archive_files, ignore_predicate=_predicate)
        archive_file_name = str(version_label) + ".zip"
    helper.upload_archive(archive, archive_file_name)
    helper.create_application_version(version_label, archive_file_name)
    return version_label
def create_archive(directory, filename, config={}, ignore_predicate=None, ignored_files=['.git', '.svn']):
    """
    Creates a zip archive from a directory and returns the file that was
    created.

    *config* may inject extra in-archive files (literal `content` or a
    `yaml` tree); *ignore_predicate* filters archive-relative names;
    *ignored_files* are path suffixes skipped unconditionally.
    """
    archive_zip = zipfile.ZipFile(filename, 'w', compression=zipfile.ZIP_DEFLATED)
    root_len = len(os.path.abspath(directory))
    # create it
    out("Creating archive: " + str(filename))
    for root, dirs, files in os.walk(directory, followlinks=True):
        archive_root = os.path.abspath(root)[root_len + 1:]
        for f in files:
            fullpath = os.path.join(root, f)
            archive_name = os.path.join(archive_root, f)
            # ignore the file we're creating
            if filename in fullpath:
                continue
            # ignored files. Fix: the old inner `for name ...: continue` only
            # continued the inner loop, so flagged files were still added.
            if ignored_files is not None:
                matched = next((name for name in ignored_files if fullpath.endswith(name)), None)
                if matched is not None:
                    out("Skipping: " + str(matched))
                    continue
            # do predicate
            if ignore_predicate is not None:
                if not ignore_predicate(archive_name):
                    out("Skipping: " + str(archive_name))
                    continue
            out("Adding: " + str(archive_name))
            archive_zip.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
    # add the injected config entries
    for entry in config:
        for member_name, tree in entry.items():
            # Fix: dict.has_key() is Python 2 only; `in` works on both.
            if 'yaml' in tree:
                content = yaml.dump(tree['yaml'], default_flow_style=False)
            else:
                content = tree.get('content', '')
            out("Writing config file for " + str(member_name))
            archive_zip.writestr(member_name, content)
    archive_zip.close()
    return filename
class AwsCredentials:
    """Value object holding AWS credentials and the S3 upload location."""

    def __init__(self, access_key, secret_key, region, bucket, bucket_path):
        self.access_key = access_key
        self.secret_key = secret_key
        self.bucket = bucket
        self.region = region
        # Normalise the S3 prefix so keys can be appended directly.
        self.bucket_path = bucket_path if bucket_path.endswith('/') else bucket_path + '/'
class EbsHelper(object):
    """
    Helper wrapping the boto Elastic Beanstalk and S3 APIs for a single
    application: archive upload, environment CRUD, cname swaps and waiting
    on environment health/status transitions.
    """
    def __init__(self, aws, app_name=None):
        """
        Creates the EbsHelper.

        *aws* is an AwsCredentials instance; *app_name* the EB application
        this helper operates on.
        """
        self.aws = aws
        self.ebs = connect_to_region(aws.region, aws_access_key_id=aws.access_key,
                                     aws_secret_access_key=aws.secret_key)
        # us-east-1 uses the bare S3 endpoint; every other region is prefixed.
        self.s3 = S3Connection(aws.access_key, aws.secret_key, host=(
            lambda r: 's3.amazonaws.com' if r == 'us-east-1' else 's3-' + r + '.amazonaws.com')(aws.region))
        self.app_name = app_name

    def swap_environment_cnames(self, from_env_name, to_env_name):
        """
        Swaps cnames for an environment
        """
        self.ebs.swap_environment_cnames(source_environment_name=from_env_name,
                                         destination_environment_name=to_env_name)

    def upload_archive(self, filename, key, auto_create_bucket=True):
        """
        Uploads an application archive version to s3
        """
        try:
            bucket = self.s3.get_bucket(self.aws.bucket)
            # Legacy S3 quirk: us-east-1 buckets report location '' -- verify
            # the existing bucket actually lives in the configured region.
            if ((
                self.aws.region != 'us-east-1' and self.aws.region != 'eu-west-1') and bucket.get_location() != self.aws.region) or (
                self.aws.region == 'us-east-1' and bucket.get_location() != '') or (
                self.aws.region == 'eu-west-1' and bucket.get_location() != 'eu-west-1'):
                raise Exception("Existing bucket doesn't match region")
        except S3ResponseError:
            # Bucket missing (or inaccessible): create it in the region.
            bucket = self.s3.create_bucket(self.aws.bucket, location=self.aws.region)

        def __report_upload_progress(sent, total):
            # NOTE(review): divides by `total`; a zero-byte upload would raise
            # ZeroDivisionError -- confirm boto never reports total == 0.
            if not sent:
                sent = 0
            if not total:
                total = 0
            out("Uploaded " + str(sent) + " bytes of " + str(total) \
                + " (" + str(int(float(max(1, sent)) / float(total) * 100)) + "%)")

        # upload the new version
        k = Key(bucket)
        k.key = self.aws.bucket_path + key
        k.set_metadata('time', str(time()))
        k.set_contents_from_filename(filename, cb=__report_upload_progress, num_cb=10)

    def list_available_solution_stacks(self):
        """
        Returns a list of available solution stacks
        """
        stacks = self.ebs.list_available_solution_stacks()
        return stacks['ListAvailableSolutionStacksResponse']['ListAvailableSolutionStacksResult']['SolutionStacks']

    def create_application(self, description=None):
        """
        Creates the Elastic Beanstalk application named by self.app_name.
        """
        out("Creating application " + str(self.app_name))
        self.ebs.create_application(self.app_name, description=description)

    def delete_application(self):
        """
        Deletes the application, force-terminating any running environments.
        """
        out("Deleting application " + str(self.app_name))
        self.ebs.delete_application(self.app_name, terminate_env_by_force=True)

    def application_exists(self):
        """
        Returns whether or not the given app_name exists
        """
        response = self.ebs.describe_applications(application_names=[self.app_name])
        return len(response['DescribeApplicationsResponse']['DescribeApplicationsResult']['Applications']) > 0

    def create_environment(self, env_name, version_label=None,
                           solution_stack_name=None, cname_prefix=None, description=None,
                           option_settings=None, tier_name='WebServer', tier_type='Standard', tier_version='1.1'):
        """
        Creates a new environment
        """
        out("Creating environment: " + str(env_name) + ", tier_name:" + str(tier_name) + ", tier_type:" + str(tier_type))
        self.ebs.create_environment(self.app_name, env_name,
                                    version_label=version_label,
                                    solution_stack_name=solution_stack_name,
                                    cname_prefix=cname_prefix,
                                    description=description,
                                    option_settings=option_settings,
                                    tier_type=tier_type,
                                    tier_name=tier_name,
                                    tier_version=tier_version)

    def environment_exists(self, env_name):
        """
        Returns whether or not the given environment exists (a Terminated
        environment counts as non-existent).
        """
        response = self.ebs.describe_environments(application_name=self.app_name, environment_names=[env_name],
                                                  include_deleted=False)
        return len(response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']) > 0 \
            and response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments'][0][
                'Status'] != 'Terminated'

    def rebuild_environment(self, env_name):
        """
        Rebuilds an environment
        """
        out("Rebuilding " + str(env_name))
        self.ebs.rebuild_environment(environment_name=env_name)

    def get_environments(self):
        """
        Returns the application's non-deleted environments.
        """
        response = self.ebs.describe_environments(application_name=self.app_name, include_deleted=False)
        return response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']

    def delete_environment(self, environment_name):
        """
        Deletes an environment, terminating its resources.
        """
        self.ebs.terminate_environment(environment_name=environment_name, terminate_resources=True)

    def update_environment(self, environment_name, description=None, option_settings=[], tier_type=None, tier_name=None,
                           tier_version='1.0'):
        """
        Updates an environment's settings after validating them.
        """
        out("Updating environment: " + str(environment_name))
        messages = self.ebs.validate_configuration_settings(self.app_name, option_settings,
                                                            environment_name=environment_name)
        messages = messages['ValidateConfigurationSettingsResponse']['ValidateConfigurationSettingsResult']['Messages']
        # NOTE(review): `ok` is computed from the validation messages but never
        # checked -- the update below proceeds even on validation errors.
        ok = True
        for message in messages:
            if message['Severity'] == 'error':
                ok = False
            out("[" + message['Severity'] + "] " + str(environment_name) + " - '" \
                + message['Namespace'] + ":" + message['OptionName'] + "': " + message['Message'])
        self.ebs.update_environment(
            environment_name=environment_name,
            description=description,
            option_settings=option_settings,
            tier_type=tier_type,
            tier_name=tier_name,
            tier_version=tier_version)

    def environment_name_for_cname(self, env_cname):
        """
        Returns an environment name for the given cname
        """
        envs = self.get_environments()
        for env in envs:
            if env['Status'] != 'Terminated' and env['CNAME'].lower().startswith(env_cname.lower() + '.'):
                return env['EnvironmentName']
        return None

    def deploy_version(self, environment_name, version_label):
        """
        Deploys a version to an environment
        """
        out("Deploying " + str(version_label) + " to " + str(environment_name))
        self.ebs.update_environment(environment_name=environment_name, version_label=version_label)

    def create_application_version(self, version_label, key):
        """
        Creates an application version from the already-uploaded S3 object.
        """
        out("Creating application version " + str(version_label) + " for " + str(key))
        self.ebs.create_application_version(self.app_name, version_label,
                                            s3_bucket=self.aws.bucket, s3_key=self.aws.bucket_path+key)

    def delete_unused_versions(self, versions_to_keep=10):
        """
        Deletes application versions that are neither among the newest
        *versions_to_keep* nor deployed to any environment.
        """
        # get versions in use
        environments = self.ebs.describe_environments(application_name=self.app_name, include_deleted=False)
        environments = environments['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']
        versions_in_use = []
        for env in environments:
            versions_in_use.append(env['VersionLabel'])
        # get all versions, newest first
        versions = self.ebs.describe_application_versions(application_name=self.app_name)
        versions = versions['DescribeApplicationVersionsResponse']['DescribeApplicationVersionsResult'][
            'ApplicationVersions']
        # NOTE: sorted(..., cmp=...) and the cmp builtin are Python 2 only.
        versions = sorted(versions, reverse=True, cmp=lambda x, y: cmp(x['DateCreated'], y['DateCreated']))
        # delete the older versions that are not deployed anywhere
        for version in versions[versions_to_keep:]:
            if version['VersionLabel'] in versions_in_use:
                out("Not deleting " + version["VersionLabel"] + " because it is in use")
            else:
                out("Deleting unused version: " + version["VersionLabel"])
                self.ebs.delete_application_version(application_name=self.app_name,
                                                    version_label=version['VersionLabel'])
                # Small pause to stay under the API rate limit.
                sleep(2)

    def wait_for_environments(self, environment_names, health=None, status=None, version_label=None,
                              include_deleted=True, wait_time_secs=300):
        """
        Waits for an environment to have the given version_label
        and to be in the green state

        Polls every 5s until every listed environment matches all requested
        conditions, a Ready+Red environment signals a failed deploy, or
        *wait_time_secs* elapses (raises in both failure cases).
        """
        # turn into a list
        if not isinstance(environment_names, (list, tuple)):
            environment_names = [environment_names]
        environment_names = environment_names[:]
        # print some stuff
        s = "Waiting for environment(s) " + (", ".join(environment_names)) + " to"
        if health is not None:
            s += " have health " + health
        else:
            s += " have any health"
        if version_label is not None:
            s += " and have version " + version_label
        if status is not None:
            s += " and have status " + status
        out(s)
        started = time()
        while True:
            # bail if they're all good
            if len(environment_names) == 0:
                break
            # wait
            sleep(5)
            # # get the env
            environments = self.ebs.describe_environments(
                application_name=self.app_name,
                environment_names=environment_names,
                include_deleted=include_deleted)
            environments = environments['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']
            if len(environments) <= 0:
                raise Exception("Couldn't find any environments")
            # loop through and wait
            for env in environments[:]:
                env_name = env['EnvironmentName']
                # the message
                msg = "Environment " + env_name + " is " + str(env['Health'])
                if version_label is not None:
                    msg = msg + " and has version " + str(env['VersionLabel'])
                if status is not None:
                    msg = msg + " and has status " + str(env['Status'])
                # what we're doing: all requested conditions must hold at once
                good_to_go = True
                if health is not None:
                    good_to_go = good_to_go and str(env['Health']) == health
                if status is not None:
                    good_to_go = good_to_go and str(env['Status']) == status
                if version_label is not None:
                    good_to_go = good_to_go and str(env['VersionLabel']) == version_label
                # Ready+Red means the deploy finished in a broken state.
                if env['Status'] == 'Ready' and env['Health'] == 'Red':
                    out('Deploy failed')
                    raise Exception('Ready and red')
                # log it
                if good_to_go:
                    out(msg + " ... done")
                    environment_names.remove(env_name)
                else:
                    out(msg + " ... waiting")
            # check the time
            elapsed = time() - started
            if elapsed > wait_time_secs:
                message = "Wait time for environment(s) {environments} to be {health} expired".format(
                    environments=" and ".join(environment_names), health=(health or "Green")
                )
                raise Exception(message)
``` |
{
"source": "JoseGtz/2021_python_selenium",
"score": 3
} |
#### File: 2021_python_selenium/Module_02/browsing.py
```python
from pathlib import Path
from selenium import webdriver
def get_project_root() -> Path:
    """Return the project root: the directory two levels above this file."""
    here = Path(__file__)
    return here.parent.parent
def get_chrome_path() -> Path:
    """Return the bundled chromedriver binary's path under drivers/."""
    return get_project_root().joinpath('drivers', 'chromedriver')
# Demo script: drive Chrome through a few sites and inspect driver state.
# NOTE(review): executable_path and find_elements_by_partial_link_text are
# Selenium 3 APIs, removed in Selenium 4 -- confirm the pinned version.
driver = webdriver.Chrome(executable_path=get_chrome_path())

driver.get('https://www.google.com')
print(f'Current Title: {driver.title}')
print(f'Current URL: {driver.current_url}')

driver.get('https://www.mlb.com/es')
print(f'Current Title: {driver.title}')
print(f'Current URL: {driver.current_url}')
print(f'Current Source: {driver.page_source}')

driver.get('https://www.nytimes.com/es')
driver.refresh()
print(f'Current Title: {driver.title}')
print(f'Current URL: {driver.current_url}')

# Navigate back through the history twice.
driver.back()
driver.back()
print(f'Current Title: {driver.title}')
print(f'Current URL: {driver.current_url}')

print(f'Cookies: {driver.get_cookies()}')
print(f'Cache: {driver.application_cache}')
print(f'Search Element: {driver.find_elements_by_partial_link_text("SOCzOAOac8uhByk5ZGU2Zg==")}')
if "SOCzOAOac8uhByk5ZGU2Zg==" in driver.page_source:
    print("Found")

# Fix: `driver.quit` only accessed the method object; the parentheses were
# missing, so the browser process was never actually closed.
driver.quit()
```
#### File: src/pages/inventory_details.py
```python
from selenium.webdriver.remote.webdriver import WebDriver
from Module_06.src.elements.base_page_element import BasePageElement
from Module_06.src.elements.header import Header
from Module_06.src.locators.inventory_details import InventoryDetailsLoc
from Module_06.src.mixin.InventoryItemMixin import InventoryItemMixin
from Module_06.src.pages.base_page import BasePage
_URL = 'https://www.saucedemo.com/inventory-item.html?id={0}'
class InventoryDetailsPage(InventoryItemMixin, BasePage):
    """Implements inventory item details"""

    def __init__(self, driver: WebDriver, timeout: int = 5):
        super().__init__(driver, _URL, timeout)
        # Element wrappers share the page's explicit wait; the mixin relies on
        # these attribute names for the common item accessors.
        self._title = BasePageElement(InventoryDetailsLoc.TITLE, wait=self._wait)
        self._description = BasePageElement(InventoryDetailsLoc.DESCRIPTION, wait=self._wait)
        self._price = BasePageElement(InventoryDetailsLoc.PRICE, wait=self._wait)
        self._inv_btn = BasePageElement(InventoryDetailsLoc.BTN, wait=self._wait)
        self._back_btn = BasePageElement(InventoryDetailsLoc.BACK_BTN, wait=self._wait)
        self.header = Header(self._wait)

    def back(self):
        """Click the back button (presumably returning to the inventory list
        -- the original docstring said 'back to details page'; confirm)."""
        self._back_btn.click()
```
#### File: tests/sauce_lab/test_checkout_details.py
```python
import pytest
from Module_06.src.elements.inventory_item import InventoryItem
from Module_06.src.pages.login import LoginPage
from Module_06.tests.common.test_base import TestBase
from Module_06.src.pages.checkout_details import CheckoutDetailsPage
from Module_06.src.pages.checkout_information import CheckoutInformationPage
# Default saucedemo credentials; the password placeholder was scrubbed from
# this dump and must be supplied for the tests to log in.
_DEF_USER = 'standard_user'
_DEF_PASSWORD = '<PASSWORD>'
class TestCheckoutDetails(TestBase):
    """UI tests covering the cart / checkout-details flow."""

    @pytest.mark.sanity
    @pytest.mark.regression
    @pytest.mark.checkout_details
    def test_checkout_details(self):
        """Add an item to the cart, open it, then continue shopping back to the list."""
        login = LoginPage(self.driver)
        login.open()
        inventory = login.login(_DEF_USER, _DEF_PASSWORD)
        first_item = inventory.products[0]
        first_item: InventoryItem
        first_item.add_to_cart()
        inventory.header.goto_cart()
        checkout_item = CheckoutDetailsPage(self.driver, 5)
        checkout_item.continue_shopping()
        # Back on the inventory page; refresh the cached product elements.
        inventory.products.reload()
        print(f'Total elements in cart: {inventory.header.get_total_cart_items()}')

    @pytest.mark.regression
    @pytest.mark.checkout_details
    def test_checkout_information(self):
        """Start checkout from the cart, then cancel on the information page."""
        login = LoginPage(self.driver)
        login.open()
        inventory = login.login(_DEF_USER, _DEF_PASSWORD)
        first_item = inventory.products[0]
        first_item: InventoryItem
        first_item.add_to_cart()
        inventory.header.goto_cart()
        checkout_item = CheckoutDetailsPage(self.driver, 5)
        checkout_item.checkout_btn()
        checkout_page = CheckoutInformationPage(self.driver, 5)
        checkout_page.cancel_checkout()
        print("Checkout Canceled")

    @pytest.mark.regression
    @pytest.mark.checkout_details
    def test_checkout_remove(self):
        """Add an item, open the cart and remove the item from checkout."""
        login = LoginPage(self.driver)
        login.open()
        inventory = login.login(_DEF_USER, _DEF_PASSWORD)
        first_item = inventory.products[0]
        first_item: InventoryItem
        first_item.add_to_cart()
        inventory.header.goto_cart()
        checkout_item = CheckoutDetailsPage(self.driver, 5)
        checkout_item.remove_item_checkout()
        print("Checkout Canceled")
``` |
{
"source": "joseguilhermefmoura/Advent-of-Code-2020",
"score": 4
} |
#### File: DAY 01/SECOND HALF/main.py
```python
def multiply(list_of_numbers: list) -> int:
    """Return the product of every number in *list_of_numbers* (1 when empty)."""
    product = 1
    for factor in list_of_numbers:
        product *= factor
    return product
def get_puzzle_answer() -> int:
    """Find three entries of ``input.txt`` summing to 2020; return their product.

    ``input.txt`` holds one integer per line. Returns None when no triple
    sums to 2020 (same fall-through behaviour as before).
    """
    # `with` guarantees the handle is closed even on the early return —
    # the original only closed the file on the success path.
    with open("input.txt", "r") as file_input:
        numbers = [int(line) for line in file_input.readlines()]
    # Same brute force over all ordered triples (entries may repeat),
    # but each line is converted to int once instead of inside the loops.
    for a in numbers:
        for b in numbers:
            for c in numbers:
                if a + b + c == 2020:
                    return a * b * c
def __main__():
    # Plain entry-point function (the dunder name is just a naming choice,
    # not the special module attribute).
    print("Merry Christmas! The answer for this problem is: {0}".format(get_puzzle_answer()))
if __name__ == "__main__":
    __main__()
```
#### File: DAY 02/FIRST HALF/main.py
```python
def get_puzzle_answer() -> int:
    """Count passwords in ``input.txt`` that satisfy their letter policy.

    Lines look like ``1-3 a: abcde``: the policy letter must appear between
    the min and max counts (inclusive) in the password.
    """
    # `with` closes the handle — the original never closed the file.
    with open("input.txt", "r") as file_input:
        file_lines = file_input.readlines()
    result = 0
    for line in file_lines:
        # "1-3 a: abcde" -> ["1-3", "a:", "abcde"]
        line_info = line.split(' ')
        password = line_info[2]
        minimum, maximum = (int(bound) for bound in line_info[0].split('-'))
        rule = line_info[1][0]
        # Valid when the rule letter's count falls inside [minimum, maximum].
        if minimum <= password.count(rule) <= maximum:
            result += 1
    return result
def __main__():
    # Plain entry-point function, invoked only when run as a script.
    print("Merry Christmas! The answer for this problem is: {0}".format(get_puzzle_answer()))
if __name__ == "__main__":
    __main__()
``` |
{
"source": "joseguilhermefmoura/pf",
"score": 4
} |
#### File: pf/PROBLEMA_2/solucao_elaborada.py
```python
import pandas as pd
def get_registers(csv_file):
    """Read a ';'-delimited CSV (header <Id;nome;telefone;idade>) and return
    its rows as dicts, ordered alphabetically by the 'nome' column.
    """
    frame = pd.read_csv(csv_file, delimiter=';')
    by_name = frame.sort_values('nome')
    # orient='index' yields one dict per row, preserving the sorted order.
    return by_name.to_dict('index').values()
def __main__():
    # Print each register after sorting; ordinary function despite the dunder name.
    for register in get_registers('entrada.csv'):
        print(register)
__main__()  # runs on import — this script has no __name__ == "__main__" guard
``` |
{
"source": "josehbez/awesome-privacy",
"score": 3
} |
#### File: josehbez/awesome-privacy/readme_test.py
```python
import unittest
from urllib.parse import urlparse
# Optional third-party deps: fail soft with an install hint instead of a traceback.
try:
    import markdown
    from bs4 import BeautifulSoup
except ImportError as e:
    print(e, "Use: pip install beautifulsoup4 markdown")
    # NOTE(review): exits with status 0, so CI treats missing deps as success —
    # consider a non-zero exit code.
    exit(0)
class TestReadme(unittest.TestCase):
    """Lint README.md's structure: anchored, sorted, duplicate-free link lists."""
    def setUp(self) -> None:
        # Render the markdown to HTML once so tests can query it with BeautifulSoup.
        with open('README.md', 'r') as md:
            content = md.read()
            contentHTML = markdown.markdown(content)
            self.soup = BeautifulSoup(contentHTML, "html.parser")
        return super().setUp()
    def test_readme(self):
        """Validate the Table of Contents and every section's link list."""
        # The ToC is the first <ul> after the "Table of Contents" heading.
        tc_title = self.soup.find("h2", text='Table of Contents')
        self.assertNotEqual(tc_title, None)
        tc_unordered_list = tc_title.find_next("ul")
        self.assertNotEqual(tc_unordered_list, None)
        tc_hyperlinks = tc_unordered_list.find_all('a')
        self.assertNotEqual(len(tc_hyperlinks), 0)
        tc_hyperlink_order = []
        for a in tc_hyperlinks:
            a_text = a.get_text()
            a_href = a['href']
            # Each ToC anchor must follow the GitHub slug convention:
            # lowercase text with spaces replaced by hyphens.
            self.assertEqual(a_href,
                "#{}".format(a_text.lower().replace(' ', '-')))
            # The linked section heading must exist exactly once.
            h3 = self.soup.find_all("h3", text=a_text)
            self.assertEqual(len(h3), 1)
            tc_hyperlink_order.append(a_text)
        # The table of contents must be alphabetically sorted.
        tc_hyperlink_order_asc = sorted(tc_hyperlink_order)
        self.assertListEqual(tc_hyperlink_order_asc, tc_hyperlink_order)
        # The body's <h3> sections must appear in the same sorted order.
        self.assertListEqual(tc_hyperlink_order_asc,
            [h.get_text() for h in self.soup.find_all("h3")])
        # Within each section, entries must be sorted and domains unique
        # across the whole document.
        domain = lambda s: urlparse(s).netloc
        hypelinks = []
        for i in tc_hyperlink_order_asc:
            s_title = self.soup.find("h3", text=i)
            s_unordered_list = s_title.find_next("ul")
            self.assertNotEqual(s_unordered_list, None)
            s_hyperlinks = s_unordered_list.find_all('a')
            self.assertNotEqual(len(s_hyperlinks), 0)
            s_hyperlink_order = []
            for a in s_hyperlinks:
                a_text = a.get_text()
                a_href = domain(a['href'])
                s_hyperlink_order.append(a_text)
                # No two entries may point at the same domain.
                self.assertFalse(a_href in hypelinks,
                    "%s : Hypelink duplicate " % a_href)
                hypelinks.append(a_href)
            # Section entries must be alphabetically sorted.
            s_hyperlink_order_asc = sorted(s_hyperlink_order)
            self.assertListEqual(s_hyperlink_order_asc, s_hyperlink_order)
if __name__ == '__main__':
    # Allow running the checks directly: python readme_test.py
    unittest.main()
``` |
{
"source": "josehbez/cumplo",
"score": 2
} |
#### File: cumplo/bm/models.py
```python
from django.db import models
class Serie(models.Model):
    """One observation of a Banxico time series: (serie id, publish date, value)."""
    date = models.DateField("Date Publish")
    value = models.FloatField("Value")
    serie = models.CharField("Serie ID", max_length=10)

    def __str__(self):
        # Human-readable row for the admin and shell; keeps the original wording.
        return f'{self.serie} , value: {self.value} , date: {self.date} '
```
#### File: cumplo/bm/test_api.py
```python
from django.test import TestCase
from . import libbm
class APITestCase(TestCase):
    """Integration checks for the `libbm` Banxico wrappers.

    These call the live API, so network access is required.
    """
    def test_udis(self):
        expected = {libbm.SERIE_UDIS: [{'fecha': '01/01/2021', 'dato': '6.606988'}]}
        payload = libbm.make_request(libbm.SERIE_UDIS, '2021-01-01', '2021-01-01')
        self.assertDictEqual(payload, expected)

    def test_dollar(self):
        expected = {libbm.SERIE_DOLLAR: [{'fecha': '04/01/2021', 'dato': '19.8457'}]}
        payload = libbm.make_request(libbm.SERIE_DOLLAR, '2021-01-04', '2021-01-04')
        self.assertDictEqual(payload, expected)

    def test_tiie(self):
        expected = {
            'SF331451': [{'fecha': '04/01/2021', 'dato': '4.30'}],
            'SF43783': [{'fecha': '04/01/2021', 'dato': '4.4805'}],
            'SF43878': [{'fecha': '04/01/2021', 'dato': '4.4590'}],
            'SF111916': None,
            'SF43947': None,
        }
        payload = libbm.make_request(libbm.SERIE_TIIE, '2021-01-04', '2021-01-04')
        self.assertDictEqual(payload, expected)
``` |
{
"source": "josehbez/gnome-pomodoro-tracking",
"score": 2
} |
#### File: gnome-pomodoro-tracking/plugins/toggl.py
```python
import configparser
from .gpt_plugin import GPTPlugin
from .gpt_utils import printtbl, join_url,\
find_by_id, only_columns, config_attrs
class Toggl(GPTPlugin):
    """Toggl Track plugin: forwards pomodoro time entries to the Toggl API v8.

    The API token and the selected workspace/project are persisted through the
    shared ``gpt`` config helper under this plugin's section.
    """
    name = "toggl"
    url = "https://api.track.toggl.com/api/v8"
    token = None

    def __init__(self, gpt):
        super().__init__(gpt)

    def setup(self):
        """Load the API token from config, prompting for ``--toggl-token`` when absent."""
        try:
            self.token = self.gpt.get_config(self.name, "token")
        except configparser.NoSectionError as e:
            # First run: create the plugin's config section, then require the token arg.
            self.gpt.logger.error(e)
            self.gpt.add_section_config(self.name)
            self.add_parse_args(kind="setup-args")
        except configparser.NoOptionError as e:
            self.gpt.logger.error(e)
            self.add_parse_args(kind="setup-args")
        params = self.gpt.parse.parse_args()
        self.token = params.toggl_token
        try:
            if self.auth():
                self.gpt.set_config(self.name, "token", self.token)
                # Fixed garbled success message ("now can do you use").
                print(f"{self.name} is now ready to use.")
            else:
                raise Exception("Fail auth")
        except Exception as e:
            self.gpt.logger.critical(e)
            exit(0)

    def add_parse_args(self, kind):
        """Register CLI arguments: the token during setup, listing flags otherwise."""
        if kind == "setup-args":
            self.gpt.parse.add_argument('--toggl-token',
                                        action='store',
                                        dest='toggl_token',
                                        help=' e.g 23bc78d4e46edd5479885db4260ecsf3',
                                        required=True)
        else:
            self.gpt.parse.add_argument('--toggl-workspaces',
                                        action='store_const',
                                        dest='toggl_workspaces',
                                        help='List workspaces',
                                        const=True)
            self.gpt.parse.add_argument('--toggl-projects',
                                        action='store_const',
                                        dest='toggl_projects',
                                        help='List projects',
                                        const=True)

    def http_auth(self):
        """Basic-auth pair Toggl expects: (api token, literal 'api_token')."""
        return (self.token, "api_token")

    def auth(self):
        """Return True when the stored token identifies a valid user."""
        try:
            req = self.rget(join_url(self.url, "me"), auth=self.http_auth())
            if req.ok:
                data = req.json()
                if data['data']['id']:
                    return True
            else:
                raise Exception(req.text)
        except Exception as e:
            self.gpt.logger.exception(e)
        return False

    def cli(self):
        """Handle ``--toggl-workspaces`` / ``--toggl-projects`` (optionally with ``--set ID``)."""
        params = self.gpt.parse.parse_args()
        if hasattr(params, 'toggl_workspaces') and params.toggl_workspaces:
            try:
                rows = self.workspaces()
                if rows:
                    rows = only_columns(rows)
                    if params.set:
                        row = find_by_id(rows, params.set)
                        if row:
                            # Choosing a workspace invalidates any project choice.
                            self.gpt.set_config(self.name, "workspace_id", row.get('id'))
                            self.gpt.set_config(self.name, "workspace_name", row.get('name'))
                            self.gpt.set_config(self.name, "project_id", "")
                            self.gpt.set_config(self.name, "project_name", "")
                            printtbl([row])
                        else:
                            print('The workspace ID was not found')
                    else:
                        printtbl(rows)
                else:
                    raise Exception("Fail to get workspaces")
            except Exception as e:
                self.gpt.logger.exception(e)
        elif hasattr(params, 'toggl_projects') and params.toggl_projects:
            try:
                workspace_id = self.gpt.get_config(self.name, "workspace_id")
            except Exception:
                # No workspace configured yet: fall back to the account's first one.
                workspace = self.workspaces(filter='first')
                workspace_id = workspace.get('id')
            try:
                rows = self.projects(workspace_id)
                if rows:
                    rows = only_columns(rows)
                    if params.set:
                        row = find_by_id(rows, params.set)
                        if row:
                            self.gpt.set_config(self.name, "project_id", row.get('id'))
                            self.gpt.set_config(self.name, "project_name", row.get('name'))
                            printtbl([row])
                        else:
                            print('The project ID was not found')
                    else:
                        printtbl(rows)
                else:
                    raise Exception("Fail to get projects")
            except Exception as e:
                self.gpt.logger.exception(e)

    def workspaces(self, filter=""):
        """List workspaces; with ``filter='first'`` return only the first one.

        Returns None on any error; with ``filter='first'`` an empty account
        yields a falsy 0 (``len(data) and data[0]``).
        """
        url = join_url(self.url, "workspaces")
        try:
            req = self.rget(url, auth=self.http_auth())
            if req.ok:
                data = req.json()
                self.gpt.logger.info(data)
                if filter == 'first':
                    return len(data) and data[0]
                return data
            else:
                raise Exception(req.text)
        except Exception as e:
            self.gpt.logger.exception(e)
        return None

    def projects(self, workspace_id, filter=""):
        """List a workspace's projects; same contract as :meth:`workspaces`."""
        try:
            url = join_url(self.url, "workspaces/{}/projects".format(workspace_id))
            req = self.rget(url, auth=self.http_auth())
            if req.ok:
                data = req.json()
                self.gpt.logger.info(data)
                if filter == 'first':
                    return len(data) and data[0]
                # Consistency fix: return inside the success branch, mirroring
                # workspaces(); the old `return data` sat after the raise.
                return data
            else:
                raise Exception(req.text)
        except Exception as e:
            self.gpt.logger.exception(e)
        return None

    def add_time_entry(self, **kwargs):
        """Create a Toggl time entry; return ``{'id', 'name'}`` or None on failure.

        kwargs: name, start, end (timestamps) and minutes (duration).
        """
        name = kwargs.get('name')
        start = kwargs.get('start')
        end = kwargs.get('end')
        minutes = kwargs.get('minutes')
        workspace_id = None
        try:
            workspace_id = self.gpt.get_config(self.name, "workspace_id")
        except Exception:
            try:
                # BUG FIX: workspaces() returns a single dict, not a
                # (value, err) tuple — the old `workspace, err = ...`
                # unpacking always raised and was silently swallowed,
                # leaving workspace_id unset.
                workspace = self.workspaces(filter='first')
                workspace_id = workspace.get('id')
            except Exception:
                pass
        project_id = None
        try:
            project_id = self.gpt.get_config(self.name, "project_id")
        except Exception:
            pass
        time_entry = {
            "start": start,  # Required
            "description": name,
            "projectId": project_id,  # NOTE(review): v8 uses 'pid' (added below); confirm this key is needed.
            "stop": end,  # Required
            "duration": float(minutes) * 60,  # Toggl expects seconds
            "created_with": "gp-tracking"
        }
        if workspace_id:
            time_entry.update({'wid': workspace_id})
        if project_id:
            time_entry.update({'pid': project_id})
        try:
            url = join_url(self.url, "time_entries")
            req = self.rpost(
                url, auth=self.http_auth(),
                json={"time_entry": time_entry}
            )
            if req.ok:
                data = req.json()
                self.gpt.logger.info(data)
                return {'id': data['data']['id'], 'name': name}
            else:
                raise Exception(req.text)
        except Exception as e:
            self.gpt.logger.exception(e)
        return None

    def status(self):
        """Print the currently selected workspace/project names."""
        attrs = ['workspace_name', 'project_name']
        items = config_attrs(self.gpt, self.name, attrs, formatter='status')
        printtbl(items)
``` |
{
"source": "josehbez/grainchain",
"score": 2
} |
#### File: grainchain/test1/app.py
```python
import uvicorn
from fastapi import FastAPI, HTTPException
from libmongo import LibMongo
from libzeta import LibZeta
from user import User, UserResponse, DeleteResponse, FakeUserResponse, UserUpdate
from faker import Faker
from random import randrange
import re
app = FastAPI()
def run():
    """Serve the app locally on port 8022."""
    uvicorn.run(app, port=8022)
@app.get("/")
def get_root():
    """Trivial liveness endpoint."""
    return {"Hello": "World"}
# Shared service clients, created at import time.
# NOTE(review): credentials are hard-coded; move them to environment
# variables or a secrets store before deploying.
libmongo = LibMongo(
    "localhost:27017",
    "jose",
    "Ooph7Jahnohch7Hoah3pheejeizuetha",
    "jose"
)
libzeta = LibZeta(
    "http://localhost:3000",
    "aw2Gei9NeePhiel6ohYi1hai",
    "ooT7loh2ohPh6shopaideeX6"
)
def username_constraint(username):
    """Validate the username pattern (letter/underscore start, 4-30 word chars).

    Returns True on success; raises HTTPException 400 otherwise.
    """
    pattern = r"^[A-Za-z_][A-Za-z0-9_]{3,29}$"
    if re.match(pattern, username):
        return True
    raise HTTPException(
        status_code=400,
        detail=f"The username {username} don't use the pattern")
@app.post("/user", response_model=UserResponse)
def create_user(user: User):
    """Create a user plus its remote zeta account.

    Request body: ``{"name", "username", "location"}``.
    Returns the stored user including the generated ``zeta`` sub-document.

    Raises
    ------
    HTTPException: 400 (bad username), 409 (duplicate), 500 (unexpected).
    """
    new_user = None
    try:
        username_constraint(user.username)
        if not libmongo.users.find_one({"username": user.username}):
            user_data = user.dict()
            # Provision the remote zeta account before persisting locally.
            zeta_data = libzeta.create_user(user.username)
            user_data.update({'zeta': zeta_data})
            new_user = UserResponse.parse_obj(user_data)
            # Removed the unused `id = ...` binding; the insert result was never read.
            libmongo.users.insert_one(user_data)
        else:
            raise HTTPException(
                status_code=409,
                detail=f"The user with username {user.username} already exists")
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return new_user
@app.get("/user/{username}", response_model=UserResponse)
def get_user(username: str):
    """Fetch one user by username.

    Returns the stored user document (including the ``zeta`` sub-document).

    Raises
    ------
    HTTPException: 400 (bad username), 404 (missing), 500 (unexpected).
    """
    try:
        username_constraint(username)
        record = libmongo.users.find_one({"username": username})
        # Guard clause instead of if/else: missing user -> 404.
        if not record:
            raise HTTPException(
                status_code=404,
                detail=f"The user with username {username} don't exists")
        found = UserResponse.parse_obj(record)
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return found
@app.delete("/user/{username}", response_model=DeleteResponse)
def delete_user(username: str):
    """Delete one user by username.

    Returns ``{"deleted": true}`` on success.

    Raises
    ------
    HTTPException: 400 (bad username), 404 (missing), 500 (unexpected).
    """
    # BUG FIX: the original initialized an unused `response = False` and then
    # read `deleted`, which was only bound on the success branch.
    deleted = False
    try:
        username_constraint(username)
        filters = {"username": username}
        user_data = libmongo.users.find_one(filters)
        if user_data:
            libmongo.users.delete_one(filters)
            deleted = True
        else:
            raise HTTPException(
                status_code=404,
                detail=f"The user with username {username} don't exists")
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return DeleteResponse.parse_obj({'deleted': deleted})
@app.put("/user/{username}", response_model=UserResponse)
def update_user(username: str, user: UserUpdate):
    """
    Parameters
    ----------
    Request: User, required minimum one
        {
            "name": "string",    # optional
            "username": "string",    # optional. Is new username, update the zeta data
            "location": "string",    # optional
            "zeta": bool    # optional. if is true, update the zeta data
        }
    Returns
    -------
    Response:
        {
            "name": "string",
            "username": "string",
            "location": "string",
            "zeta": {
                "username": "string",
                "token": "string",
                "profiles": []
            }
        }
    Raises
    ------
        HTTPException
    """
    # NOTE(review): this local shadows the function name `update_user`.
    update_user = None
    try:
        username_constraint(username)
        if libmongo.users.find_one({"username": username}):
            user_data_raw = user.dict()
            user_data = {}
            # Keep only the fields the caller actually supplied.
            for key in user_data_raw.keys():
                if user_data_raw[key] is not None:
                    user_data.update({key: user_data_raw[key]})
            if len(user_data.keys()) == 0:
                raise HTTPException(
                    status_code=400,
                    detail="Is required minimum one field")
            new_username = user_data.get('username', username)
            if new_username != username:
                # Renaming: validate the new name and regenerate zeta data.
                username_constraint(new_username)
                if not libmongo.users.find_one({"username": new_username}):
                    zeta_data = libzeta.create_user(new_username)
                    user_data.update({'zeta': zeta_data})
                else:
                    raise HTTPException(
                        status_code=409,
                        detail=f"The username {new_username} already exists")
            else:
                if 'username' in user_data.keys():
                    del user_data['username']
            # zeta=True without a rename: refresh the zeta data in place
            # (the boolean flag is overwritten by the generated document).
            if user_data.get("zeta", False) and not user_data.get('username', False):
                zeta_data = libzeta.create_user(new_username)
                user_data.update({'zeta': zeta_data})
            libmongo.users.update_one(
                {"username": username},
                {"$set": user_data},
            )
            # Re-read so the response reflects the stored document.
            user_data = libmongo.users.find_one({"username": new_username})
            update_user = UserResponse.parse_obj(user_data)
        else:
            raise HTTPException(
                status_code=404,
                detail=f"The user with username {username} don't exists")
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return update_user
@app.get('/fake/users', response_model=FakeUserResponse)
def get_fake_users():
    """Seed the collection with 100 fake users spread over 20 fake locations.

    Returns ``{"count": n}`` with the number of inserted documents.

    Raises
    ------
    HTTPException: 500 on any unexpected failure.
    """
    count = 0
    try:
        fake = Faker()
        locations = [fake.address() for _ in range(0, 20)]
        batch = []
        for _ in range(0, 100):
            person = User(
                name=fake.name(),
                username=fake.unique.first_name(),
                location=locations[randrange(0, 20)],
            )
            doc = person.dict()
            doc.update({"zeta": libzeta.create_user(person.username)})
            batch.append(doc)
        outcome = libmongo.users.insert_many(batch)
        count = len(outcome.inserted_ids)
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return FakeUserResponse.parse_obj({"count": count})
@app.delete("/fake/users", response_model=FakeUserResponse)
def delete_fake_users():
    """Remove every user document.

    Returns ``{"count": n}`` with the number of deleted documents.

    Raises
    ------
    HTTPException: 500 on any unexpected failure.
    """
    count = 0
    try:
        outcome = libmongo.users.delete_many({})
        count = outcome.deleted_count
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return FakeUserResponse.parse_obj({"count": count})
@app.get("/report/users")
def get_report_users():
    """
    Returns
    -------
    Response:
        {
            "location_N": {
                "location": "string",
                "users": 0    # Total users located
            },
            "_total": 0    # Total users that exists
        }
    Raises
    ------
        HTTPException
    """
    report = {}
    try:
        total = libmongo.users.count_documents({})
        if total == 0:
            raise HTTPException(
                status_code=404,
                detail="Not found records"
            )
        # Group users by exact location string and count each group.
        locations = libmongo.users.aggregate([{
            "$group": {
                "_id": "$location",
                "users": { "$sum": 1 }
            }
        }])
        # enumerate() gives each group a positional key: location_0, location_1, ...
        for key, location in enumerate(locations):
            report.update({
                f"location_{key}": {
                    "location": location.get("_id"),
                    "users": location.get("users"),
                }})
        report.update({"_total": total})
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return report
``` |
{
"source": "josehbez/try",
"score": 2
} |
#### File: try/app/__init__.py
```python
import os
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
from flask_restful import Api
import os  # NOTE(review): duplicate of the import above — harmless but removable.
app = Flask(__name__)
# Load config
# APP_SETTINGS selects a deployment profile; an *empty* value also falls back to dev.
config_name = os.getenv('APP_SETTINGS', 'dev')
if not len(config_name):
    config_name='dev'
app.config.from_json("../deployments/%s/config.json" % config_name)
# DB manager
db = SQLAlchemy(app)
# API manager
api = Api(app, prefix='/v1')
def api_load_resources():
    """Register every REST resource under the /v1 prefix.

    Imports are local to avoid circular imports: the resource modules import
    `app`/`db` from this package.
    """
    from app.auth.resources import Register, Login
    api.add_resource(Register, "/auth/register")
    api.add_resource(Login, "/auth/login")
    from app.warehouse.resources import Warehouse
    api.add_resource(Warehouse, "/warehouse", "/warehouse/<int:id>")
    from app.product.resources import Product
    api.add_resource(Product, "/product", "/product/<string:id>")
    from app.stock.stock_move_resources import Purchase, Sale
    api.add_resource(Purchase, "/purchase")
    api.add_resource(Sale, "/sale")
    from app.stock.stock_resources import Stock, StockProduct, StockWarehouse
    api.add_resource(Stock, "/stock", "/stock/<int:product_id>/<int:warehouse_id>")
    api.add_resource(StockProduct, "/stock/product/<int:product_id>")
    api.add_resource(StockWarehouse, "/stock/warehouse/<int:warehouse_id>")
api_load_resources()
@app.route("/")
def index():
    """Landing page showing which deployment profile is active."""
    return render_template("index.html", environment=config_name)
@app.before_request
def _jwt_required():
    """Reject requests that fail JWT validation before any view runs."""
    # Imported lazily to avoid a circular import at module load time.
    from app.auth.resources import jwt_required
    err = jwt_required()
    # Idiom fix: compare against None with `is not`, not `!=`.
    if err is not None:
        # NOTE(review): 401 is the conventional status for failed auth;
        # the original returned 404 — kept for compatibility, confirm intent.
        return err, 404
#db.create_all()
```
#### File: app/product/models.py
```python
from app import db, commun
class Product(commun.BaseModel):
    """Catalog product identified by a unique SKU."""
    __tablename__ = "product"
    sku = db.Column(db.String(250), nullable=False, unique=True)

    def __init__(self, name, sku):
        self.name = name
        self.sku = sku

    def serialize(self):
        """JSON-safe dict of the public fields."""
        return {
            'id': self.id,
            'name': self.name,
            'sku': self.sku,
        }

    @staticmethod
    def by(**kwargs):
        """First product matching the filters, or abort with 404."""
        description = 'Record with {} is not available'.format(str(kwargs))
        return Product.query.filter_by(**kwargs).first_or_404(description=description)

    @staticmethod
    def all():
        """Every product row."""
        return Product.query.all()
```
#### File: app/stock/stock_resources.py
```python
from app.stock.models import stock_all, stock_product_warehouse,\
stock_product, stock_warehouse
from app.commun import rp, BaseResource, is_int
class Stock(BaseResource):
    """GET /stock or /stock/<product_id>/<warehouse_id>."""
    def get(self, product_id: int = 0, warehouse_id: int = 0):
        # Both ids present -> single product/warehouse cell; otherwise full table.
        if product_id > 0 and warehouse_id > 0:
            rows = stock_product_warehouse(product_id, warehouse_id)
        else:
            rows = stock_all()
        if isinstance(rows, Exception):
            return rp(message=str(rows)), 404
        return rp(success=True, payload=rows), 200
class StockProduct(BaseResource):
    """GET /stock/product/<product_id>: stock rows for a single product."""
    def get(self, product_id: int = 0):
        if product_id > 0:
            rows = stock_product(product_id)
        else:
            # Typo fix in the user-facing message: "greate" -> "greater than".
            return rp(message="The product ID must be an integer and greater than zero"), 404
        if isinstance(rows, Exception):
            return rp(message=str(rows)), 404
        return rp(success=True, payload=rows), 200
class StockWarehouse(BaseResource):
    """GET /stock/warehouse/<warehouse_id>: stock rows for a single warehouse."""
    def get(self, warehouse_id: int = 0):
        if warehouse_id > 0:
            rows = stock_warehouse(warehouse_id)
        else:
            # Typo fix in the user-facing message: "greate" -> "greater than".
            return rp(message="The warehouse ID must be an integer and greater than zero"), 404
        if isinstance(rows, Exception):
            return rp(message=str(rows)), 404
        return rp(success=True, payload=rows), 200
```
#### File: app/warehouse/resources.py
```python
from flask_restful import reqparse
from app.warehouse.models import Warehouse as WarehouseModel
from app.commun import rp, BaseResource
class Warehouse(BaseResource):
    """CRUD endpoints for warehouses (/warehouse, /warehouse/<id>)."""

    def reqparse(self):
        """Parse the writable fields (name, address); both are required."""
        post_parse = reqparse.RequestParser()
        post_parse.add_argument('name', dest='name', required=True,
                                help="The warehouse's name")
        post_parse.add_argument('address', dest='address', required=True,
                                help="The warehouse's address")
        return post_parse.parse_args()

    def get(self, id: int = 0):
        """One warehouse when id > 0, otherwise the full list."""
        if id > 0:
            rows = WarehouseModel.by(id=id).serialize()
        else:
            rows = [a.serialize() for a in WarehouseModel.all()]
        return rp(success=True, payload=rows)

    def post(self):
        """Create a warehouse; 201 with the record, 500 on save error."""
        args = self.reqparse()
        row = WarehouseModel(args.name, args.address)
        err = row.save()
        # Idiom fix throughout: `is not None` instead of `!= None`.
        if err is not None:
            res = rp(message=str(err)), 500
        else:
            res = rp(success=True, payload=row.serialize()), 201
        return res

    def put(self, id: int):
        """Replace name/address of an existing warehouse."""
        args = self.reqparse()
        row = WarehouseModel.by(id=id)
        row.name = args.name
        row.address = args.address
        err = row.update()
        if err is not None:
            res = rp(message=str(err)), 500
        else:
            res = rp(success=True, payload=row.serialize()), 201
        return res

    def delete(self, id: int):
        """Delete a warehouse.

        NOTE(review): 204 responses carry no body, so the serialized payload
        returned here is dropped by most servers — confirm the intended status.
        """
        row = WarehouseModel.by(id=id)
        err = row.delete()
        if err is not None:
            res = rp(message=str(err)), 500
        else:
            res = rp(success=True, payload=row.serialize()), 204
        return res
``` |
{
"source": "josehenriqueroveda/spraying-API",
"score": 3
} |
#### File: josehenriqueroveda/spraying-API/main.py
```python
from fastapi import FastAPI
from starlette.responses import RedirectResponse
from ratelimit import limits
import sys
import uvicorn
import requests
import json
import config
# Rate-limit window, in seconds.
ONE_MINUTE = 60
app = FastAPI(title='Spraying conditions API',
    description='API for real-time analysis of climatic conditions generating the result whether they are suitable or not for agricultural spraying.')
@app.get("/")
async def docs():
    """Redirect the root path to the interactive Swagger docs."""
    response = RedirectResponse(url='/docs')
    return response
@app.get("/spray/condition")
@limits(calls=30, period=ONE_MINUTE)
async def check_spray_condition(city: str):
    """Fetch current weather for *city* and classify spraying suitability.

    Decorator-order fix: FastAPI registers whatever callable ``@app.get``
    decorates. With the original order (``@limits`` outermost), the route
    was registered *before* the rate limiter wrapped it, so the 30
    calls/minute limit never applied. ``@limits`` must sit closest to the
    function.
    """
    try:
        response = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&lang=pt&appid={config.OWM_KEY}')
        wheather_info = json.loads(response.text)
        description = wheather_info['weather'][0]['main']
        temperature = int(wheather_info['main']['temp'])
        feels_like = int(wheather_info['main']['feels_like'])
        humidity = int(wheather_info['main']['humidity'])
        # OpenWeatherMap reports m/s with units=metric; convert to km/h.
        # NOTE(review): truncating to int *before* converting loses precision — confirm intent.
        wind = int(wheather_info['wind']['speed'])*3.6
        spray_condition = ''
        bad_conditions = ['Thunderstorm', 'Drizzle', 'Rain', 'Snow']
        # Good window: no precipitation, 10-30 °C, >50% humidity, 3-10 km/h wind.
        if (description not in bad_conditions) and (10 < temperature < 30) and (10 < feels_like < 30) and (humidity > 50) and (3 < wind < 10):
            spray_condition = 'Good weather conditions for spraying'
        else:
            # Report the first limiting factor in priority order.
            if description in bad_conditions:
                spray_condition = f'Bad weather conditions for spraying: {description}'
            elif (temperature > 30) and (feels_like > 30) and (humidity > 50) and (3 < wind < 10):
                spray_condition = f'Bad weather conditions: {temperature} °C is too hot for spraying'
            elif (temperature <= 10) and (feels_like <= 10) and (humidity > 50) and (3 < wind < 10):
                spray_condition = f'Bad weather conditions: {temperature} °C is too cold for spraying'
            elif (temperature < 30) and (feels_like < 30) and (humidity < 50) and (3 < wind < 10):
                spray_condition = f'Bad weather conditions: {humidity} % air humidity. It is below that recommended for spraying'
            elif (temperature < 30) and (feels_like < 30) and (humidity > 50) and (wind < 3):
                spray_condition = f'Bad weather conditions: The wind speed of {wind} km/h is very low and not recommended for spraying'
            elif (temperature < 30) and (feels_like < 30) and (humidity > 50) and (wind > 10):
                spray_condition = f'Bad weather conditions: The wind speed of {wind} km/h is above the recommended and can cause drift.'
            else:
                spray_condition = 'Bad weather conditions for spraying'
        result = ({'city': city,
                   'description': description,
                   'temperature': f'{temperature} °C',
                   'feels_like': f'{feels_like} °C',
                   'humidity': f'{humidity} %',
                   'wind': f'{wind} km/h',
                   'spray_condition': spray_condition})
        return result
    except:
        # Broad on purpose: log the error type, then propagate so FastAPI returns a 500.
        print("Unexpected error:", sys.exc_info()[0])
        raise
if __name__ == "__main__":
    # Local dev entry point; in production run through an ASGI server command.
    uvicorn.run(app, host="0.0.0.0", port=8000)
``` |
{
"source": "joseherazo04/Covid19-SearchEngine",
"score": 3
} |
#### File: joseherazo04/Covid19-SearchEngine/update indexes.py
```python
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pathlib import Path, PurePath
import pandas as pd
import requests
from requests.exceptions import HTTPError, ConnectionError
from rank_bm25 import BM25Okapi
import nltk
from nltk.corpus import stopwords
nltk.download("punkt")
import re
import pandas as pd
'''
FILES PATH
'''
input_dir = PurePath('2020-03-23')
# Metadata for papers from all sources; id columns kept as strings.
metadata = pd.read_csv(input_dir / 'metadata.csv',
                       dtype={'Microsoft Academic Paper ID': str,
                              'pubmed_id': str})
# Convert the doi to a url
def doi_url(d):
    """Return *d* as a resolvable URL, adding the doi.org host for bare DOIs."""
    prefix = '' if d.startswith('doi.org') else 'doi.org/'
    return f'http://{prefix}{d}'
# Normalize every DOI to a clickable URL (missing DOIs become 'http://doi.org/').
metadata.doi = metadata.doi.fillna('').apply(doi_url)
# Set the abstract to the paper title if it is null
metadata.abstract = metadata.abstract.fillna(metadata.title)
# Some papers are duplicated since they were collected from separate sources. Thanks <NAME>
duplicate_paper = ~(metadata.title.isnull() | metadata.abstract.isnull()) & (metadata.duplicated(subset=['title', 'abstract']))
metadata = metadata[~duplicate_paper].reset_index(drop=True)
def get(url, timeout=6):
    """Fetch *url* and return the response body, or None after printing the error."""
    try:
        r = requests.get(url, timeout=timeout)
        return r.text
    except ConnectionError:
        print(f'Cannot connect to {url}')
        print('Remember to turn Internet ON in the Kaggle notebook settings')
    except HTTPError as e:
        # BUG FIX: the old handler printed `r.status`/`r.text`, but a requests
        # Response has `status_code` (not `status`) and `r` may be unbound if
        # the request itself raised; report via the exception instead.
        print('Got http error', e)
class DataHolder:
    """Thin DataFrame wrapper adding notebook-friendly helpers."""

    def __init__(self, data: pd.DataFrame):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        # Label-based row access, like df.loc.
        return self.data.loc[item]

    def head(self, n: int):
        """First *n* rows as a new, independent DataHolder."""
        return DataHolder(self.data.head(n).copy())

    def tail(self, n: int):
        """Last *n* rows as a new, independent DataHolder."""
        return DataHolder(self.data.tail(n).copy())

    def _repr_html_(self):
        return self.data._repr_html_()

    def __repr__(self):
        return repr(self.data)
class ResearchPapers:
    """Collection view over the metadata DataFrame; indexing yields Paper objects."""

    def __init__(self, metadata: pd.DataFrame):
        self.metadata = metadata

    def __getitem__(self, item):
        return Paper(self.metadata.iloc[item])

    def __len__(self):
        return len(self.metadata)

    def head(self, n):
        """First *n* papers as a new collection with a reset index."""
        subset = self.metadata.head(n).copy()
        return ResearchPapers(subset.reset_index(drop=True))

    def tail(self, n):
        """Last *n* papers as a new collection with a reset index."""
        subset = self.metadata.tail(n).copy()
        return ResearchPapers(subset.reset_index(drop=True))

    def abstracts(self):
        """Non-null abstracts as a Series."""
        return self.metadata.abstract.dropna()

    def titles(self):
        """Non-null titles as a Series."""
        return self.metadata.title.dropna()

    def _repr_html_(self):
        return self.metadata._repr_html_()
class Paper:
    '''
    A single research paper
    '''
    def __init__(self, item):
        # One metadata row reshaped into a single-column frame for display.
        self.paper = item.to_frame().fillna('')
        self.paper.columns = ['Value']
    def doi(self):
        """The paper's DOI URL string."""
        return self.paper.loc['doi'].values[0]
    def html(self):
        '''
        Load the paper from doi.org and display as HTML. Requires internet to be ON
        '''
        # NOTE(review): `widgets` is never imported in this script (likely a lost
        # `import ipywidgets as widgets`) — this method raises NameError as-is.
        text = get(self.doi())
        return widgets.HTML(text)
    def text(self):
        '''
        Load the paper from doi.org and display as text. Requires Internet to be ON
        '''
        text = get(self.doi())
        return text
    def abstract(self):
        return self.paper.loc['abstract'].values[0]
    def title(self):
        return self.paper.loc['title'].values[0]
    def authors(self, split=False):
        '''
        Get a list of authors
        '''
        authors = self.paper.loc['authors'].values[0]
        if not authors:
            return []
        if not split:
            return authors
        # Some sources store authors as a stringified Python list: "['A', 'B']".
        if authors.startswith('['):
            authors = authors.lstrip('[').rstrip(']')
            return [a.strip().replace("\'", "") for a in authors.split("\',")]
        # Todo: Handle cases where author names are separated by ","
        return [a.strip() for a in authors.split(';')]
    def _repr_html_(self):
        return self.paper._repr_html_()
papers = ResearchPapers(metadata)
'''
SEARCH INDEX
'''
from rank_bm25 import BM25Okapi  # NOTE(review): duplicate of the top-of-file import.
# Stopword list requires the NLTK corpus download at first run.
nltk.download('stopwords')
english_stopwords = list(set(stopwords.words('english')))
def strip_characters(text):
    """Remove punctuation/special characters; slashes become spaces, quotes vanish."""
    cleaned = re.sub('\(|\)|:|,|;|\.|’|”|“|\?|%|>|<', '', text)
    cleaned = re.sub('/', ' ', cleaned)
    return cleaned.replace("'", '')
def clean(text):
    """Lowercase *text* and strip special characters."""
    return strip_characters(text.lower())
def tokenize(text):
    """Split *text* into a list of unique, informative word tokens.

    Drops single characters, English stopwords, and numeric noise.
    """
    words = nltk.word_tokenize(text)
    # BUG FIX: `len(word) is not 4` identity-compared small ints (undefined
    # behaviour and a SyntaxWarning on modern CPython); use `!=`.
    # NOTE(review): the numeric conditions look intended to keep 4-digit years,
    # but the final clause drops all numerics — confirm the intended filter.
    return list(set([word for word in words
                     if len(word) > 1
                     and word not in english_stopwords
                     and not (word.isnumeric() and len(word) != 4)
                     and (not word.isnumeric() or word.isalpha())]))
def preprocess(text):
    # Normalize then tokenize: lowercase, strip punctuation, drop stopwords.
    t = clean(text)
    tokens = tokenize(t)
    return tokens
class SearchResults:
    """Holds search hits; optionally restricted to a column subset."""

    def __init__(self, data: pd.DataFrame, columns=None):
        # Keep only the requested columns when a non-empty subset is given.
        self.results = data[columns] if columns else data

    def __getitem__(self, item):
        return Paper(self.results.loc[item])

    def __len__(self):
        return len(self.results)

    def _repr_html_(self):
        return self.results._repr_html_()
# Columns shown for every search hit.
SEARCH_DISPLAY_COLUMNS = ['title', 'abstract', 'doi', 'authors', 'journal']
class WordTokenIndex:
    """Naive keyword index: a row matches when any query term appears in it."""
    def __init__(self,
                 corpus: pd.DataFrame,
                 columns=SEARCH_DISPLAY_COLUMNS):
        self.corpus = corpus
        # Index text = abstract + title, tokenized once up front.
        raw_search_str = self.corpus.abstract.fillna('') + ' ' + self.corpus.title.fillna('')
        self.index = raw_search_str.apply(preprocess).to_frame()
        self.index.columns = ['terms']
        self.index.index = self.corpus.index
        self.columns = columns
    def search(self, search_string):
        """Rows whose token sets intersect the query's tokens (no ranking)."""
        search_terms = preprocess(search_string)
        result_index = self.index.terms.apply(lambda terms: any(i in terms for i in search_terms))
        # 'paper' keeps the original row position for later lookup.
        results = self.corpus[result_index].copy().reset_index().rename(columns={'index':'paper'})
        return SearchResults(results, self.columns + ['paper'])
'''
RANK SEARCH INDEX CLASS
'''
class RankBM25Index(WordTokenIndex):
    """BM25-ranked search built on top of the token index."""

    def __init__(self, corpus: pd.DataFrame, columns=SEARCH_DISPLAY_COLUMNS):
        super().__init__(corpus, columns)
        self.bm25 = BM25Okapi(self.index.terms.tolist())

    def search(self, search_string, n=10):
        """Return the top *n* papers by BM25 score; zero-score hits dropped."""
        search_terms = preprocess(search_string)
        doc_scores = self.bm25.get_scores(search_terms)
        # Indices of the n highest-scoring documents, best first.
        ind = np.argsort(doc_scores)[::-1][:n]
        results = self.corpus.iloc[ind][self.columns]
        results['Score'] = doc_scores[ind]
        results = results[results.Score > 0]
        return SearchResults(results.reset_index(), self.columns + ['Score'])
'''
CREATE INDEX
'''
print("Creating index...")
# `.head(len(metadata))` selected every row — a no-op; index the frame directly.
bm25_index = RankBM25Index(metadata)
'''
SAVING FILE
'''
import pickle
import datetime
# Timestamped filename so successive runs never overwrite an older index.
now = datetime.datetime.now()
file_name = 'index' + now.strftime('%Y%m%d%H%M') + '.pickle'
print("saving index file...: " + file_name)
with open(file_name, 'wb') as f:
    pickle.dump(bm25_index, f)
print("saved file: " + file_name)
``` |
{
"source": "JoseHernandez9094/CohortLexicase",
"score": 3
} |
#### File: CohortLexicase/Summarize/agg_prog_cnt.py
```python
# Selection schemes whose per-problem CSVs receive the appended cn1_cs512 rows.
SELECTION = ['DOWN_SAMPLE_TESTS', 'TRUNCATED', 'PROG_ONLY_COHORT_LEX']
# Filename prefixes for the two aggregated metrics.
file_gen = "Problem__Generations__"
file_eva = "Problem__Evaluations__"
import argparse, csv
import pandas as pd
def _aggregate(data_directory, prefix):
    """Append the cn1_cs512 rows from *prefix*'s COHORT_LEX CSV onto each
    selection scheme's CSV (header-less, append mode).

    Row layout of the input CSV: [index, problem, dims, count].
    """
    rows = pd.read_csv(data_directory + prefix + "COHORT_LEX.csv").values.tolist()
    probs, dims, cnts = [], [], []
    for row in rows:
        # Only the single-cohort (full lexicase) dimension is carried over.
        if row[2] == 'cn1_cs512':
            probs.append(row[1])
            dims.append(row[2])
            cnts.append(int(row[3]))
    frame = pd.DataFrame({'problem': probs, 'dims': dims, 'count': cnts},
                         columns=['problem', 'dims', 'count'])
    for p in SELECTION:
        frame.to_csv(data_directory + prefix + p + ".csv", header=False, mode='a')

def main():
    """Parse the target directory and aggregate both the Generations and
    Evaluations files. (The original duplicated the same pass twice; both
    now go through _aggregate.)"""
    parser = argparse.ArgumentParser(description="Data aggregation script.")
    parser.add_argument("data_directory", type=str, help="Target experiment directory.")
    args = parser.parse_args()
    data_directory = args.data_directory.strip()
    _aggregate(data_directory, file_gen)
    _aggregate(data_directory, file_eva)

if __name__ == "__main__":
    main()
```
#### File: CohortLexicase/Summarize/solution_cnt_eval.py
```python
# Maps raw selection-scheme tokens from the treatment string to display labels.
SELECTION = {'DOWN_SAMPLE_TESTS': 'Down Sample', 'TRUNCATED': 'Truncated', 'PROG_ONLY_COHORT_LEX': 'Prog-Only Cohort', 'COHORT_LEX': 'Cohort-Lexicase', 'FULL_LEXICASE': 'Lexicase'}
# Column positions in the input CSV.
POS_TREATMENT = 0
POS_SOLUTION = 4
import argparse
import pandas as pd
import matplotlib.pyplot as plt
def main():
    """Aggregate solution counts per (cohort number, problem, selection) and
    write one CSV per cohort number, attaching the full-lexicase baseline to
    every non-baseline cohort configuration.
    """
    parser = argparse.ArgumentParser(description="Data aggregation script.")
    parser.add_argument("data_directory", type=str, help="Target experiment directory.")
    parser.add_argument("dump_directory", type=str, help="Target dump directory")
    args = parser.parse_args()
    data_directory = args.data_directory
    write_directory = args.dump_directory
    rows = pd.read_csv(data_directory).values.tolist()
    # count[cn][prob][sel] -> solution count. The original's deeply nested
    # membership pyramid performed exactly this single assignment in every
    # branch; collapsed via setdefault.
    count = {}
    for row in rows:
        # Treatment string: PROBLEM_<name>__SEL_<scheme>__CN_<cohorts>...
        treat = row[POS_TREATMENT].split('__')
        prob = treat[0][len('PROBLEM_'):]
        sel = treat[1][4:]
        cn = int(treat[2].strip('CN_'))
        cnt = row[POS_SOLUTION]
        count.setdefault(cn, {}).setdefault(prob, {})[sel] = cnt
    FULL_LEX = 1  # cn == 1 (a single cohort) is the full-lexicase baseline
    for cn in count:
        problem = []
        selection = []
        counter = []
        print(cn, ': ')
        for prob in count[cn]:
            # NOTE(review): spelled 'sum-of squares' here but 'sum-of-squares'
            # elsewhere in this repo; preserved as found — confirm which is real.
            if prob == 'sum-of squares':
                continue
            print(' ', prob, ': ')
            for sel, cnt in count[cn][prob].items():
                problem.append(prob)
                selection.append(SELECTION[sel])
                counter.append(cnt)
                print(' ', sel, ': ', cnt)
            # Attach the full-lexicase count (or 0 when the problem has no
            # baseline data) to every non-baseline cohort configuration.
            if cn != FULL_LEX:
                problem.append(prob)
                selection.append(SELECTION['FULL_LEXICASE'])
                if prob not in count[FULL_LEX]:
                    counter.append(0)
                else:
                    counter.append(count[FULL_LEX][prob]['COHORT_LEX'])
        frame = pd.DataFrame({'problem': problem, 'selection': selection, 'count': counter},
                             columns=['problem', 'selection', 'count'])
        frame.to_csv(write_directory + 'CN_' + str(cn) + 'Evaluations' + '.csv')
        print()

if __name__ == "__main__":
    main()
```
#### File: CohortLexicase/Summarize/violin_gen.py
```python
# Maps raw selection-scheme tokens from the treatment string to display labels.
SELECTION = {'DOWN_SAMPLE_TESTS': 'Down Sample', 'TRUNCATED': 'Truncated', 'PROG_ONLY_COHORT_LEX': 'Prog-Only Cohort', 'COHORT_LEX': 'Cohort-Lexicase', 'FULL_LEXICASE': 'Lexicase'}
# Maps a cohort number to its "cn<cohorts>_cs<cohort size>" dimension label.
DIMS = {1: "cn1_cs512", 16: 'cn16_cs32', 128: 'cn128_cs4', 256: 'cn256_cs2', 2: 'cn2_cs256', 32: 'cn32_cs16', 4: 'cn4_cs128', 64: 'cn64_cs8', 8: 'cn8_cs64'}
# Column positions in the input CSV.
POS_TREATMENT = 0
POS_SOLUTION = 4
import argparse
import pandas as pd
import matplotlib.pyplot as plt
def main():
    """Collect per-replicate solution-update generations and emit one CSV per
    (problem, selection) pair for violin plotting.
    """
    parser = argparse.ArgumentParser(description="Data aggregation script.")
    parser.add_argument("data_directory", type=str, help="Target experiment directory.")
    parser.add_argument("dump_directory", type=str, help="Target dump directory")
    args = parser.parse_args()
    data_directory = args.data_directory
    write_directory = args.dump_directory.strip()
    rows = pd.read_csv(data_directory).values.tolist()
    # count[prob][sel][cn] -> list of raw update values (one per replicate).
    # The original's nested membership pyramid reduced to one setdefault chain;
    # the unused count2/placeholder lists were dropped.
    count = {}
    for row in rows:
        treat = row[POS_TREATMENT].split('__')
        update = row[8]
        prob = treat[0][len('PROBLEM_'):]
        sel = treat[1][4:]
        cn = int(treat[2].strip('CN_'))
        # NOTE(review): two spellings appear in this script ('sum-of squares'
        # here, 'sum-of-squares' below); preserved as found — confirm intent.
        if prob == 'sum-of squares':
            continue
        count.setdefault(prob, {}).setdefault(sel, {}).setdefault(cn, []).append(update)
    for prob in count:
        if prob == 'sum-of-squares':
            continue
        print(prob)
        for sel in count[prob]:
            dims = []
            gens = []
            for cn, updates in count[prob][sel].items():
                for up in updates:
                    # "NONE" marks replicates that never found a solution.
                    if up != "NONE":
                        dims.append(DIMS[cn])
                        gens.append(int(float(up)))
            print(sel)
            print(dims, len(dims))
            print(gens, len(gens))
            print()
            frame = pd.DataFrame({'dims': dims, 'gens': gens}, columns=['dims', 'gens'])
            frame.to_csv(write_directory + prob + '__' + sel + '__violin__gens.csv')

if __name__ == "__main__":
    main()
``` |
{
"source": "JoseHerradez/Django_RestAPI_Krato",
"score": 2
} |
#### File: TechnicalTest/krato/views.py
```python
from rest_framework import viewsets, status, mixins
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from .models import Ciudad, Tienda, Usuario
from .serializers import CiudadSerializer, TiendaSerializer, UsuarioSerializer
class CiudadViewSet(
        mixins.ListModelMixin,
        mixins.RetrieveModelMixin,
        viewsets.GenericViewSet
):
    """
    Read-only API endpoint: cities can be listed and retrieved.
    """
    queryset = Ciudad.objects.all()
    serializer_class = CiudadSerializer

    # NOTE(review): `detail_route` was removed in DRF 3.10 in favor of
    # `@action(detail=True)`; kept as-is since the DRF version isn't visible.
    @detail_route(methods=['get'], url_path='usuarios/(?P<user_id>[0-9]+)')
    def get_storesByUser(self, request, pk=None, user_id=None):
        """
        Return the stores that belong to this city and are associated
        with the given user.
        """
        ciudad = self.get_object()
        tiendas = ciudad.tiendas.all()
        # Narrow to stores linked to the user captured from the URL.
        tiendas = tiendas.filter(usuarios__pk=user_id)
        serializer = TiendaSerializer(tiendas, many=True, context={'request': request})
        return Response(serializer.data)
class TiendaViewSet(
        mixins.ListModelMixin,
        mixins.RetrieveModelMixin,
        viewsets.GenericViewSet
):
    """
    Read-only API endpoint: stores can be listed and retrieved.
    """
    queryset = Tienda.objects.all()
    serializer_class = TiendaSerializer

    @detail_route(methods=['get'], url_path='usuarios')
    def get_users(self, request, pk=None):
        """
        Return the users that belong to the selected store.
        """
        tienda = self.get_object()
        usuarios = tienda.usuarios.all()
        serializer = UsuarioSerializer(usuarios, many=True, context={'request': request})
        return Response(serializer.data)
class UsuarioViewSet(
        mixins.ListModelMixin,
        mixins.RetrieveModelMixin,
        viewsets.GenericViewSet
):
    """
    Read-only API endpoint: users can be listed and retrieved.
    """
    queryset = Usuario.objects.all()
    serializer_class = UsuarioSerializer

    @detail_route(methods=['get'], url_path='tiendas')
    def get_stores(self, request, pk=None):
        """
        Return the stores that belong to the selected user.
        """
        usuario = self.get_object()
        tiendas = usuario.tiendas.all()
        serializer = TiendaSerializer(tiendas, many=True, context={'request': request})
        return Response(serializer.data)
``` |
{
"source": "JoseHervas/luna",
"score": 2
} |
#### File: JoseHervas/luna/main.py
```python
import json, os
# Runtime configuration: bot persona, authorized user, logging, and API keys.
botParams = json.load(open('config/bot.json'))
userParams = json.load(open('config/user.json'))
logsParams = json.load(open('config/logging.json'))
apiParams = json.load(open('config/apis.json'))
# Command definitions: trigger phrases and canned responses.
available_commands = json.load(open('utils/commands.json'))
# Paths relative to this script's location.
wd = os.path.dirname(os.path.realpath(__file__))
exports = wd + '/exports'
utils = wd + '/utils'
# Logging
import logging, logging.config
logging.config.dictConfig(logsParams)
# Main chatbot UI
from chatterbot import ChatBot
from utils import training, msg_handlers, custom_preprocessors
import telebot
telegram = telebot.TeleBot(apiParams['Telegram']['API-key'])
# Create a new instance of a ChatBot backed by a local SQLite database.
bot = ChatBot(
    botParams['name'],
    storage_adapter="chatterbot.storage.SQLStorageAdapter",
    database="../database.db",
    preprocessors=[
        'chatterbot.preprocessors.clean_whitespace'
    ],
    logic_adapters=[
        {
            'import_path': 'chatterbot.logic.BestMatch'
        }
    ]
)
# Initial training (only when enabled in config/bot.json).
if (botParams['initial_training']):
    training.train_bot(bot, exports)
# Start a new conversation
CONVERSATION_ID = bot.storage.create_conversation()
print("Type something to start...")
# Telegram update listener: decides which response function handles a message.
def handle_message(message):
    """Route one batch of Telegram updates (only the first update is used).

    Only the configured user may talk to the bot. A message matching a known
    command is dispatched to the command handler and answered with a canned
    response; anything else goes to learn mode or the generic chatbot reply,
    depending on the freshly re-read bot config.
    """
    mssg = message[0]  # only the first update of the batch is handled
    chat_id = mssg.chat.id
    username = mssg.chat.username
    if (username == userParams['username']):
        command = msg_handlers.search_commands(mssg, available_commands)
        if (command):
            import random
            from utils import command_handlers
            command_handlers.handle_command(command, mssg, telegram, chat_id, CONVERSATION_ID, bot)
            response = random.choice(available_commands[command]['Responses'])
            telegram.send_message(chat_id, response)
            return
        # Re-read the config so 'learn_mode' can be toggled without a restart.
        # NOTE(review): this binds a LOCAL botParams, shadowing (not updating)
        # the module-level one — presumably intentional; confirm.
        botParams = json.load(open('config/bot.json'))
        learn_mode_on = botParams['learn_mode']
        if (learn_mode_on):
            msg_handlers.learn_new_response(bot, mssg, telegram, CONVERSATION_ID)
        else:
            msg_handlers.generic_text_message(bot, mssg, telegram)
    else:
        # Spanish: "Sorry, I'm not authorized to answer your messages."
        response = 'Lo siento, no estoy autorizada para responder a tus mensajes.'
        telegram.send_message(chat_id, response)
# Attach handle_message as the listener for every incoming Telegram update.
telegram.set_update_listener(handle_message)
# Poll forever. The original bare `except:` retried once inside the handler
# and let a second failure crash the process (while also swallowing
# KeyboardInterrupt); log the error and keep retrying instead.
while True:
    try:
        telegram.polling(none_stop=True)
    except Exception:
        logging.exception("Telegram polling failed; restarting")
#### File: luna/utils/custom_preprocessors.py
```python
import json
from chatterbot.conversation import Statement
# Bot configuration; provides the bot's display name.
botParams = json.load(open('config/bot.json'))

def skip_name(bot, message):
    """Remove the bot's own name from *message*.

    Returns a new Statement without the name when it appears as a word;
    otherwise returns *message* unchanged. (*bot* is unused but kept for
    the preprocessor call signature.)
    """
    word_list = message.replace('.', ' ').replace(',', ' ').split(' ')
    bot_name = botParams['name']
    if (bot_name in word_list):
        # Bug fix: the original called word_list.remove(botName) where
        # `botName` was never defined (NameError on every match).
        word_list.remove(bot_name)
        output = Statement(text=' '.join(word_list))
    else:
        output = message
    return output
``` |
{
"source": "JoseH-Git/web-scraping-challenge",
"score": 3
} |
#### File: web-scraping-challenge/Missions_to_Mars/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
def scrape():
    """Scrape Mars news, featured images, and hemisphere data.

    Returns a dict with the latest headline/teaser/date, the list of scraped
    title/teaser pairs, the featured image URLs, and one {title, img_url}
    dict per hemisphere.
    """
    # browser = init_browser()
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=False)
    mars = {}

    # --- Latest news ---
    url = "https://redplanetscience.com/"
    browser.visit(url)
    soup = BeautifulSoup(browser.html, "html.parser")
    mars["headlines"] = soup.find('div', class_="content_title").get_text()
    mars["article"] = soup.find('div', class_="article_teaser_body").get_text()
    mars["date"] = soup.find('div', class_="list_date").get_text()
    # Bug fix: the original looped over undefined `headlines`/`article` names
    # (NameError); collect every title/teaser pair from the page instead.
    titles = soup.find_all('div', class_="content_title")
    teasers = soup.find_all('div', class_="article_teaser_body")
    mars["news"] = [{"title": t.get_text(), "article": a.get_text()}
                    for t, a in zip(titles, teasers)]

    # --- Featured images ---
    # Bug fix: the original quit the browser (twice) and then kept using it.
    url = "https://spaceimages-mars.com/"
    browser.visit(url)
    soup = BeautifulSoup(browser.html, 'html.parser')
    images_list = []
    thumb_group = soup.body.find('div', class_='thmbgroup')
    for image in thumb_group.find_all('a'):
        images_list.append(url + image['href'])
    mars["images"] = images_list

    # --- Hemispheres ---
    url = 'https://marshemispheres.com/'
    browser.visit(url)
    # Bug fix: the original reused the previous page's soup here.
    soup = BeautifulSoup(browser.html, 'html.parser')
    link_container = soup.body.find('div', class_='collapsible results')
    hemisphere_links = link_container.find_all('a', class_="itemLink product-item")
    hemisphere_images = []
    for link in hemisphere_links:
        # Skip the thumbnail links that carry an <h3>; follow the plain ones.
        if link.h3 is None:
            browser.visit(url + link["href"])
            page = BeautifulSoup(browser.html, 'html.parser')
            hemisphere_images.append({
                "title": page.body.find('h2', class_="title").text,
                "img_url": url + page.body.find('img', class_='wide-image')['src'],
            })
    mars["hemispheres"] = hemisphere_images
    print(hemisphere_images)

    # Quit once, at the very end.
    browser.quit()
    return mars
``` |
{
"source": "josehoras/Advanced-Lane-Finding",
"score": 2
} |
#### File: josehoras/Advanced-Lane-Finding/plotting_helpers.py
```python
import matplotlib.pyplot as plt
import cv2
# Plotting functions
def plot_calibration(original, corrected):
    """Show the original vs. undistorted image side by side and save the
    figure to output_images/dist_correct.jpg."""
    fig, axes = plt.subplots(1, 2, figsize=(8, 2.5))
    fig.tight_layout()
    panels = zip(axes, ('Original image', 'Undistorted image'), (original, corrected))
    for axis, title, image in panels:
        axis.axis('off')
        axis.set_title(title, fontsize=18)
        axis.imshow(image)
    plt.subplots_adjust(left=0.01, right=0.99, top=0.9, bottom=0)
    plt.show()
    fig.savefig("output_images/dist_correct.jpg")
    return
def plot_thresholds(undist, gradx, grady, mag_binary, dir_binary, hls_binary, x_mag, combined, dir_hsl):
    """Render the 3x3 grid of threshold maps used to tune the binary pipeline
    and save it to output_images/thresholds.jpg.

    NOTE(review): several panel titles do not match the parameter they display
    (e.g. the 'mag_bin' panel renders `gradx`); pairing preserved as found —
    confirm the intended mapping with the caller.
    """
    fig, axes_grid = plt.subplots(3, 3, figsize=(12, 8))
    fig.tight_layout()
    axes = [axis for row in axes_grid for axis in row]
    # (title, image, cmap, fontsize, fontweight) per panel, row-major order.
    panels = [
        ('Original Image', undist, None, 18, None),
        ('mag_bin', gradx, 'gray', 18, None),
        ('dir_bin', grady, 'gray', 18, None),
        ('hls_bin', mag_binary, 'gray', 18, None),
        ('white_bin', dir_binary, 'gray', 18, None),
        ('yellow_bin', hls_binary, 'gray', 18, None),
        ('mag & dir & hls', x_mag, 'gray', 18, None),
        ('Combined', combined, 'gray', 20, 'bold'),
        ('white or yellow', dir_hsl, 'gray', 18, None),
    ]
    for axis, (title, image, cmap, size, weight) in zip(axes, panels):
        axis.axis('off')
        if weight is None:
            axis.set_title(title, fontsize=size)
        else:
            axis.set_title(title, fontsize=size, fontweight=weight)
        if cmap is None:
            axis.imshow(image)
        else:
            axis.imshow(image, cmap=cmap)
    plt.subplots_adjust(left=0.01, right=0.99, top=0.9, bottom=0.01)
    plt.show()
    fig.savefig("output_images/thresholds.jpg")
    return
def plot_warping(original, warp, src):
    """Show the undistorted image next to its perspective-warped version and
    save the figure. *src* is the source trapezoid (only used by the
    commented-out overlay below)."""
    # Uncomment this code to visualize the region of interest
    # for i in range(len(src)):
    #     cv2.line(original, (src[i][0], src[i][1]), (src[(i+1)%4][0], src[(i+1)%4][1]), 1, 2)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 2.5))
    f.tight_layout()
    # for ax in (ax1, ax2):
    #     ax.axis('off')
    plt.subplots_adjust(left=0.05, right=0.99, top=0.9, bottom=0.08)
    ax1.set_title('Undistorted Image', fontsize=18)
    ax1.imshow(original)
    ax2.set_title('Warped Image', fontsize=18)
    ax2.imshow(warp)
    plt.show()
    f.savefig("output_images/warp.jpg")
    return
def plot_img(image):
    """Display a single image without axes (saving is left commented out)."""
    plt.imshow(image)
    plt.axis('off')
    # plt.tight_layout(pad=0.01, rect=(0,-0.1,1,1.1))
    plt.subplots_adjust(left=0.1, right=0.95, top=1, bottom=0)
    # plt.margins(y=0)
    # plt.savefig("output_images/fit.jpg")
    plt.show()
```
#### File: josehoras/Advanced-Lane-Finding/video_pipeline.py
```python
import numpy as np
import pickle
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
from image_thresholding import *
from plotting_helpers import *
from line_fit import *
from Line import *
# *** PIPELINE ***
def pipeline(img):
global error_im, skipped_frames
# 1. Correct distorsion
# open distorsion matrix
try:
saved_dist = pickle.load(open('calibrate_camera.p', 'rb'), encoding='latin1')
mtx = saved_dist['mtx']
dist = saved_dist['dist']
except (OSError, IOError): # No progress file yet available
print("No saved distorsion data. Run camera_calibration.py")
# apply correction
undist = cv2.undistort(img, mtx, dist, None, mtx)
# 2. Apply filters to get binary map
ksize = 3
gradx = abs_sobel_thresh(undist, orient='x', sobel_kernel=ksize, thresh=(10, 100))
grady = abs_sobel_thresh(undist, orient='y', sobel_kernel=ksize, thresh=(5, 100))
mag_bin = mag_thresh(undist, sobel_kernel=ksize, mag_thresh=(10, 200))
dir_bin = dir_threshold(undist, sobel_kernel=15, thresh=(0.9, 1.2))
hls_bin = hls_select(img, thresh=(50, 255))
white_bin = white_select(img, thresh=195)
yellow_bin = yellow_select(img)
# combine filters to a final output
combined = np.zeros_like(dir_bin)
combined[((mag_bin == 1) & (dir_bin == 1) & (hls_bin == 1)) |
((white_bin == 1) | (yellow_bin == 1))] = 1
# 3. Define trapezoid points on the road and transform perspective
X = combined.shape[1]
Y = combined.shape[0]
src = np.float32(
[[205, 720],
[1075, 720],
[700, 460],
[580, 460]])
dst = np.float32(
[[300, 720],
[980, 720],
[980, 0],
[300, 0]])
# get perspective transformation matrix
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
# warp the result of binary thresholds
warped = cv2.warpPerspective(combined, M, (X,Y), flags=cv2.INTER_LINEAR)
# 4. Get polinomial fit of lines
# if > 4 frames skipped (or first frame, as skipped_frames is initialized to 100) do full search
if skipped_frames > 5:
fit_method = "Boxes"
leftx, lefty, rightx, righty, out_img = find_lane_pixels(warped)
else:
fit_method = "Around fit"
leftx, lefty, rightx, righty, out_img = find_lane_around_fit(warped, left_lane.fit_x, right_lane.fit_x)
# fit polynomials and sanity check
try:
left_fit, right_fit, left_px, right_px, ploty = fit(leftx, lefty, rightx, righty, warped.shape[0])
detected, err_msg = sanity_chk(ploty, left_px, right_px)
except:
detected, err_msg = False, "Empty data"
if detected: skipped_frames = 0
else: skipped_frames += 1
# 5. Calculate distance to center, curvature, and update Line objects
if detected or (fit_method == "Boxes" and err_msg != "Empty data"):
left_curv, right_curv = find_curv(ploty, left_fit, right_fit)
left_lane.update(ploty, left_fit, left_px, left_curv)
right_lane.update(ploty, right_fit, right_px, right_curv)
lane_w = (right_lane.base_pos - left_lane.base_pos) * 3.7/700
offset = (((right_lane.base_pos + left_lane.base_pos) - img.shape[1]) / 2) * 3.7/700
# 6. Plot fitted lanes into original image
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_lane.fit_x, left_lane.fit_y]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_lane.fit_x, right_lane.fit_y])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
# if error save original img to check closely in image pipeline
if 1 < skipped_frames < 3:
mpimg.imsave(err_msg + "_" + str(error_im) + ".jpg", img)
error_im += 1
# Add text
road_curv = (left_lane.curv_avg + right_lane.curv_avg) // 2
if road_curv > 2000:
road_curv_text = "Road curvature: straight"
else:
road_curv_text = "Road curvature: " + str(road_curv) + "m"
side = {True: "left", False: "right"}
offset_txt = "Car is {0:.2f}m {1:s} of center".format(offset, side[offset > 0])
for i, txt in enumerate([road_curv_text, offset_txt]):
cv2.putText(result, txt, (75, 75 * (i+1)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)
# Uncomment for debugging messages
# lane_width_txt = "Lane width: %.2f m" % lane_w
# for i, obj, txt in [(1, left_lane, "Left"), (2, right_lane, "Right")]:
# if obj.curv_avg > 2000:
# curv_txt = txt + " curvature: straight"
# else:
# curv_txt = txt + " curvature: " + str(int(obj.curv_avg)) + "m"
# cv2.putText(result,curv_txt, (550, 50 * i), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)
# cv2.putText(result, "Skipped frames: " + str(skipped_frames), (550,150), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)
# cv2.putText(result, fit_method, (550, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)
# if err_msg != "":
# cv2.putText(result, "Error!: " + err_msg, (550, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)
return result
# *** MAIN ***
# define global variables to use in the pipeline
left_lane = Line()
right_lane = Line()
error_im = 1
# Start high so the very first frame triggers a full sliding-window search.
skipped_frames = 100
# load video
clip_name = "challenge_video"
clip1 = VideoFileClip(clip_name + ".mp4")#.subclip(0, 8)
# run video through the pipeline frame by frame and save the output
out_clip = clip1.fl_image(pipeline)
out_clip.write_videofile("output_videos/" + clip_name + "_output.mp4", audio=False)
{
"source": "josehu07/open-cas-linux-mf",
"score": 2
} |
#### File: internal_plugins/vdbench/__init__.py
```python
import os
import time
from datetime import timedelta
from core.test_run import TestRun
from test_tools import fs_utils
class Vdbench:
    """Test-framework plugin that installs and drives the Vdbench I/O tool
    on the remote DUT via TestRun.executor."""

    def __init__(self, params, config):
        print("VDBench plugin initialization")
        self.run_time = timedelta(seconds=60)  # default; overwritten by create_config()
        try:
            self.working_dir = config["working_dir"]
            self.reinstall = config["reinstall"]
            self.source_dir = config["source_dir"]
        except Exception:
            raise Exception("Missing fields in config! ('working_dir', 'source_dir' and "
                            "'reinstall' required)")
        # Output directory passed via -o; analyze_log() later globs for the
        # newest directory sharing this prefix (presumably Vdbench expands
        # the 'tod' suffix to a timestamp — confirm).
        self.result_dir = os.path.join(self.working_dir, 'result.tod')

    def pre_setup(self):
        pass

    def post_setup(self):
        """Copy the Vdbench sources into the working dir (skipped when it
        already exists and reinstall is disabled)."""
        print("VDBench plugin post setup")
        if not self.reinstall and fs_utils.check_if_directory_exists(self.working_dir):
            return
        if fs_utils.check_if_directory_exists(self.working_dir):
            fs_utils.remove(self.working_dir, True, True)
        fs_utils.create_directory(self.working_dir)
        TestRun.LOGGER.info("Copying vdbench to working dir.")
        fs_utils.copy(os.path.join(self.source_dir, "*"), self.working_dir,
                      True, True)
        pass

    def teardown(self):
        pass

    def create_config(self, config, run_time: timedelta):
        """Write the Vdbench parameter file, appending an 'elapsed=' clause
        derived from *run_time* (stored for the run() timeout)."""
        self.run_time = run_time
        if config[-1] != ",":
            config += ","
        config += f"elapsed={int(run_time.total_seconds())}"
        TestRun.LOGGER.info(f"Vdbench config:\n{config}")
        fs_utils.write_file(os.path.join(self.working_dir, "param.ini"), config)

    def run(self):
        """Launch Vdbench detached in a screen session and poll once per
        second until it exits (then return analyze_log()'s verdict) or until
        1.5x the configured run time elapses (then return False)."""
        cmd = f"{os.path.join(self.working_dir, 'vdbench')} " \
              f"-f {os.path.join(self.working_dir, 'param.ini')} " \
              f"-vr -o {self.result_dir}"
        full_cmd = f"screen -dmS vdbench {cmd}"
        TestRun.executor.run(full_cmd)
        start_time = time.time()
        timeout = self.run_time * 1.5  # timedelta * float -> timedelta
        while True:
            # Non-zero exit from the ps|grep chain means the process is gone.
            if not TestRun.executor.run(f"ps aux | grep '{cmd}' | grep -v grep").exit_code == 0:
                return self.analyze_log()
            if time.time() - start_time > timeout.total_seconds():
                TestRun.LOGGER.error("Vdbench timeout.")
                return False
            time.sleep(1)

    def analyze_log(self):
        """Inspect logfile.html in the newest result directory; return True
        on clean completion, False otherwise (corruption/heartbeat/unknown)."""
        # Newest directory matching the result prefix ('result.' + anything).
        output = TestRun.executor.run(
            f"ls -1td {self.result_dir[0:len(self.result_dir) - 3]}* | head -1")
        log_path = os.path.join(output.stdout if output.exit_code == 0 else self.result_dir,
                                "logfile.html")
        log_file = fs_utils.read_file(log_path)
        if "Vdbench execution completed successfully" in log_file:
            TestRun.LOGGER.info("Vdbench execution completed successfully.")
            return True
        if "Data Validation error" in log_file or "data_errors=1" in log_file:
            TestRun.LOGGER.error("Data corruption occurred!")
        elif "Heartbeat monitor:" in log_file:
            TestRun.LOGGER.error("Vdbench: heartbeat.")
        else:
            TestRun.LOGGER.error("Vdbench unknown result.")
        return False

# Entry point looked up by the plugin loader.
plugin_class = Vdbench
```
#### File: log/group/html_group_log.py
```python
from datetime import datetime
from log.base_log import BaseLog, BaseLogResult
class HtmlGroupLog(BaseLog):
    """HTML log that renders messages into a group container and supports
    nested child groups via a chain-of-responsibility (`_successor`)."""

    def __init__(self, constructor, html_base_element, cfg, begin_message, id_):
        super().__init__(begin_message)
        self._successor = None           # currently open child group, if any
        self.__factory = constructor     # callable used to build nested groups
        self.__log_main_store = html_base_element
        self._id = id_
        self._container = None           # HTML element receiving this group's entries
        self._header = None              # HTML element holding the group header
        self.__msg_idx = 0               # running index used to build step ids
        self._start_time = datetime.now()
        self._cfg = cfg
        self._header_msg_type = type(begin_message)

    def begin(self, message):
        """Open the group in the HTML store using the policy for *message*'s type."""
        policy = self._cfg.get_policy(type(message))
        self._header, self._container = policy.group_begin(self._id, message, self.__log_main_store)
        super().begin(message)

    def get_step_id(self):
        """Return the id of the next step, delegating to the deepest open group."""
        if self._successor is not None:
            return self._successor.get_step_id()
        return f'step.{self._id}.{self.__msg_idx}'

    def __add_test_step(self, message, result=BaseLogResult.PASSED):
        # Render one entry into this group's container and bump the index.
        policy = self._cfg.get_policy(type(message))
        policy.standard(self.get_step_id(), message, result, self._container)
        self.__msg_idx += 1

    def get_main_log_store(self):
        return self.__log_main_store

    def start_group(self, message):
        """Open a nested group inside the deepest open group and return it."""
        self._header_msg_type = type(message)
        if self._successor is not None:
            return self._successor.start_group(message)
        new_id = f"{self._id}.{self.__msg_idx}"
        self.__msg_idx += 1
        self._successor = self.__factory(self._container, self._cfg, message, new_id)
        self._successor.begin(message)
        return self._successor

    def end_group(self):
        """Close the deepest open group and return it (self when none is open)."""
        if self._successor is None:
            self.end()
            return self
        if self._successor._successor is None:
            self._successor.end()
            result = self._successor
            self._successor = None
            return result
        return self._successor.end_group()

    def _delegate_or_add(self, method_name, message, result):
        """Forward *message* to the open child group if any; otherwise render
        it locally as a step with *result*. Collapses the nine duplicated
        delegate-or-render bodies of the per-level methods below."""
        if self._successor is not None:
            getattr(self._successor, method_name)(message)
        else:
            self.__add_test_step(message, result)

    def debug(self, message):
        # NOTE(review): only debug() propagates the BaseLog return value —
        # preserved as found; confirm whether the others should too.
        self._delegate_or_add('debug', message, BaseLogResult.DEBUG)
        return super().debug(message)

    def info(self, message):
        self._delegate_or_add('info', message, BaseLogResult.PASSED)
        super().info(message)

    def workaround(self, message):
        self._delegate_or_add('workaround', message, BaseLogResult.WORKAROUND)
        super().workaround(message)

    def warning(self, message):
        self._delegate_or_add('warning', message, BaseLogResult.WARNING)
        super().warning(message)

    def skip(self, message):
        self._delegate_or_add('skip', message, BaseLogResult.SKIPPED)
        super().skip(message)

    def error(self, message):
        self._delegate_or_add('error', message, BaseLogResult.FAILED)
        super().error(message)

    def blocked(self, message):
        self._delegate_or_add('blocked', message, BaseLogResult.BLOCKED)
        super().blocked(message)

    def critical(self, message):
        self._delegate_or_add('critical', message, BaseLogResult.CRITICAL)
        super().critical(message)

    def exception(self, message):
        self._delegate_or_add('exception', message, BaseLogResult.EXCEPTION)
        super().exception(message)

    def end(self):
        return super().end()

    def get_current_group(self):
        """Return the deepest open group (self when no child is open)."""
        if self._successor is not None:
            return self._successor.get_current_group()
        return self
#### File: test-framework/log/html_iteration_log.py
```python
from log.html_file_item_log import HtmlFileItemLog
class HtmlIterationLog(HtmlFileItemLog):
    """Per-iteration HTML log; each instance writes to its own iteration file
    created by the config."""

    def __init__(self, test_title, iteration_title, config):
        # NOTE(review): never set to True in this class — presumably toggled
        # by whoever owns the iteration lifecycle; confirm.
        self.iteration_closed: bool = False
        html_file = config.create_iteration_file()
        super().__init__(html_file, test_title, config, iteration_title)
#### File: test-framework/log/html_main_log.py
```python
from log.html_file_log import HtmlFileLog
from lxml.etree import Element
class HtmlMainLog(HtmlFileLog):
    """Top-level HTML log file: shows the test title and build info in the
    sidebar and records per-iteration results via the config callbacks."""

    def __init__(self, title, config):
        super().__init__(config.get_main_file_path(), title)
        self._config = config
        self.__current_iteration_id = None
        root = self.get_root()
        # Show the test title in the sidebar.
        test_title_div = root.xpath('/html/body/div/div/div/div[@class="sidebar-test-title"]')[0]
        test_title_div.text = title
        self.__build_information_set = root.xpath(
            '/html/body/div/div/div/div[@id="sidebar-tested-build"]')[0]

    def add_build_info(self, message):
        """Append one line of build information to the sidebar."""
        build_info = Element("div")
        build_info.text = message
        self.__build_information_set.append(build_info)

    def start_iteration(self, iteration_id):
        # Only remember the id; the HTML is updated when the iteration ends.
        self.__current_iteration_id = iteration_id

    def end_iteration(self, iteration_result):
        """Record the finished iteration in the iteration-selector widgets.

        (A duplicate zero-argument `end_iteration` that was immediately
        shadowed by this definition has been removed — it was dead code.)
        """
        root = self.get_root()
        # NOTE(review): unlike end_setup_iteration, the div is passed as the
        # raw xpath result list (no [0]); preserved as found — confirm which
        # form the config callback expects.
        iteration_selector_div = root.xpath('/html/body/div/div/div[@id="iteration-selector"]')
        iteration_selector_select = root.xpath(
            '/html/body/div/div/select[@id="sidebar-iteration-list"]')[0]
        self._config.end_iteration(iteration_selector_div,
                                   iteration_selector_select,
                                   self.__current_iteration_id,
                                   iteration_result)

    def end_setup_iteration(self, result):
        """Record the setup iteration's result in the selector widgets."""
        root = self.get_root()
        iteration_selector_div = root.xpath('/html/body/div/div/div[@id="iteration-selector"]')[0]
        iteration_selector_select = root.xpath(
            '/html/body/div/div/select[@id="sidebar-iteration-list"]')[0]
        self._config.end_setup_iteration(iteration_selector_div, iteration_selector_select, result)

    def end(self, result):
        """Write the overall test status into the sidebar and close the file."""
        root = self.get_root()
        test_status_div = root.xpath('/html/body/div/div/div/div[@class="sidebar-test-status"]')
        self._config.end_main_log(test_status_div, result)
        super().end()
#### File: test-framework/log/html_setup_log.py
```python
from log.html_file_item_log import HtmlFileItemLog
from log.base_log import BaseLogResult
class HtmlSetupLog(HtmlFileItemLog):
    """Summary log file: each finished iteration is recorded as one entry
    whose log level mirrors the iteration's result."""

    # Maps an iteration result to the HtmlFileItemLog method used to record it.
    LOG_RESULT = {
        BaseLogResult.PASSED: HtmlFileItemLog.info,
        BaseLogResult.WORKAROUND: HtmlFileItemLog.workaround,
        BaseLogResult.WARNING: HtmlFileItemLog.warning,
        BaseLogResult.SKIPPED: HtmlFileItemLog.skip,
        BaseLogResult.FAILED: HtmlFileItemLog.error,
        BaseLogResult.BLOCKED: HtmlFileItemLog.blocked,
        BaseLogResult.EXCEPTION: HtmlFileItemLog.exception,
        BaseLogResult.CRITICAL: HtmlFileItemLog.critical}

    def __init__(self, test_title, config, iteration_title="Test summary"):
        html_file_path = config.get_setup_file_path()
        super().__init__(html_file_path, test_title, config, iteration_title)
        self._last_iteration_title = ''

    def start_iteration(self, message):
        # Remember the title; it is only written when the iteration ends.
        self._last_iteration_title = message

    def end_iteration(self, iteration_result):
        # Log the remembered title at the level matching the result.
        HtmlSetupLog.LOG_RESULT[iteration_result](self, self._last_iteration_title)

    def end(self):
        super().end()
#### File: test-framework/log/presentation_policy.py
```python
class PresentationPolicy:
    """Pair of rendering callbacks: one for single log entries, one for
    opening a log group."""

    def __init__(self, standard_log, group_begin_func):
        # Callable(msg_id, msg, log_result, html_node) rendering one entry.
        self.standard = standard_log
        # Callable(msg_id, msg, html_node) -> (header, container) for a group.
        self.group_begin = group_begin_func
def std_log_entry(msg_id, msg, log_result, html_node):
    """No-op standard-entry renderer used by the null policy."""
    return None
def group_log_begin(msg_id, msg, html_node):
    """No-op group opener: both group anchors are the node itself."""
    return (html_node, html_node)
null_policy = PresentationPolicy(std_log_entry, group_log_begin)
```
#### File: test-framework/storage_devices/disk.py
```python
import itertools
from enum import IntEnum
from test_utils import disk_finder
from test_utils.output import CmdException
from test_utils.size import Unit
from test_tools import disk_utils, fs_utils
from storage_devices.device import Device
from datetime import timedelta
from test_utils.os_utils import wait
from core.test_run import TestRun
import json
import re
class DiskType(IntEnum):
    """Disk classes, ordered from slowest to fastest medium.

    The IntEnum ordering is meaningful: DiskTypeSetBase comparisons and
    DiskTypeLowerThan rely on these numeric values.
    """
    hdd = 0
    hdd4k = 1
    sata = 2
    nand = 3
    optane = 4
class DiskTypeSetBase:
    """Abstract set of acceptable disk types.

    Subclasses provide types(); all ordering operators compare two sets by
    their weakest (minimum) member.
    """

    def resolved(self):
        raise NotImplementedError()

    def types(self):
        raise NotImplementedError()

    def json(self):
        return json.dumps({
            "type": "set",
            "values": [t.name for t in self.types()]
        })

    def _rank(self):
        # Comparison key: the weakest disk type contained in the set.
        return min(self.types())

    def __lt__(self, other):
        return self._rank() < other._rank()

    def __le__(self, other):
        return self._rank() <= other._rank()

    def __eq__(self, other):
        return self._rank() == other._rank()

    def __ne__(self, other):
        return self._rank() != other._rank()

    def __gt__(self, other):
        return self._rank() > other._rank()

    def __ge__(self, other):
        return self._rank() >= other._rank()
class DiskTypeSet(DiskTypeSetBase):
    """Explicit, already-resolved collection of DiskType values."""

    def __init__(self, *args):
        # Mirrors set(*args): callers pass one iterable of DiskType members.
        self.__types = set(*args)

    def resolved(self):
        # Nothing to look up - the members were given directly.
        return True

    def types(self):
        return self.__types
class DiskTypeLowerThan(DiskTypeSetBase):
    """Lazy set: every DiskType strictly slower than the named disk's type.

    Resolvable only once the referenced disk appears in TestRun.disks.
    """

    def __init__(self, disk_name):
        self.__disk_name = disk_name

    def resolved(self):
        return self.__disk_name in TestRun.disks

    def types(self):
        if not self.resolved():
            raise LookupError("Disk type not resolved!")
        reference = TestRun.disks[self.__disk_name].disk_type
        # Every DiskType member strictly below the reference disk's type.
        return {candidate for candidate in DiskType if candidate < reference}

    def json(self):
        return json.dumps({
            "type": "operator",
            "name": "lt",
            "args": [self.__disk_name]
        })
class Disk(Device):
    """Physical disk on the DUT, addressed by /dev path and serial number.

    Tracks created partitions and supports hot plug/unplug through the
    subclass-provided execute_plug_command()/execute_unplug_command().
    """

    def __init__(
            self,
            path,
            disk_type: DiskType,
            serial_number,
            block_size,
    ):
        Device.__init__(self, path)
        self.device_name = path.split('/')[-1]  # e.g. "sdb" from "/dev/sdb"
        self.serial_number = serial_number
        self.block_size = Unit(block_size)
        self.disk_type = disk_type
        self.partitions = []

    def create_partitions(
            self,
            sizes: list,
            partition_table_type=disk_utils.PartitionTable.gpt):
        """Create one partition per entry in *sizes*."""
        disk_utils.create_partitions(self, sizes, partition_table_type)

    def umount_all_partitions(self):
        """Lazy-unmount every mount under this disk's partition nodes."""
        TestRun.LOGGER.info(
            f"Umounting all partitions from: {self.system_path}")
        cmd = f'umount -l {self.system_path}*?'
        TestRun.executor.run(cmd)

    def remove_partitions(self):
        """Unmount and delete all partitions; clear the list on success."""
        for part in self.partitions:
            if part.is_mounted():
                part.unmount()
        if disk_utils.remove_partitions(self):
            self.partitions.clear()

    def is_detected(self):
        """Check whether the system currently sees this disk.

        When a serial number is known the /dev name is re-resolved from it
        (it may change across unplug/replug) and partition paths are
        refreshed accordingly; otherwise fall back to listing system_path.
        """
        if self.serial_number:
            serial_numbers = disk_finder.get_all_serial_numbers()
            if self.serial_number not in serial_numbers:
                return False
            else:
                self.device_name = serial_numbers[self.serial_number]
                self.system_path = f"/dev/{self.device_name}"
                for part in self.partitions:
                    part.system_path = disk_utils.get_partition_path(
                        part.parent_device.system_path, part.number)
                return True
        elif self.system_path:
            output = fs_utils.ls_item(f"{self.system_path}")
            return fs_utils.parse_ls_output(output)[0] is not None
        raise Exception("Couldn't check if device is detected by the system")

    def wait_for_plug_status(self, should_be_visible):
        """Poll up to 1 minute until detection state matches expectation."""
        if not wait(lambda: should_be_visible == self.is_detected(),
                    timedelta(minutes=1),
                    timedelta(seconds=1)):
            # BUGFIX: message typo - was "while tying to".
            raise Exception(f"Timeout occurred while trying to "
                            f"{'plug' if should_be_visible else 'unplug'} disk.")

    def plug(self):
        """Hot-plug the disk (no-op if already visible)."""
        if self.is_detected():
            return
        self.execute_plug_command()
        self.wait_for_plug_status(True)

    def unplug(self):
        """Hot-unplug the disk (no-op if already invisible)."""
        if not self.is_detected():
            return
        if not self.device_name:
            raise Exception("Couldn't unplug disk without disk id in /dev/.")
        self.execute_unplug_command()
        self.wait_for_plug_status(False)
        if self.serial_number:
            # The /dev name is stale now; is_detected() re-resolves it.
            self.device_name = None

    @staticmethod
    def plug_all_disks():
        """Re-attach every NVMe and SATA disk known to the system."""
        TestRun.executor.run_expect_success(NvmeDisk.plug_all_command)
        TestRun.executor.run_expect_success(SataDisk.plug_all_command)

    def __str__(self):
        disk_str = f'system path: {self.system_path}, type: {self.disk_type}, ' \
                   f'serial: {self.serial_number}, size: {self.size}, ' \
                   f'block size: {self.block_size}, partitions:\n'
        for part in self.partitions:
            disk_str += f'\t{part}'
        return disk_str

    @staticmethod
    def create_disk(path,
                    disk_type: DiskType,
                    serial_number,
                    block_size):
        """Factory: NVMe media types get an NvmeDisk, the rest a SataDisk."""
        if disk_type is DiskType.nand or disk_type is DiskType.optane:
            return NvmeDisk(path, disk_type, serial_number, block_size)
        else:
            return SataDisk(path, disk_type, serial_number, block_size)
class NvmeDisk(Disk):
    """NVMe-attached disk; (un)plug is driven through PCI sysfs."""
    # Rescanning the PCI bus re-discovers every unplugged NVMe device.
    plug_all_command = "echo 1 > /sys/bus/pci/rescan"
    def __init__(self, path, disk_type, serial_number, block_size):
        Disk.__init__(self, path, disk_type, serial_number, block_size)
    def execute_plug_command(self):
        # No per-device rescan node exists - rescan the whole PCI bus.
        TestRun.executor.run_expect_success(NvmeDisk.plug_all_command)
    def execute_unplug_command(self):
        # Try the usual sysfs 'remove' node first; fall back to the
        # one-level-deeper device/device/remove path before giving up.
        # NOTE(review): the fallback layout is assumed from this code -
        # confirm against the target kernels in use.
        if TestRun.executor.run(
                f"echo 1 > /sys/block/{self.device_name}/device/remove").exit_code != 0:
            output = TestRun.executor.run(
                f"echo 1 > /sys/block/{self.device_name}/device/device/remove")
            if output.exit_code != 0:
                raise CmdException(f"Failed to unplug PCI disk using sysfs.", output)
class SataDisk(Disk):
    """SCSI/SATA-attached disk; (un)plug goes through scsi_host sysfs."""
    # Writing '- - -' into every scsi_host scan node rescans all buses.
    plug_all_command = "for i in $(find -H /sys/devices/ -path '*/scsi_host/*/scan' -type f); " \
                       "do echo '- - -' > $i; done;"
    def __init__(self, path, disk_type, serial_number, block_size):
        # Until the precise SCSI address is known, plugging rescans
        # everything; get_sysfs_properties() later narrows plug_command
        # down to this single device.
        self.plug_command = SataDisk.plug_all_command
        Disk.__init__(self, path, disk_type, serial_number, block_size)
    def execute_plug_command(self):
        TestRun.executor.run_expect_success(self.plug_command)
    def execute_unplug_command(self):
        TestRun.executor.run_expect_success(
            f"echo 1 > {self.get_sysfs_properties(self.device_name).full_path}/device/delete")
    def get_sysfs_properties(self, device_name):
        """Locate the device's sysfs directory, derive its SCSI address
        (controller:port:target:lun) and, as a side effect, rebuild
        self.plug_command to target only this device.  Returns the parsed
        ls entry for the sysfs path."""
        ls_command = f"$(find -H /sys/devices/ -name {device_name} -type d)"
        output = fs_utils.ls_item(f"{ls_command}")
        sysfs_addr = fs_utils.parse_ls_output(output)[0]
        if not sysfs_addr:
            raise Exception(f"Failed to find sysfs address: ls -l {ls_command}")
        dirs = sysfs_addr.full_path.split('/')
        # Two levels above the device dir - NOTE(review): assumes a
        # .../<c:p:t:l>/block/<dev> sysfs layout; confirm on target.
        scsi_address = dirs[-3]
        matches = re.search(
            r"^(?P<controller>\d+)[-:](?P<port>\d+)[-:](?P<target>\d+)[-:](?P<lun>\d+)$",
            scsi_address)
        controller_id = matches["controller"]
        port_id = matches["port"]
        target_id = matches["target"]
        lun = matches["lun"]
        # Path prefix up to (excluding) the hostN component.
        host_path = "/".join(itertools.takewhile(lambda x: not x.startswith("host"), dirs))
        self.plug_command = f"echo '{port_id} {target_id} {lun}' > " \
                            f"{host_path}/host{controller_id}/scsi_host/host{controller_id}/scan"
        return sysfs_addr
```
#### File: test-framework/storage_devices/partition.py
```python
from storage_devices.device import Device
from test_tools import disk_utils
from test_utils.size import Size
class Partition(Device):
    """Single partition of a parent Disk.

    The /dev path is derived from the parent's system path and the
    partition number via disk_utils.get_partition_path().
    """
    def __init__(self, parent_dev, type, number, begin: Size, end: Size):
        Device.__init__(self, disk_utils.get_partition_path(parent_dev.system_path, number))
        self.number = number
        self.parent_device = parent_dev
        self.type = type    # NOTE: parameter shadows builtin `type`; kept for API compatibility
        self.begin = begin  # start offset (Size)
        self.end = end      # end offset (Size)
    def __str__(self):
        return f"\tsystem path: {self.system_path}, size: {self.size}, type: {self.type}, " \
               f"parent device: {self.parent_device.system_path}\n"
```
#### File: test-framework/test_tools/device_mapper.py
```python
from enum import Enum
from test_utils.linux_command import LinuxCommand
from test_utils.size import Size, Unit
from storage_devices.device import Device
from core.test_run import TestRun
from test_utils.output import Output
class DmTarget(Enum):
    """Device-mapper target kinds.

    Each member's value is the tuple of expected parameter types consumed
    by DmTable.TableEntry.validate(); an empty tuple disables validation.

    NOTE(review): with the stdlib Enum, members whose values compare equal
    become aliases - every ()-valued member below (ZERO, CRYPT, MIRROR,
    MULTIPATH, RAID, SNAPSHOT) therefore aliases ERROR, so e.g.
    str(DmTarget.ZERO) yields "error".  Fixing this needs a coordinated
    change here and in TableEntry.validate(); confirm before using those
    targets.
    """
    # Fill argument types for other targets if you need them
    LINEAR = (str, int)
    STRIPED = (int, int, list)
    ERROR = ()
    ZERO = ()
    CRYPT = ()
    DELAY = (str, int, int, str, int, int)
    FLAKEY = (str, int, int, int)
    MIRROR = ()
    MULTIPATH = ()
    RAID = ()
    SNAPSHOT = ()
    def __str__(self):
        return self.name.lower()
# Forward declaration: lets the annotation `entry: DmTable.TableEntry` in
# the full DmTable class below resolve at definition time.  The real
# definition immediately shadows this stub.
class DmTable:
    class TableEntry:
        pass
class DmTable:
    """In-memory model of a dmsetup table: a list of TableEntry rows, each
    mapping a range of 512-byte sectors to a device-mapper target."""
    class TableEntry:
        """One table row: `<offset> <length> <target> [params...]`;
        offset and length are in 512-byte sectors."""
        def __init__(self, offset: int, length: int, target: DmTarget, *params):
            self.offset = int(offset)
            self.length = int(length)
            self.target = DmTarget(target)
            self.params = list(params)
            self.validate()
        def validate(self):
            # Coerce each param to the type declared in the target's enum
            # value; an empty tuple means "accept anything unchecked".
            if self.target.value:
                for i in range(len(self.params)):
                    try:
                        self.params[i] = self.target.value[i](self.params[i])
                    except IndexError:
                        raise ValueError("invalid dm target parameter")
        def __str__(self):
            ret = f"{self.offset} {self.length} {self.target}"
            for param in self.params:
                ret += f" {param}"
            return ret
    def __init__(self):
        # Entries are kept unsorted; consumers sort by offset when needed.
        self.table = []
    @classmethod
    def uniform_error_table(
        cls, start_lba: int, stop_lba: int, num_error_zones: int, error_zone_size: Size
    ):
        """Scatter `num_error_zones` error targets of `error_zone_size`
        evenly over [start_lba, stop_lba); the space in between stays
        unmapped until fill_gaps() is called."""
        table = cls()
        increment = (stop_lba - start_lba) // num_error_zones
        for zone_start in range(start_lba, stop_lba, increment):
            table.add_entry(
                DmTable.TableEntry(
                    zone_start,
                    error_zone_size.get_value(Unit.Blocks512),
                    DmTarget.ERROR,
                )
            )
        return table
    @classmethod
    def passthrough_table(cls, device: Device):
        """Single linear target mapping the whole of *device* 1:1."""
        table = cls()
        table.add_entry(
            DmTable.TableEntry(
                0,
                device.size.get_value(Unit.Blocks512),
                DmTarget.LINEAR,
                device.system_path,
                0,
            )
        )
        return table
    @classmethod
    def error_table(cls, offset: int, size: Size):
        """Single error target covering `size` sectors at `offset`."""
        table = cls()
        table.add_entry(
            DmTable.TableEntry(offset, size.get_value(Unit.Blocks512), DmTarget.ERROR)
        )
        return table
    def fill_gaps(self, device: Device, fill_end=True):
        """Plug every hole between entries with a linear 1:1 mapping onto
        *device*; with fill_end, also extend the table to the device end."""
        gaps = self.get_gaps()
        # The last element is the open-ended tail marker (length -1).
        for gap in gaps[:-1]:
            self.add_entry(
                DmTable.TableEntry(
                    gap[0], gap[1], DmTarget.LINEAR, device.system_path, int(gap[0])
                )
            )
        table_end = gaps[-1][0]
        if fill_end and (Size(table_end, Unit.Blocks512) < device.size):
            self.add_entry(
                DmTable.TableEntry(
                    table_end,
                    device.size.get_value(Unit.Blocks512) - table_end,
                    DmTarget.LINEAR,
                    device.system_path,
                    table_end,
                )
            )
        return self
    def add_entry(self, entry: DmTable.TableEntry):
        # The annotation resolves against the forward-declared stub above.
        self.table.append(entry)
        return self
    def get_gaps(self):
        """Return unmapped (offset, length) holes in offset order; a final
        (tail_offset, -1) pair marks the open region past the last entry."""
        if not self.table:
            return [(0, -1)]
        gaps = []
        self.table.sort(key=lambda entry: entry.offset)
        if self.table[0].offset != 0:
            gaps.append((0, self.table[0].offset))
        for e1, e2 in zip(self.table, self.table[1:]):
            if e1.offset + e1.length != e2.offset:
                gaps.append(
                    (e1.offset + e1.length, e2.offset - (e1.offset + e1.length))
                )
        if len(self.table) > 1:
            # e2 still holds the last entry from the loop above.
            gaps.append((e2.offset + e2.length, -1))
        else:
            gaps.append((self.table[0].offset + self.table[0].length, -1))
        return gaps
    def validate(self):
        """Raise ValueError unless entries start at sector 0 and tile the
        range with no holes or overlaps (a dmsetup load requirement)."""
        self.table.sort(key=lambda entry: entry.offset)
        if self.table[0].offset != 0:
            raise ValueError(f"dm table should start at LBA 0: {self.table[0]}")
        for e1, e2 in zip(self.table, self.table[1:]):
            if e1.offset + e1.length != e2.offset:
                raise ValueError(
                    f"dm table should not have any holes or overlaps: {e1} -> {e2}"
                )
    def get_size(self):
        """Total mapped size: end of the last entry, in 512-byte sectors."""
        self.table.sort(key=lambda entry: entry.offset)
        return Size(self.table[-1].offset + self.table[-1].length, Unit.Blocks512)
    def __str__(self):
        output = ""
        for entry in self.table:
            output += f"{entry}\n"
        return output
class DeviceMapper(LinuxCommand):
    """Thin wrapper around the dmsetup CLI for one named dm device."""
    @classmethod
    def remove_all(cls, force=True):
        """Tear down every device-mapper device on the system."""
        TestRun.LOGGER.info("Removing all device mapper devices")
        cmd = "dmsetup remove_all"
        if force:
            cmd += " --force"
        return TestRun.executor.run_expect_success(cmd)
    def __init__(self, name: str):
        LinuxCommand.__init__(self, TestRun.executor, "dmsetup")
        self.name = name
    @staticmethod
    def wrap_table(table: DmTable):
        # Feed the table through a shell here-document so a multi-line
        # table survives as dmsetup's stdin within a single command string.
        return f"<< ENDHERE\n{str(table)}ENDHERE\n"
    def get_path(self):
        return f"/dev/mapper/{self.name}"
    def clear(self):
        return TestRun.executor.run_expect_success(f"{self.command_name} clear {self.name}")
    def create(self, table: DmTable):
        """Validate *table* (logging every entry on failure) and create
        the dm device from it."""
        try:
            table.validate()
        except ValueError:
            for entry in table.table:
                TestRun.LOGGER.error(f"{entry}")
            raise
        TestRun.LOGGER.info(f"Creating device mapper device '{self.name}'")
        for entry in table.table:
            TestRun.LOGGER.debug(f"{entry}")
        return TestRun.executor.run_expect_success(
            f"{self.command_name} create {self.name} {self.wrap_table(table)}"
        )
    def remove(self):
        TestRun.LOGGER.info(f"Removing device mapper device '{self.name}'")
        return TestRun.executor.run_expect_success(f"{self.command_name} remove {self.name}")
    def suspend(self):
        TestRun.LOGGER.info(f"Suspending device mapper device '{self.name}'")
        return TestRun.executor.run_expect_success(f"{self.command_name} suspend {self.name}")
    def resume(self):
        TestRun.LOGGER.info(f"Resuming device mapper device '{self.name}'")
        return TestRun.executor.run_expect_success(f"{self.command_name} resume {self.name}")
    def reload(self, table: DmTable):
        """Load a new *table*; it takes effect on the next resume."""
        table.validate()
        TestRun.LOGGER.info(f"Reloading table for device mapper device '{self.name}'")
        for entry in table.table:
            TestRun.LOGGER.debug(f"{entry}")
        return TestRun.executor.run_expect_success(
            f"{self.command_name} reload {self.name} {self.wrap_table(table)}"
        )
class ErrorDevice(Device):
    """Device-mapper wrapper that injects I/O errors into selected regions
    of a base device (dm 'error' targets in the active table)."""

    def __init__(self, name: str, base_device: Device, table: DmTable = None):
        self.device = base_device
        self.mapper = DeviceMapper(name)
        self.name = name
        # Default to a transparent 1:1 (linear) mapping when no table given.
        self.table = DmTable.passthrough_table(base_device) if not table else table
        self.active = False
        self.start()

    @property
    def system_path(self):
        if self.active:
            output = TestRun.executor.run_expect_success(f"realpath {self.mapper.get_path()}")
            return output.stdout
        return None

    @property
    def size(self):
        if self.active:
            return self.table.get_size()
        return None

    def start(self):
        """Create the dm device from the current table."""
        self.mapper.create(self.table)
        self.active = True

    def stop(self):
        """Remove the dm device entirely."""
        self.mapper.remove()
        self.active = False

    def change_table(self, table: DmTable, permanent=True):
        """Swap in a new table; with permanent=False the stored table is
        kept so resume_errors() can restore it later."""
        if self.active:
            self.mapper.suspend()
        self.mapper.reload(table)
        self.mapper.resume()
        if permanent:
            self.table = table

    def suspend_errors(self):
        """Temporarily replace the error table with a passthrough mapping."""
        empty_table = DmTable.passthrough_table(self.device)
        TestRun.LOGGER.info(f"Suspending issuing errors for error device '{self.name}'")
        self.change_table(empty_table, False)

    def resume_errors(self):
        """Restore the stored (error-injecting) table."""
        TestRun.LOGGER.info(f"Resuming issuing errors for error device '{self.name}'")
        self.change_table(self.table, False)

    def suspend(self):
        if not self.active:
            # BUGFIX: the original logged "It's already running" here (a
            # copy-paste from resume()) and then suspended anyway despite
            # saying "cannot".  Warn accurately and bail out.
            TestRun.LOGGER.warning(
                f"cannot suspend error device '{self.name}'! It's already suspended"
            )
            return
        self.mapper.suspend()
        self.active = False

    def resume(self):
        if self.active:
            # BUGFIX: the original warned "cannot resume" but resumed
            # anyway; now it returns early as the message implies.
            TestRun.LOGGER.warning(
                f"cannot resume error device '{self.name}'! It's already running"
            )
            return
        self.mapper.resume()
        self.active = True
```
#### File: test-framework/test_tools/fs_utils.py
```python
import base64
import math
import textwrap
from datetime import datetime
from aenum import IntFlag, Enum
from core.test_run import TestRun
from test_tools.dd import Dd
from test_utils.size import Size, Unit
class Permissions(IntFlag):
    """One rwx permission triplet; str() renders e.g. 'rw'."""
    r = 4
    w = 2
    x = 1

    def __str__(self):
        # Class iteration order (r, w, x) fixes the rendering order.
        return ''.join(flag.name for flag in Permissions if flag in self)
class PermissionsUsers(IntFlag):
    """chmod user selector (user/group/other); str() renders e.g. 'ug'."""
    u = 4
    g = 2
    o = 1

    def __str__(self):
        # Class iteration order (u, g, o) fixes the rendering order.
        return ''.join(who.name for who in PermissionsUsers if who in self)
class PermissionSign(Enum):
    """chmod mode operator: add (+), remove (-) or set (=) permissions."""
    add = '+'
    remove = '-'
    set = '='
def create_directory(path, parents: bool = False):
    """mkdir on the DUT; with parents=True also create missing ancestors."""
    cmd = f"mkdir {'--parents ' if parents else ''}{path}"
    return TestRun.executor.run_expect_success(cmd)
def check_if_directory_exists(path):
    """True when *path* exists and is a directory (shell `test -d`)."""
    return TestRun.executor.run(f"test -d {path}").exit_code == 0
def check_if_file_exists(path):
    """True when *path* exists - any type, not only regular files (`test -e`)."""
    return TestRun.executor.run(f"test -e {path}").exit_code == 0
def copy(source: str,
         destination: str,
         force: bool = False,
         recursive: bool = False,
         dereference: bool = False):
    """cp wrapper; flags map 1:1 to --force/--recursive/--dereference."""
    cmd = f"cp{' --force' if force else ''}" \
          f"{' --recursive' if recursive else ''}" \
          f"{' --dereference' if dereference else ''} " \
          f"{source} {destination}"
    return TestRun.executor.run_expect_success(cmd)
def move(source, destination, force: bool = False):
    """mv wrapper."""
    cmd = f"mv{' --force' if force else ''} {source} {destination}"
    return TestRun.executor.run_expect_success(cmd)
def remove(path, force: bool = False, recursive: bool = False, ignore_errors: bool = False):
    """rm wrapper; raises on failure unless ignore_errors, returns output."""
    cmd = f"rm{' --force' if force else ''}{' --recursive' if recursive else ''} {path}"
    output = TestRun.executor.run(cmd)
    if output.exit_code != 0 and not ignore_errors:
        raise Exception(f"Could not remove file {path}."
                        f"\nstdout: {output.stdout}\nstderr: {output.stderr}")
    return output
def chmod(path, permissions: Permissions, users: PermissionsUsers,
          sign: PermissionSign = PermissionSign.set, recursive: bool = False):
    """Symbolic chmod (e.g. ug+rw).
    NOTE(review): unlike chmod_numerical this returns the raw output with
    no success check - confirm whether that is intentional."""
    cmd = f"chmod{' --recursive' if recursive else ''} " \
          f"{str(users)}{sign.value}{str(permissions)} {path}"
    output = TestRun.executor.run(cmd)
    return output
def chmod_numerical(path, permissions: int, recursive: bool = False):
    """Octal chmod (e.g. 755)."""
    cmd = f"chmod{' --recursive' if recursive else ''} {permissions} {path}"
    return TestRun.executor.run_expect_success(cmd)
def chown(path, owner, group, recursive):
    """chown wrapper; note `recursive` has no default, unlike siblings."""
    cmd = f"chown {'--recursive ' if recursive else ''}{owner}:{group} {path}"
    return TestRun.executor.run_expect_success(cmd)
def create_file(path):
    """touch *path* (quoted, so spaces are allowed)."""
    if not path.strip():
        raise ValueError("Path cannot be empty or whitespaces.")
    cmd = f"touch '{path}'"
    return TestRun.executor.run_expect_success(cmd)
def compare(file, other_file):
    """Byte-compare two files with cmp: True if identical, False if they
    differ, exception when cmp itself fails (exit code > 1)."""
    output = TestRun.executor.run(
        f"cmp --silent {file} {other_file}")
    if output.exit_code == 0:
        return True
    elif output.exit_code > 1:
        raise Exception(f"Compare command execution failed. {output.stdout}\n{output.stderr}")
    else:
        return False
def diff(file, other_file):
    """Return the textual diff of two files, or None when identical.

    Raises when diff itself fails (exit code > 1, e.g. missing file).
    """
    output = TestRun.executor.run(
        f"diff {file} {other_file}")
    if output.exit_code == 0:
        return None
    elif output.exit_code > 1:
        raise Exception(f"Diff command execution failed. {output.stdout}\n{output.stderr}")
    else:
        # BUGFIX: diff(1) writes the differences to stdout (stderr carries
        # only diagnostics); the original returned output.stderr, which is
        # empty on a normal "files differ" run.
        return output.stdout
# sed is invoked on system paths with '/' as the expression separator, so
# '/' (and the quote character) must be escaped in patterns and targets.
def escape_sed_string(string: str, sed_replace: bool = False):
    """Escape *string* for safe use inside a sed expression."""
    replacements = [("'", r"\x27"), ("/", r"\/")]
    if sed_replace:
        # '&' recalls the whole match in a sed replacement - escape it too.
        replacements.append(("&", r"\&"))
    for char, escaped in replacements:
        string = string.replace(char, escaped)
    return string
def insert_line_before_pattern(file, pattern, new_line):
    """sed: insert *new_line* before every line matching *pattern*."""
    pattern = escape_sed_string(pattern)
    new_line = escape_sed_string(new_line)
    cmd = f"sed -i '/{pattern}/i {new_line}' {file}"
    return TestRun.executor.run_expect_success(cmd)
def replace_first_pattern_occurrence(file, pattern, new_string):
    """sed: replace only the first occurrence of *pattern* in the file."""
    pattern = escape_sed_string(pattern)
    new_string = escape_sed_string(new_string, sed_replace=True)
    cmd = f"sed -i '0,/{pattern}/s//{new_string}/' {file}"
    return TestRun.executor.run_expect_success(cmd)
def replace_in_lines(file, pattern, new_string, regexp=False):
    """sed: global replace of *pattern*; regexp=True enables extended RE."""
    pattern = escape_sed_string(pattern)
    new_string = escape_sed_string(new_string, sed_replace=True)
    cmd = f"sed -i{' -r' if regexp else ''} 's/{pattern}/{new_string}/g' {file}"
    return TestRun.executor.run_expect_success(cmd)
def append_line(file, string):
    """Append *string* as a new line at the end of *file*."""
    cmd = f"echo '{string}' >> {file}"
    return TestRun.executor.run_expect_success(cmd)
def remove_lines(file, pattern, regexp=False):
    """sed: delete every line matching *pattern*."""
    pattern = escape_sed_string(pattern)
    cmd = f"sed -i{' -r' if regexp else ''} '/{pattern}/d' {file}"
    return TestRun.executor.run_expect_success(cmd)
def read_file(file):
    """Return the full text content of *file* (via cat)."""
    if not file.strip():
        raise ValueError("File path cannot be empty or whitespace.")
    output = TestRun.executor.run_expect_success(f"cat {file}")
    return output.stdout
def write_file(file, content, overwrite: bool = True, unix_line_end: bool = True):
    """Write *content* to *file* on the DUT.

    The content is base64-encoded and pushed through the executor in
    chunks of at most 60000 characters so arbitrary text survives shell
    quoting.

    :param overwrite: truncate the file first; otherwise append.
    :param unix_line_end: strip carriage returns and force a trailing newline.
    """
    if not file.strip():
        raise ValueError("File path cannot be empty or whitespace.")
    if not content:
        raise ValueError("Content cannot be empty.")
    if unix_line_end:
        # BUGFIX: str.replace returns a new string - the original discarded
        # the result, so carriage returns were never actually removed.
        content = content.replace('\r', '')
        content += '\n'
    max_length = 60000
    # NOTE(review): TextWrapper drops leading/trailing whitespace of each
    # wrapped chunk (drop_whitespace default) - confirm content with long
    # whitespace runs survives intact across chunk boundaries.
    split_content = textwrap.TextWrapper(width=max_length, replace_whitespace=False).wrap(content)
    split_content[-1] += '\n'
    for s in split_content:
        redirection_char = '>' if overwrite else '>>'
        overwrite = False  # only the first chunk may truncate the file
        encoded_content = base64.b64encode(s.encode("utf-8"))
        cmd = f"printf '{encoded_content.decode('utf-8')}' " \
              f"| base64 --decode {redirection_char} {file}"
        TestRun.executor.run_expect_success(cmd)
def uncompress_archive(file, destination=None):
    """Extract *file* (zip via unzip, anything else via tar) into
    *destination*, defaulting to the archive's own directory."""
    # Imported here to avoid a circular import with the filesystem items.
    from test_utils.filesystem.file import File
    if not isinstance(file, File):
        file = File(file)
    if not destination:
        destination = file.parent_dir
    command = (f"unzip -u {file.full_path} -d {destination}"
               if str(file).endswith(".zip")
               else f"tar --extract --file={file.full_path} --directory={destination}")
    TestRun.executor.run_expect_success(command)
def ls(path, options=''):
    """`ls -lA` with a fixed, parseable time format (see parse_ls_output)."""
    default_options = "-lA --time-style=+'%Y-%m-%d %H:%M:%S'"
    output = TestRun.executor.run(
        f"ls {default_options} {options} {path}")
    return output.stdout
def ls_item(path):
    """First ls line for *path* itself (-d: don't expand directories),
    or None when there is no output."""
    output = ls(path, '-d')
    return output.splitlines()[0] if output else None
def parse_ls_output(ls_output, dir_path=''):
    """Parse `ls -lA --time-style=+'%Y-%m-%d %H:%M:%S'` output into
    File/Directory/Symlink/FsItem objects with permissions, owner, group,
    size and modification time filled in.

    :param dir_path: when given, prepended to names (except symlinks,
                     whose name field already shows the link itself).
    """
    split_output = ls_output.split('\n')
    fs_items = []
    for line in split_output:
        if not line.strip():
            continue
        line_fields = line.split()
        if len(line_fields) < 8:
            # Header ("total N") or otherwise malformed line - skip it.
            continue
        file_type = line[0]
        if file_type not in ['-', 'd', 'l', 'b', 'c', 'p', 's']:
            continue
        # Strip the type char and any trailing SELinux context marker '.'.
        permissions = line_fields[0][1:].replace('.', '')
        owner = line_fields[2]
        group = line_fields[3]
        size = Size(float(line_fields[4]), Unit.Byte)
        split_date = line_fields[5].split('-')
        split_time = line_fields[6].split(':')
        modification_time = datetime(int(split_date[0]), int(split_date[1]), int(split_date[2]),
                                     int(split_time[0]), int(split_time[1]), int(split_time[2]))
        if dir_path and file_type != 'l':
            full_path = '/'.join([dir_path, line_fields[7]])
        else:
            full_path = line_fields[7]
        # Imported here to avoid a circular import with the filesystem items.
        from test_utils.filesystem.file import File, FsItem
        from test_utils.filesystem.directory import Directory
        from test_utils.filesystem.symlink import Symlink
        if file_type == '-':
            fs_item = File(full_path)
        elif file_type == 'd':
            fs_item = Directory(full_path)
        elif file_type == 'l':
            fs_item = Symlink(full_path)
        else:
            fs_item = FsItem(full_path)
        # Each 'rwx' triplet becomes a Permissions flag; '---' maps to the
        # empty flag value.
        fs_item.permissions.user = Permissions['|'.join(list(permissions[:3].replace('-', '')))] \
            if permissions[:3] != '---' else Permissions(0)
        fs_item.permissions.group = Permissions['|'.join(list(permissions[3:6].replace('-', '')))] \
            if permissions[3:6] != '---' else Permissions(0)
        fs_item.permissions.other = Permissions['|'.join(list(permissions[6:].replace('-', '')))] \
            if permissions[6:] != '---' else Permissions(0)
        fs_item.owner = owner
        fs_item.group = group
        fs_item.size = size
        fs_item.modification_time = modification_time
        fs_items.append(fs_item)
    return fs_items
def create_random_test_file(target_file_path: str,
                            file_size: Size = Size(1, Unit.MebiByte),
                            random: bool = True):
    """Create a file of about *file_size*, filled from /dev/urandom (or
    zeros), via dd with direct I/O; returns the refreshed File object.
    The size is rounded up to a whole number of 512 KiB blocks."""
    from test_utils.filesystem.file import File
    bs = Size(512, Unit.KibiByte)
    cnt = math.ceil(file_size.value / bs.value)
    file = File.create_file(target_file_path)
    dd = Dd().output(target_file_path) \
        .input("/dev/urandom" if random else "/dev/zero") \
        .block_size(bs) \
        .count(cnt) \
        .oflag("direct")
    dd.run()
    file.refresh_item()
    return file
```
#### File: test-framework/test_utils/singleton.py
```python
class Singleton(type):
    """Metaclass: every class using it yields exactly one shared instance."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        instance = cls._instances.get(cls)
        if instance is None:
            # First instantiation: build and cache the singleton.
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return instance
```
#### File: test-framework/test_utils/size.py
```python
import enum
import math
from multimethod import multimethod
def parse_unit(str_unit: str):
    """Translate a textual unit (enum member name or common abbreviation)
    into a Unit member; raises ValueError for unknown strings."""
    # Exact enum-member names ("KibiByte", "Byte", ...) win first.
    for unit in Unit:
        if str_unit == unit.name:
            return unit
    aliases = {
        "KiB": Unit.KibiByte,
        "4KiB blocks": Unit.Blocks4096,
        "4KiB Blocks": Unit.Blocks4096,
        "MiB": Unit.MebiByte,
        "GiB": Unit.GibiByte,
        "TiB": Unit.TebiByte,
        "B": Unit.Byte,
        "KB": Unit.KiloByte,
        "MB": Unit.MegaByte,
        "GB": Unit.GigaByte,
        "TB": Unit.TeraByte,
    }
    if str_unit in aliases:
        return aliases[str_unit]
    raise ValueError(f"Unable to parse {str_unit}")
class Unit(enum.Enum):
    """Size units; each member's value is its size in bytes.

    KiB/KB/MiB/... are intentional enum aliases of the long names, so
    iterating Unit yields only the canonical members.
    """
    Byte = 1
    KiloByte = 1000
    KibiByte = 1024
    MegaByte = 1000 * KiloByte
    MebiByte = 1024 * KibiByte
    GigaByte = 1000 * MegaByte
    GibiByte = 1024 * MebiByte
    TeraByte = 1000 * GigaByte
    TebiByte = 1024 * GibiByte
    Blocks512 = 512
    Blocks4096 = 4096
    KiB = KibiByte
    KB = KiloByte
    MiB = MebiByte
    MB = MegaByte
    GiB = GibiByte
    GB = GigaByte
    TiB = TebiByte
    TB = TeraByte
    def get_value(self):
        # Uniform accessor shared with UnitPerSecond.
        return self.value
class UnitPerSecond:
    """Throughput unit derived from a plain Unit (e.g. "MebiByte/s")."""

    def __init__(self, unit):
        self.name = unit.name + "/s"
        self.value = unit.get_value()

    def get_value(self):
        return self.value
class Size:
    """Exact size value, stored internally in bytes (self.value) while
    remembering the construction unit for display in __str__.

    Arithmetic returns byte-based Size objects; the @multimethod decorator
    registers multiple overloads of the same operator by argument type, so
    the repeated __mul__/__truediv__ definitions below do not shadow each
    other.
    """
    def __init__(self, value: float, unit: Unit = Unit.Byte):
        if value < 0:
            raise ValueError("Size has to be positive.")
        self.value = value * unit.value
        self.unit = unit
    def __str__(self):
        return f"{self.get_value(self.unit)} {self.unit.name}"
    def __hash__(self):
        return self.value.__hash__()
    def __int__(self):
        return int(self.get_value())
    def __add__(self, other):
        return Size(self.get_value() + other.get_value())
    def __lt__(self, other):
        return self.get_value() < other.get_value()
    def __le__(self, other):
        return self.get_value() <= other.get_value()
    def __eq__(self, other):
        return self.get_value() == other.get_value()
    def __ne__(self, other):
        return self.get_value() != other.get_value()
    def __gt__(self, other):
        return self.get_value() > other.get_value()
    def __ge__(self, other):
        return self.get_value() >= other.get_value()
    def __sub__(self, other):
        if self < other:
            raise ValueError("Subtracted value is too big. Result size cannot be negative.")
        return Size(self.get_value() - other.get_value())
    @multimethod
    def __mul__(self, other: int):
        # Size * int -> Size, rounded up to a whole byte.
        return Size(math.ceil(self.get_value() * other))
    @multimethod
    def __rmul__(self, other: int):
        return Size(math.ceil(self.get_value() * other))
    @multimethod
    def __mul__(self, other: float):
        # Float overload registered on the same multimethod as above.
        return Size(math.ceil(self.get_value() * other))
    @multimethod
    def __rmul__(self, other: float):
        return Size(math.ceil(self.get_value() * other))
    @multimethod
    def __truediv__(self, other):
        # Size / Size -> plain float ratio.
        if other.get_value() == 0:
            raise ValueError("Divisor must not be equal to 0.")
        return self.get_value() / other.get_value()
    @multimethod
    def __truediv__(self, other: int):
        # Size / int -> Size, rounded up to a whole byte.
        if other == 0:
            raise ValueError("Divisor must not be equal to 0.")
        return Size(math.ceil(self.get_value() / other))
    def set_unit(self, new_unit: Unit):
        """Re-express this size in *new_unit*; raises when the unit cannot
        represent the value exactly."""
        new_size = Size(self.get_value(target_unit=new_unit), unit=new_unit)
        if new_size != self:
            raise ValueError(f"{new_unit} is not precise enough for {self}")
        self.value = new_size.value
        self.unit = new_size.unit
        return self
    def get_value(self, target_unit: Unit = Unit.Byte):
        """Numeric value expressed in *target_unit* (float)."""
        return self.value / target_unit.value
    def is_zero(self):
        if self.value == 0:
            return True
        else:
            return False
    def align_up(self, alignment):
        """Round up to a multiple of *alignment* bytes (power of two)."""
        if self == self.align_down(alignment):
            return Size(int(self))
        return Size(int(self.align_down(alignment)) + alignment)
    def align_down(self, alignment):
        """Round down to a multiple of *alignment* bytes (power of two)."""
        if alignment <= 0:
            raise ValueError("Alignment must be a positive value!")
        if alignment & (alignment - 1):
            raise ValueError("Alignment must be a power of two!")
        return Size(int(self) & ~(alignment - 1))
    @staticmethod
    def zero():
        return Size(0)
``` |
{
"source": "josehu07/prefa-master",
"score": 4
} |
#### File: josehu07/prefa-master/simple-lexer.py
```python
from prefa import dfa, ere
class Lexer(object):
    """Simple example of a toy lexer.
    Using a bunch of recognizers (which are minimal DFAs) to perform very
    simple lexing analysis.
    """
    def __init__(self, rules):
        # One minimal-DFA recognizer per rule, built from its regex.
        self.recognizers = dict([(r, dfa.DFiniteAutomata(ere.Regex(rules[r])) \
                                     .minimalDFA()) for r in rules])
    def tokenize(self, input_str):
        """Tokenize the input string.
        Tokenize the input string according to the lexing rules. First split
        the input according to whitespaces, then check every element using
        the recognizers.
        Args:
            input_str  - str, the string to perform lexing
        Returns:
            output_str - str, the result of tokenizing
        """
        elements, output_str = input_str.strip().split(), ''
        for s in elements:
            match_flag = False
            # Rules are tried in dict insertion order, so the ordering of
            # the rules dict doubles as matching priority.
            for r in self.recognizers:
                if self.recognizers[r].simulate(s):
                    output_str += '%10s: %s\n' % (r, s)
                    match_flag = True
                    break
            if not match_flag:  # Will raise ValueError if no matching.
                raise ValueError('No matching rule for %r' % s)
        print(output_str)
if __name__ == '__main__':
    # Token rules for a tiny toy language; dict order is tried first to
    # last when several recognizers could match.
    rules = {
        'L-FORMAL': '<',
        'R-FORMAL': '>',
        'L-BODY': '{',
        'R-BODY': '}',
        'BOOL-AND': '&&',
        'COMMA': ',',
        'ASSIGN': '<=',
        'DEF': 'def',
        'SELF': 'self',
        'RET': 'return',
        'INT': '[0-9]+',
        'ID': '[A-Z]+'
    }
    # Whitespace-separated demo program to tokenize.
    program = '''
        def AND < self , X , Y > {
            X <= 1
            Y <= 99
            return X && Y
        }
    '''
    Lexer(rules).tokenize(program)
``` |
{
"source": "josehu07/SplitFS",
"score": 2
} |
#### File: Documentation/sphinx/load_config.py
```python
import os
import sys
from sphinx.util.pycompat import execfile_
# ------------------------------------------------------------------------------
def loadConfig(namespace):
# ------------------------------------------------------------------------------

    u"""Load an additional configuration file into *namespace*.

    The name of the configuration file is taken from the environment
    ``SPHINX_CONF``. The external configuration file extends (or overwrites) the
    configuration values from the origin ``conf.py``.  With this you are able to
    maintain *build themes*.  No-op when ``SPHINX_CONF`` is unset or points at
    the origin ``conf.py`` itself.
    """

    config_file = os.environ.get("SPHINX_CONF", None)

    if (config_file is not None
        and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ):
        config_file = os.path.abspath(config_file)

        # Let's avoid one conf.py file just due to latex_documents
        start = config_file.find('Documentation/')
        if start >= 0:
            start = config_file.find('/', start + 1)
        end = config_file.rfind('/')
        if start >= 0 and end > 0:
            # Renamed from `dir` (shadowed the builtin); also dropped an
            # unused `has = True` flag from the loop below.
            src_dir = config_file[start + 1:end]

            print("source directory: %s" % src_dir)
            new_latex_docs = []
            latex_documents = namespace['latex_documents']

            # Keep only the latex document whose source lives in src_dir,
            # with the directory prefix stripped from its file name.
            for doc in latex_documents:
                if doc[0].find(src_dir + '/') == 0:
                    fn = doc[0][len(src_dir) + 1:]
                    new_latex_docs.append((fn, doc[1], doc[2], doc[3], doc[4]))
                    break

            namespace['latex_documents'] = new_latex_docs

        # If there is an extra conf.py file, load it
        if os.path.isfile(config_file):
            sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
            config = namespace.copy()
            config['__file__'] = config_file
            execfile_(config_file, config)
            del config['__file__']
            namespace.update(config)
        else:
            # No real file behind SPHINX_CONF: just tag the build.
            config = namespace.copy()
            config['tags'].add("subproject")
            namespace.update(config)
```
#### File: gdb/linux/config.py
```python
import gdb
import zlib
from linux import utils
class LxConfigDump(gdb.Command):
    """Output kernel config to the filename specified as the command
    argument. Equivalent to 'zcat /proc/config.gz > config.txt' on
    a running target"""
    def __init__(self):
        super(LxConfigDump, self).__init__("lx-configdump", gdb.COMMAND_DATA,
                                           gdb.COMPLETE_FILENAME)
    def invoke(self, arg, from_tty):
        if len(arg) == 0:
            filename = "config.txt"
        else:
            filename = arg
        try:
            # Skip the leading 8-byte marker; the size drops both 8-byte
            # markers plus the trailing NUL around the gzip payload
            # (presumably the IKCFG_ST/IKCFG_ED markers - confirm against
            # the kernel's CONFIG_IKCONFIG implementation).
            py_config_ptr = gdb.parse_and_eval("kernel_config_data + 8")
            py_config_size = gdb.parse_and_eval(
                "sizeof(kernel_config_data) - 1 - 8 * 2")
        except gdb.error as e:
            # NOTE(review): `e` is unused and the original cause is dropped.
            raise gdb.GdbError("Can't find config, enable CONFIG_IKCONFIG?")
        inf = gdb.inferiors()[0]
        zconfig_buf = utils.read_memoryview(inf, py_config_ptr,
                                            py_config_size).tobytes()
        # Decompress the gzip-wrapped config blob.
        config_buf = zlib.decompress(zconfig_buf, 16)
        with open(filename, 'wb') as f:
            f.write(config_buf)
        gdb.write("Dumped config to " + filename + "\n")
LxConfigDump()
```
#### File: gdb/linux/dmesg.py
```python
import gdb
import sys
from linux import utils
# Cached gdb handle for struct printk_log (field offsets resolved lazily).
printk_log_type = utils.CachedType("struct printk_log")

class LxDmesg(gdb.Command):
    """Print Linux kernel log buffer."""

    def __init__(self):
        super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        log_buf_addr = int(str(gdb.parse_and_eval(
            "(void *)'printk.c'::log_buf")).split()[0], 16)
        log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
        log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
        log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
        inf = gdb.inferiors()[0]
        start = log_buf_addr + log_first_idx
        if log_first_idx < log_next_idx:
            # Contiguous region: a single read covers all records.
            log_buf_2nd_half = -1
            length = log_next_idx - log_first_idx
            log_buf = utils.read_memoryview(inf, start, length).tobytes()
        else:
            # Ring buffer has wrapped: read [first..len) then [0..next).
            log_buf_2nd_half = log_buf_len - log_first_idx
            a = utils.read_memoryview(inf, start, log_buf_2nd_half)
            b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
            log_buf = a.tobytes() + b.tobytes()
        # Byte offsets of the struct printk_log fields we read below.
        length_offset = printk_log_type.get_type()['len'].bitpos // 8
        text_len_offset = printk_log_type.get_type()['text_len'].bitpos // 8
        time_stamp_offset = printk_log_type.get_type()['ts_nsec'].bitpos // 8
        text_offset = printk_log_type.get_type().sizeof
        pos = 0
        while pos < log_buf.__len__():
            length = utils.read_u16(log_buf, pos + length_offset)
            if length == 0:
                # A zero-length record marks the buffer wrap point.
                if log_buf_2nd_half == -1:
                    gdb.write("Corrupted log buffer!\n")
                    # BUGFIX: this break was dedented one level in the
                    # original, which made the two lines below unreachable
                    # and aborted every wrapped buffer as "corrupted".
                    break
                pos = log_buf_2nd_half
                continue
            text_len = utils.read_u16(log_buf, pos + text_len_offset)
            text_start = pos + text_offset
            text = log_buf[text_start:text_start + text_len].decode(
                encoding='utf8', errors='replace')
            time_stamp = utils.read_u64(log_buf, pos + time_stamp_offset)
            for line in text.splitlines():
                msg = u"[{time:12.6f}] {line}\n".format(
                    time=time_stamp / 1000000000.0,
                    line=line)
                # With python2 gdb.write will attempt to convert unicode to
                # ascii and might fail so pass an utf8-encoded str instead.
                if sys.hexversion < 0x03000000:
                    msg = msg.encode(encoding='utf8', errors='replace')
                gdb.write(msg)
            pos += length

LxDmesg()
```
#### File: gdb/linux/genpd.py
```python
import gdb
import sys
from linux.utils import CachedType
from linux.lists import list_for_each_entry
# Lazily-resolved handles to the kernel types walked by the summary command.
generic_pm_domain_type = CachedType('struct generic_pm_domain')
pm_domain_data_type = CachedType('struct pm_domain_data')
device_link_type = CachedType('struct device_link')
def kobject_get_path(kobj):
    """Return the '/'-separated path of *kobj*, walking its parent links."""
    segments = [kobj['name'].string()]
    node = kobj['parent']
    while node:
        segments.append(node['name'].string())
        node = node['parent']
    return '/'.join(reversed(segments))
def rtpm_status_str(dev):
    """Return the human-readable runtime-PM status string for *dev*."""
    power = dev['power']
    if power['runtime_error']:
        return 'error'
    if power['disable_depth']:
        return 'unsupported'
    # Index the status name directly from the runtime_status value.
    return ("active", "resuming", "suspended",
            "suspending")[power['runtime_status']]
class LxGenPDSummary(gdb.Command):
    '''Print genpd summary

Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''

    def __init__(self):
        super(LxGenPDSummary, self).__init__('lx-genpd-summary',
                                             gdb.COMMAND_DATA)

    def summary_one(self, genpd):
        """Write one summary line for *genpd* plus one line per device."""
        if genpd['status'] == 0:
            status_string = 'on'
        else:
            # Domain is off; report which idle-state index it settled in.
            status_string = 'off-{}'.format(genpd['state_idx'])

        slave_names = []
        for link in list_for_each_entry(
                genpd['master_links'],
                device_link_type.get_type().pointer(),
                'master_node'):
            # Fixed: was slave_names.apend(...), which raised AttributeError
            # for any domain that actually has slave links.
            slave_names.append(link['slave']['name'])

        gdb.write('%-30s %-15s %s\n' % (
                genpd['name'].string(),
                status_string,
                ', '.join(slave_names)))

        # Print devices in domain
        for pm_data in list_for_each_entry(genpd['dev_list'],
                        pm_domain_data_type.get_type().pointer(),
                        'list_node'):
            dev = pm_data['dev']
            kobj_path = kobject_get_path(dev['kobj'])
            gdb.write('    %-50s  %s\n' % (kobj_path, rtpm_status_str(dev)))

    def invoke(self, arg, from_tty):
        gdb.write('domain                          status          slaves\n')
        gdb.write('    /device                                             runtime status\n')
        gdb.write('----------------------------------------------------------------------\n')
        for genpd in list_for_each_entry(
                gdb.parse_and_eval('&gpd_list'),
                generic_pm_domain_type.get_type().pointer(),
                'gpd_list_node'):
            self.summary_one(genpd)


LxGenPDSummary()
```
#### File: gdb/linux/symbols.py
```python
import gdb
import os
import re
from linux import modules, utils
# Older gdb builds may lack the Python Breakpoint API entirely.
if hasattr(gdb, 'Breakpoint'):

    class LoadModuleBreakpoint(gdb.Breakpoint):
        """Silent internal breakpoint that reloads symbols when the kernel
        finishes loading a module."""

        def __init__(self, spec, gdb_command):
            super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
            self.silent = True
            # The LxSymbols command instance that owns the module caches.
            self.gdb_command = gdb_command

        def stop(self):
            # "mod" is a local variable at the breakpoint location.
            module = gdb.parse_and_eval("mod")
            module_name = module['name'].string()
            cmd = self.gdb_command

            # enforce update if object file is not found
            cmd.module_files_updated = False

            # Disable pagination while reporting symbol (re-)loading.
            # The console input is blocked in this context so that we would
            # get stuck waiting for the user to acknowledge paged output.
            show_pagination = gdb.execute("show pagination", to_string=True)
            pagination = show_pagination.endswith("on.\n")
            gdb.execute("set pagination off")

            if module_name in cmd.loaded_modules:
                gdb.write("refreshing all symbols to reload module "
                          "'{0}'\n".format(module_name))
                cmd.load_all_symbols()
            else:
                cmd.load_module_symbols(module)

            # restore pagination state
            gdb.execute("set pagination %s" % ("on" if pagination else "off"))

            # Never actually halt execution at this breakpoint.
            return False
class LxSymbols(gdb.Command):
    """(Re-)load symbols of Linux kernel and currently loaded modules.

The kernel (vmlinux) is taken from the current working directly. Modules (.ko)
are scanned recursively, starting in the same directory. Optionally, the module
search path can be extended by a space separated list of paths passed to the
lx-symbols command."""

    module_paths = []
    module_files = []
    module_files_updated = False
    loaded_modules = []
    breakpoint = None

    def __init__(self):
        super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
                                        gdb.COMPLETE_FILENAME)

    def _update_module_files(self):
        """Rescan every search path for .ko / .ko.debug object files."""
        self.module_files = []
        for path in self.module_paths:
            gdb.write("scanning for modules in {0}\n".format(path))
            for root, dirs, files in os.walk(path):
                for name in files:
                    if name.endswith(".ko") or name.endswith(".ko.debug"):
                        self.module_files.append(root + "/" + name)
        self.module_files_updated = True

    def _get_module_file(self, module_name):
        """Return the object file path matching *module_name*, or None."""
        # Module names use '_' where the file name may have '-'. Raw string
        # with escaped dots: previously "\." in a non-raw string triggered an
        # invalid-escape warning and the '.' before "debug" matched any char.
        module_pattern = r".*/{0}\.ko(?:\.debug)?$".format(
            module_name.replace("_", r"[_\-]"))
        for name in self.module_files:
            if re.match(module_pattern, name) and os.path.exists(name):
                return name
        return None

    def _section_arguments(self, module):
        """Build the "-s <section> <addr>" arguments for add-symbol-file."""
        try:
            sect_attrs = module['sect_attrs'].dereference()
        except gdb.error:
            return ""
        attrs = sect_attrs['attrs']
        section_name_to_address = {
            attrs[n]['name'].string(): attrs[n]['address']
            for n in range(int(sect_attrs['nsections']))}
        args = []
        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
                             ".text", ".text.hot", ".text.unlikely"]:
            address = section_name_to_address.get(section_name)
            if address:
                args.append(" -s {name} {addr}".format(
                    name=section_name, addr=str(address)))
        return "".join(args)

    def load_module_symbols(self, module):
        """Locate the object file for *module* and add its symbols to gdb."""
        module_name = module['name'].string()
        module_addr = str(module['core_layout']['base']).split()[0]

        module_file = self._get_module_file(module_name)
        if not module_file and not self.module_files_updated:
            self._update_module_files()
            module_file = self._get_module_file(module_name)

        if module_file:
            if utils.is_target_arch('s390'):
                # Module text is preceded by PLT stubs on s390.
                module_arch = module['arch']
                plt_offset = int(module_arch['plt_offset'])
                plt_size = int(module_arch['plt_size'])
                module_addr = hex(int(module_addr, 0) + plt_offset + plt_size)
            # Fixed: the {filename} placeholder had been lost, so the passed
            # filename keyword was silently ignored in both messages below.
            gdb.write("loading @{addr}: {filename}\n".format(
                addr=module_addr, filename=module_file))
            cmdline = "add-symbol-file {filename} {addr}{sections}".format(
                filename=module_file,
                addr=module_addr,
                sections=self._section_arguments(module))
            gdb.execute(cmdline, to_string=True)
            if module_name not in self.loaded_modules:
                self.loaded_modules.append(module_name)
        else:
            gdb.write("no module object found for '{0}'\n".format(module_name))

    def load_all_symbols(self):
        """Reload vmlinux symbols and the symbols of every loaded module."""
        gdb.write("loading vmlinux\n")

        # Dropping symbols will disable all breakpoints. So save their states
        # and restore them afterward.
        saved_states = []
        if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
            for bp in gdb.breakpoints():
                saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})

        # drop all current symbols and reload vmlinux
        orig_vmlinux = 'vmlinux'
        for obj in gdb.objfiles():
            if obj.filename.endswith('vmlinux'):
                orig_vmlinux = obj.filename
        gdb.execute("symbol-file", to_string=True)
        gdb.execute("symbol-file {0}".format(orig_vmlinux))

        self.loaded_modules = []
        module_list = modules.module_list()
        if not module_list:
            gdb.write("no modules found\n")
        else:
            [self.load_module_symbols(module) for module in module_list]

        for saved_state in saved_states:
            saved_state['breakpoint'].enabled = saved_state['enabled']

    def invoke(self, arg, from_tty):
        self.module_paths = [os.path.expanduser(p) for p in arg.split()]
        self.module_paths.append(os.getcwd())

        # enforce update
        self.module_files = []
        self.module_files_updated = False

        self.load_all_symbols()

        if hasattr(gdb, 'Breakpoint'):
            if self.breakpoint is not None:
                self.breakpoint.delete()
                self.breakpoint = None
            self.breakpoint = LoadModuleBreakpoint(
                "kernel/module.c:do_init_module", self)
        else:
            gdb.write("Note: symbol update on module loading not supported "
                      "with this gdb version\n")


LxSymbols()
```
#### File: SplitFS/splitfs/add_license.py
```python
import re
import sys
import os
def add_license(filename, lines):
    """Prepend the license text *lines* to *filename*.

    Carriage returns are stripped from the combined result before it is
    written back, normalizing CRLF files to LF. Modernized from Python 2:
    print statement replaced and files handled via context managers so they
    are closed even on error.
    """
    print("Add license to", filename)
    with open(filename, 'r') as fd:
        original = fd.read()
    combined = (lines + original).replace('\r', '')
    with open(filename, 'w') as fd:
        fd.write(combined)
def main():
    """Prepend the license file named on the command line to every .c and .h
    file below the current directory.

    Modernized from Python 2: print statements replaced, the license file is
    read as text ('rb' would yield bytes and break the str concatenation in
    add_license under Python 3) and is closed via a context manager, which
    the original never did.
    """
    if len(sys.argv) < 2:
        print("Usage: $python add_license.py license_file")
        sys.exit(0)
    license_path = sys.argv[1]
    print("License:", license_path)
    with open(license_path, 'r') as fd:
        lines = fd.read()
    for root, dirs, files in os.walk("."):
        for file1 in files:
            name = os.path.join(root, file1)
            # endswith accepts a tuple of suffixes.
            if name.endswith((".c", ".h")):
                add_license(name, lines)
# Script entry point; runs on import as well, matching the original behavior.
main()
``` |
{
"source": "josehuillca/GeoComp_TrabalhoFinal",
"score": 3
} |
#### File: GeoComp_TrabalhoFinal/quadtree/mesh.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.spatial import Delaunay
from quadtree.point import Point
from quadtree.utils import find_children, get_neighbor_of_greater_or_equal_size, Direction, deeper, DEBUG_MODE, is_inside_polygon, node_is_inside_polygon, contains
class Mesh():
    """Builds a conforming triangle mesh from the leaf cells of a quadtree.

    Each leaf cell is triangulated according to which of its four neighbours
    are subdivided more deeply (the "Padrao" templates), so triangles match
    up across cells of different refinement levels.
    """

    def __init__(self, qtree):
        # The quadtree whose leaves are triangulated.
        self.qtree = qtree
        # Flat list of [x, y] vertices; every 3 consecutive entries are one triangle.
        self.triangles = []
        # Deduplicated cell corners on the discretized polygon contour (v2 only).
        self.pts_inside_contour = []
        # Leaf nodes fully inside the polygon and holding no input points (v2 only).
        self.nodes_inside_polygon = []

    def _cell_triangles(self, d, dN, dS, dW, dE, x, y, w_, h_):
        """Append the triangles for one leaf cell at (x, y) of size (w_, h_).

        d is the cell depth; dN/dS/dW/dE are the (deepest) depths of the
        north/south/west/east neighbours. Exactly one of the 16 combinations
        of "finer neighbour" flags holds, selecting one Padrao template.

        This replaces the former chain of independent `if`s whose final
        `else` was bound only to the last test, so the DEBUG warning fired
        spuriously whenever any template other than Padrao 6 matched.
        """
        xm, ym = x + w_ / 2., y + h_ / 2.  # edge midpoints
        xr, yb = x + w_, y + h_            # right / bottom corners
        n, s, w, e = dN > d, dS > d, dW > d, dE > d  # which neighbours are finer
        if not (n or s or w or e):
            # Padrao 1: no finer neighbour -> two triangles.
            tris = [(x, y), (xr, y), (x, yb),
                    (xr, y), (xr, yb), (x, yb)]
        elif w and not (n or s or e):
            # Padrao 2: only the west neighbour is finer.
            tris = [(x, y), (xr, y), (x, ym),
                    (xr, y), (xr, yb), (x, ym),
                    (xr, yb), (x, yb), (x, ym)]
        elif e and not (n or s or w):
            # Padrao 2: only the east neighbour is finer.
            tris = [(x, y), (xr, y), (xr, ym),
                    (x, y), (xr, ym), (x, yb),
                    (xr, ym), (xr, yb), (x, yb)]
        elif n and not (s or w or e):
            # Padrao 2: only the north neighbour is finer.
            tris = [(x, y), (xm, y), (x, yb),
                    (xm, y), (xr, yb), (x, yb),
                    (xm, y), (xr, y), (xr, yb)]
        elif s and not (n or w or e):
            # Padrao 2: only the south neighbour is finer.
            tris = [(x, y), (xr, y), (xm, yb),
                    (xr, y), (xr, yb), (xm, yb),
                    (x, y), (xm, yb), (x, yb)]
        elif n and w and not (s or e):
            # Padrao 3: north and west neighbours finer.
            tris = [(x, y), (xm, y), (x, ym),
                    (xm, y), (xm, ym), (x, ym),
                    (x, ym), (xm, ym), (x, yb),
                    (xm, y), (xr, y), (xm, ym),
                    (xm, ym), (xr, yb), (x, yb),
                    (xr, y), (xr, yb), (xm, ym)]
        elif n and e and not (s or w):
            # Padrao 3: north and east neighbours finer.
            tris = [(x, y), (xr, y), (xm, ym),
                    (xm, y), (xr, ym), (xm, ym),
                    (xm, y), (xr, y), (xr, ym),
                    (x, y), (xm, ym), (x, yb),
                    (x, yb), (xm, ym), (xr, yb),
                    (xr, yb), (xm, ym), (xr, ym)]
        elif s and e and not (n or w):
            # Padrao 3: south and east neighbours finer.
            tris = [(x, y), (xm, ym), (x, yb),
                    (x, y), (xr, y), (xm, ym),
                    (xr, y), (xr, ym), (xm, ym),
                    (xm, ym), (xm, yb), (x, yb),
                    (xm, ym), (xr, ym), (xm, yb),
                    (xr, ym), (xr, yb), (xm, yb)]
        elif s and w and not (n or e):
            # Padrao 3: south and west neighbours finer.
            tris = [(x, y), (xm, ym), (x, ym),
                    (x, y), (xr, y), (xm, ym),
                    (xr, y), (xr, yb), (xm, ym),
                    (xr, yb), (xm, yb), (xm, ym),
                    (xm, yb), (x, ym), (xm, ym),
                    (xm, yb), (x, yb), (x, ym)]
        elif w and e and not (n or s):
            # Padrao 4: opposite (west/east) neighbours finer -> fan around centre.
            tris = [(x, y), (xr, y), (xm, ym),
                    (xr, y), (xr, ym), (xm, ym),
                    (xr, ym), (xr, yb), (xm, ym),
                    (xr, yb), (x, yb), (xm, ym),
                    (x, yb), (x, ym), (xm, ym),
                    (x, ym), (x, y), (xm, ym)]
        elif n and s and not (w or e):
            # Padrao 4: opposite (north/south) neighbours finer -> fan around centre.
            tris = [(x, yb), (x, y), (xm, ym),
                    (x, y), (xm, y), (xm, ym),
                    (xm, y), (xr, y), (xm, ym),
                    (xr, y), (xr, yb), (xm, ym),
                    (xr, yb), (xm, yb), (xm, ym),
                    (xm, yb), (x, yb), (xm, ym)]
        elif w and n and s and not e:
            # Padrao 5: all but the east neighbour finer.
            tris = [(x, y), (xm, y), (x, ym),
                    (xm, y), (xm, yb), (x, ym),
                    (xr, y), (xm, yb), (xm, y),
                    (xr, yb), (xm, yb), (xr, y),
                    (x, ym), (xm, yb), (x, yb)]
        elif w and n and e and not s:
            # Padrao 5: all but the south neighbour finer.
            tris = [(x, y), (xm, y), (x, ym),
                    (x, ym), (xm, y), (xr, ym),
                    (xm, y), (xr, y), (xr, ym),
                    (x, yb), (x, ym), (xr, ym),
                    (xr, yb), (x, yb), (xr, ym)]
        elif n and e and s and not w:
            # Padrao 5: all but the west neighbour finer.
            tris = [(x, yb), (x, y), (xm, yb),
                    (x, y), (xm, y), (xm, yb),
                    (xm, y), (xr, y), (xr, ym),
                    (xm, yb), (xm, y), (xr, ym),
                    (xr, yb), (xm, yb), (xr, ym)]
        elif w and e and s and not n:
            # Padrao 5: all but the north neighbour finer.
            tris = [(x, y), (xr, y), (x, ym),
                    (xr, y), (xr, ym), (x, ym),
                    (x, ym), (xr, ym), (xm, yb),
                    (x, yb), (x, ym), (xm, yb),
                    (xr, ym), (xr, yb), (xm, yb)]
        elif w and n and s and e:
            # Padrao 6: every neighbour is finer.
            tris = [(xm, y), (x, ym), (x, y),
                    (xm, y), (xr, ym), (x, ym),
                    (xm, y), (xr, y), (xr, ym),
                    (xr, ym), (xr, yb), (xm, yb),
                    (xr, ym), (xm, yb), (x, ym),
                    (xm, yb), (x, yb), (x, ym)]
        else:
            # Unreachable: the 16 branches above cover every combination.
            tris = []
            if DEBUG_MODE:
                print('warning! Not case found triangulation..')
        for vx, vy in tris:
            self.triangles.append([vx, vy])

    def mesh_generation(self):
        """Triangulate every quadtree leaf into self.triangles."""
        for u in find_children(self.qtree.root):
            d = u.depth
            dN = deeper(get_neighbor_of_greater_or_equal_size(u, Direction.N))
            dS = deeper(get_neighbor_of_greater_or_equal_size(u, Direction.S))
            dW = deeper(get_neighbor_of_greater_or_equal_size(u, Direction.W))
            dE = deeper(get_neighbor_of_greater_or_equal_size(u, Direction.E))
            self._cell_triangles(d, dN, dS, dW, dE,
                                 u.x0, u.y0, u.width, u.height)

    def mesh_generation_v2(self):
        """Like mesh_generation, but additionally records the discretized
        contour points and the leaf nodes fully inside the input polygon
        (both are later consumed by draw_delaunay)."""
        polygon_ = [(point.x, point.y) for point in self.qtree.points]
        pt_contour = []
        for u in find_children(self.qtree.root):
            d = u.depth
            NN = get_neighbor_of_greater_or_equal_size(u, Direction.N)
            NS = get_neighbor_of_greater_or_equal_size(u, Direction.S)
            NW = get_neighbor_of_greater_or_equal_size(u, Direction.W)
            NE = get_neighbor_of_greater_or_equal_size(u, Direction.E)
            x, y = u.x0, u.y0
            w_, h_ = u.width, u.height
            all_corners_inside = (
                is_inside_polygon(polygon_, [x, y]) and
                is_inside_polygon(polygon_, [x + w_, y]) and
                is_inside_polygon(polygon_, [x, y + h_]) and
                is_inside_polygon(polygon_, [x + w_, y + h_]))
            if all_corners_inside and len(u.points) == 0:
                self.nodes_inside_polygon.append(u)
                # Corners shared with a neighbour that is not fully inside
                # the polygon lie on the discretized contour.
                if not node_is_inside_polygon(NN, polygon_):
                    pt_contour += [[x, y], [x + w_, y]]
                if not node_is_inside_polygon(NS, polygon_):
                    pt_contour += [[x, y + h_], [x + w_, y + h_]]
                if not node_is_inside_polygon(NW, polygon_):
                    pt_contour += [[x, y], [x, y + h_]]
                if not node_is_inside_polygon(NE, polygon_):
                    pt_contour += [[x + w_, y], [x + w_, y + h_]]
            self._cell_triangles(d, deeper(NN), deeper(NS), deeper(NW),
                                 deeper(NE), x, y, w_, h_)
        # Deduplicate contour points, keeping first-seen order.
        seen = set()
        for a, b in pt_contour:
            if (a, b) not in seen:
                self.pts_inside_contour.append([a, b])
                seen.add((a, b))

    def draw_delaunay(self):
        """Delaunay-triangulate the input points plus the contour points and
        plot only the triangles whose centroid is inside the polygon but not
        inside a fully-interior quadtree cell."""
        polygon_ = [(point.x, point.y) for point in self.qtree.points]
        points = [[pt.x, pt.y] for pt in self.qtree.points]
        points.extend(self.pts_inside_contour)
        points = np.array(points)
        tri = Delaunay(points)
        kept = []
        for i1, i2, i3 in tri.simplices:
            # Classify each triangle by its centroid.
            c_x = (points[i1][0] + points[i2][0] + points[i3][0]) / 3.0
            c_y = (points[i1][1] + points[i2][1] + points[i3][1]) / 3.0
            if not is_inside_polygon(polygon_, [c_x, c_y]):
                continue  # triangle lies outside the original polygon
            # Drop triangles inside cells already meshed by the quadtree.
            in_interior_node = any(
                contains(el.x0, el.y0, el.width, el.height,
                         [Point(c_x, c_y)])
                for el in self.nodes_inside_polygon)
            if not in_interior_node:
                kept.append([i1, i2, i3])
        # (Removed leftover debug print of the triangle index list.)
        plt.triplot(points[:, 0], points[:, 1], kept)
        plt.plot(points[:, 0], points[:, 1], 'go')

    def draw(self, w, h, plot_points=True):
        """Render the generated mesh (plus the Delaunay contour mesh) on a
        w x h canvas with an image-style (flipped) y axis."""
        # Create a single figure (the original opened a second, unused one).
        plt.figure(figsize=(12, 8))
        ax = plt.subplot()
        ax.set_xlim(0, w)
        ax.set_ylim(h, 0)
        # Consume the flat vertex list three points at a time.
        for i in range(0, len(self.triangles) - 2, 3):
            patch = plt.Polygon(self.triangles[i:i + 3], fill=False,
                                edgecolor='black', lw=0.5)
            plt.gcf().gca().add_patch(patch)
        if plot_points:
            # plots the points as red dots
            xx = [point.x for point in self.qtree.points]
            yy = [point.y for point in self.qtree.points]
            plt.plot(xx, yy, 'ro')
            cx = [a for a, b in self.pts_inside_contour]
            cy = [b for a, b in self.pts_inside_contour]
            plt.plot(cx, cy, 'bo')
            plt.gcf().gca().add_patch(
                patches.Polygon(list(zip(xx, yy)), fill=False, color='b'))
        self.draw_delaunay()
        plt.show()
```
#### File: GeoComp_TrabalhoFinal/quadtree/quadtree.py
```python
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from quadtree.node import Node
from quadtree.point import Point
from quadtree.utils import recursive_subdivide, contains, find_children,leaf_nodes, get_neighbor_of_greater_or_equal_size,Direction,has_to_split, DEBUG_MODE
class QTree():
    """Quadtree over the rectangle [0, w] x [0, h].

    Points live on the root node; ``subdivide`` pushes them down until no
    node holds more than ``threshold`` points, and ``balanced`` refines the
    tree so neighboring leaves differ by at most one depth level.
    """
    def __init__(self, k, w, h, points):
        # k: max points in Node
        # w: width
        # h: height
        # points: List[Point]
        self.w = w
        self.h = h
        self.threshold = k
        self.points = points
        self.root = Node(0, 0, w, h, self.points)
    def add_point(self, x, y):
        """Append a new Point(x, y) to the tree's point list."""
        self.points.append(Point(x, y))
    def get_points(self):
        """Return the stored point list."""
        return self.points
    def subdivide(self):
        """Recursively split nodes that hold more than ``threshold`` points."""
        recursive_subdivide(self.root, self.threshold)
    def balanced(self):
        """Balance the tree: split leaves whose neighbors are much deeper.

        NOTE(review): the neighbor-propagation branch below is disabled
        with ``and False`` (still experimental), so only direct splits
        are applied on this pass.
        """
        L = leaf_nodes(self.root)
        print(len(L))
        while len(L)>0:
            # Remove a leaf u from L
            u = L.pop(0)
            # Neighbors of u
            NL = [
                get_neighbor_of_greater_or_equal_size(u, Direction.N),
                get_neighbor_of_greater_or_equal_size(u, Direction.S),
                get_neighbor_of_greater_or_equal_size(u, Direction.W),
                get_neighbor_of_greater_or_equal_size(u, Direction.E)
            ]
            split_, NL_depths = has_to_split(u, NL)
            if split_:
                # Add four children(nw,sw, ne,se) to u in 'self' & update their object contents
                w_ = float(u.width/2)
                h_ = float(u.height/2)
                p = []
                nw = Node(u.x0, u.y0, w_, h_, p, depth=u.depth+1, parent=u)
                sw = Node(u.x0, u.y0+h_, w_, h_, p, depth=u.depth+1, parent=u)
                ne = Node(u.x0 + w_, u.y0, w_, h_, p, depth=u.depth+1, parent=u)
                se = Node(u.x0+w_, u.y0+h_, w_, h_, p, depth=u.depth+1, parent=u)
                u.children = [nw, sw, ne, se]
                # Insert four children(nw,sw, ne,se) into L
                L.append(nw)
                L.append(sw)
                L.append(ne)
                L.append(se)
                # Check if nw,sw, ne,se have neighbors that should split & add them to L
                for i, d in enumerate(NL_depths):
                    if u.depth<(d-2) and False: # still under test (branch deliberately disabled)
                        uu = NL[i]
                        NL_uu = [
                            get_neighbor_of_greater_or_equal_size(uu, Direction.N),
                            get_neighbor_of_greater_or_equal_size(uu, Direction.S),
                            get_neighbor_of_greater_or_equal_size(uu, Direction.W),
                            get_neighbor_of_greater_or_equal_size(uu, Direction.E)
                        ]
                        split_2, asdf = has_to_split(uu, NL_uu)
                        print(uu.depth, asdf)
                        if split_2:
                            w_ = float(uu.width/2)
                            h_ = float(uu.height/2)
                            p = []
                            nw_ = Node(uu.x0, uu.y0, w_, h_, p, depth=uu.depth+1, parent=uu)
                            sw_ = Node(uu.x0, uu.y0+h_, w_, h_, p, depth=uu.depth+1, parent=uu)
                            ne_ = Node(uu.x0 + w_, uu.y0, w_, h_, p, depth=uu.depth+1, parent=uu)
                            se_ = Node(uu.x0+w_, uu.y0+h_, w_, h_, p, depth=uu.depth+1, parent=uu)
                            uu.children = [nw_, sw_, ne_, se_]
                            print(u.depth, d, uu)
                            # Insert four children(nw,sw, ne,se) into L
                            L.append(nw_)
                            L.append(sw_)
                            L.append(ne_)
                            L.append(se_)
                if DEBUG_MODE:
                    uu = get_neighbor_of_greater_or_equal_size(se, Direction.E)
                    print(uu)
                    pass
                pass
    def draw(self, title= "Quadtree"):
        """Plot the leaf rectangles, the input polygon and its vertices."""
        _ = plt.figure(figsize=(12, 8))
        # Invert the y-axis so the origin (0, 0) is at the top-left.
        ax = plt.subplot()
        ax.set_xlim(0, self.w)
        ax.set_ylim(self.h, 0)
        plt.title(title)
        c = find_children(self.root)
        print("Number of segments: %d" %len(c))
        areas = set()
        for el in c:
            areas.add(el.width*el.height) # area = side * side
        print("Minimum segment area: %.3f units" %min(areas))
        # Plot Rectangles
        for n in c:
            plt.gcf().gca().add_patch(patches.Rectangle((n.x0, n.y0), n.width, n.height, fill=False))
        x = [point.x for point in self.points]
        y = [point.y for point in self.points]
        draw_polygon = True
        if draw_polygon==True:
            plt.gcf().gca().add_patch(patches.Polygon(list(zip(x,y)), fill=False, color='b'))
        # plots the points as red dots
        plt.plot(x, y, 'ro')
        plt.show()
        return
    def draw_points(self):
        """Plot only the input points (red dots), origin at top-left."""
        _ = plt.figure(figsize=(12, 8))
        # Invert the y-axis so the origin (0, 0) is at the top-left.
        ax = plt.subplot()
        ax.set_xlim(0, self.w)
        ax.set_ylim(self.h, 0)
        # plots the points as red dots
        x = [point.x for point in self.points]
        y = [point.y for point in self.points]
        plt.plot(x, y, 'ro')
        plt.show()
``` |
{
"source": "josehuillca/IA_Trabalho3",
"score": 3
} |
#### File: IA_Trabalho3/mycode/utils.py
```python
import nltk
nltk.download('stopwords')
from sklearn.base import BaseEstimator, TransformerMixin
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
from tqdm import tqdm
import re
def classes_def(x):
    """Map a raw sentiment label to a 3-class code.

    "0" = negative, "1" = neutral, "2" = positive.
    """
    label_codes = {
        "Extremely Positive": "2",
        "Positive": "2",
        "Extremely Negative": "0",
        "Negative": "0",
    }
    # Anything unlisted (i.e. "Neutral") falls back to the neutral class.
    return label_codes.get(x, "1")
## =========================== CLEAN TWEETS ===================
# English stop-word set used by clean_tweets() to drop filler words
# (requires the nltk 'stopwords' corpus downloaded above).
STOPWORDS = set(stopwords.words('english'))
def decontracted(phrase):
    """Expand common English contractions (e.g. "won't" -> "will not")."""
    # Irregular forms must run before the generic suffix rules below.
    specific = [
        (r"won't", "will not"),
        (r"can\'t", "can not"),
    ]
    # Generic suffix expansions. Note "'s" maps to " is", so possessives
    # are expanded too -- the same trade-off as the original rule set.
    general = [
        (r"n\'t", " not"),
        (r"\'re", " are"),
        (r"\'s", " is"),
        (r"\'d", " would"),
        (r"\'ll", " will"),
        (r"\'t", " not"),
        (r"\'ve", " have"),
        (r"\'m", " am"),
    ]
    for pattern, replacement in specific + general:
        phrase = re.sub(pattern, replacement, phrase)
    return phrase
def clean_tweets(X):
    """Normalize a list of raw tweets for sentiment analysis.

    Per tweet: strip URLs and HTML, expand contractions, remove
    mentions/hashtags, numbers and punctuation, collapse whitespace,
    lowercase and drop English stop-words.

    Args:
        X: iterable of raw tweet strings.
    Returns:
        list[str] of cleaned tweets, in input order.
    """
    preprocessed_tweets = []
    # tqdm is for printing the status bar
    for sentance in tqdm(X):
        sentance = re.sub(r'https?://\S+|www\.\S+', r'', sentance)  # remove URLs
        sentance = re.sub(r'<.*?>', r'', sentance)  # remove HTML
        sentance = BeautifulSoup(sentance, 'lxml').get_text()
        sentance = decontracted(sentance)
        # BUG FIX: mentions/hashtags must be removed *before* the
        # punctuation pass below -- that pass strips '@' and '#', which
        # made the original @\w+ / #\w+ substitutions never match.
        sentance = re.sub(r'@\w+', '', sentance)  # remove mentions
        sentance = re.sub(r'#\w+', '', sentance)  # remove hashtags
        sentance = re.sub(r'\d+', '', sentance).strip()  # remove numbers
        sentance = re.sub(r"[^\w\s\d]", "", sentance)  # remove punctuation
        sentance = re.sub(r"\s+", " ", sentance).strip()  # collapse whitespace
        sentance = re.sub(r"\S*\d\S*", "", sentance).strip()
        sentance = re.sub('[^A-Za-z]+', ' ', sentance)
        sentance = ' '.join([e.lower() for e in sentance.split() if e.lower() not in STOPWORDS])
        preprocessed_tweets.append(sentance.strip())
    return preprocessed_tweets
``` |
{
"source": "JoseIbanez/fastapi",
"score": 3
} |
#### File: src/flask/routes-deployments.py
```python
import os
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
from werkzeug.middleware.shared_data import SharedDataMiddleware
import json
app = Flask(__name__)
@app.route('/deployment', methods=['POST'])
def new_deployment():
    """Accept a deployment file upload (multipart form field "file").

    Returns JSON with the stored filename on success. BUG FIX: client
    mistakes (missing part, no filename, disallowed type) are now
    reported as 400 Bad Request -- the original returned 500, which
    mislabelled them as server errors.
    """
    # check if the post request has the file part
    if 'file' not in request.files:
        return {"error": "No file part"}, 400
    file = request.files['file']
    # If the user submits the form without selecting a file, the browser
    # sends an empty part without a filename.
    if file.filename == '':
        return {"error": "No selected file"}, 400
    # NOTE(review): allowed_file() is not defined in this module --
    # confirm it is provided elsewhere, otherwise this raises NameError.
    if not file or not allowed_file(file.filename):
        return {"error": "Wrong type"}, 400
    filename = secure_filename(file.filename)
    file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
    return {"file": filename}, 200
@app.route('/upload-json', methods=['POST'])
def upload_json():
    """Persist a JSON request body to <UPLOAD_FOLDER>/<id>.json.

    The body must be a JSON object carrying an "id" field. BUG FIX:
    without the guard below, a missing/None id made
    ``content.get("id") + ".json"`` raise TypeError (unhandled 500).
    """
    content = request.json
    if not content or not content.get("id"):
        return {"error": "Missing 'id' field"}, 400
    filename = secure_filename(content.get("id") + ".json")
    with open(os.path.join(app.config['UPLOAD_FOLDER'], filename), 'w') as outfile:
        json.dump(content, outfile)
    return {"file": filename}, 200


if __name__ == "__main__":
    app.run(host='0.0.0.0')
``` |
{
"source": "JoseIbanez/rp-shot",
"score": 3
} |
#### File: JoseIbanez/rp-shot/instagram_upload.py
```python
import argparse
import json
from instabot import Bot
from os.path import expanduser
import os
import glob
import shutil
def main():
    """Parse CLI args and upload the given image to Instagram.

    Credentials are read from ~/.secrets/instagram, a JSON file with
    "username" and "password" keys.
    """
    parser = argparse.ArgumentParser(
        description='Upload to instagram')
    parser.add_argument(
        '-file',
        metavar='FILENAME',
        type=str,
        help='image to upload',
        required=True)
    parser.add_argument(
        '-text',
        metavar='NAME_VALUE',
        type=str,
        help='text to send',
        default='My picture')
    args = parser.parse_args()

    file = args.file
    caption = args.text

    #clean_up(file)
    rm_cookie()

    with open(expanduser("~/.secrets/instagram")) as json_file:
        credentials = json.load(json_file)
    username = credentials.get("username")
    # BUG FIX: restored the corrupted password line and actually pass the
    # password to login() -- the original read it but never used it.
    password = credentials.get("password")

    bot = Bot()
    bot.login(username=username, password=password)
    bot.upload_photo(file, caption=caption)
def rm_cookie():
    """Delete any cached instabot session cookie.

    BUG FIX: the original indexed ``glob(...)[0]`` unconditionally and
    raised IndexError on a fresh setup with no cookie file; iterating
    the glob result makes the call a safe no-op in that case.
    """
    for cookie_path in glob.glob("config/*cookie.json"):
        os.remove(cookie_path)
def clean_up(i):
    """Best-effort reset of instabot state before re-uploading image *i*.

    Removes the local ``config`` folder and, when a ``/tmp/<i>.REMOVE_ME``
    marker exists, renames it back into the ``imgs`` folder.
    NOTE(review): currently unused -- the call in main() is commented out.
    """
    dir = "config"
    remove_me = "/tmp/{}.REMOVE_ME".format(i)
    # checking whether config folder exists or not
    if os.path.exists(dir):
        try:
            # removing it so we can upload new image
            shutil.rmtree(dir)
        except OSError as e:
            print("Error: %s - %s." % (e.filename, e.strerror))
    if os.path.exists(remove_me):
        # NOTE(review): "imgs\{}" uses a Windows-style separator while the
        # marker path above is POSIX -- confirm the intended platform.
        src = os.path.realpath("imgs\{}".format(i))
        os.rename(remove_me, src)


if __name__ == '__main__':
    main()
``` |
{
"source": "JoseIbanez/testing",
"score": 2
} |
#### File: proj05/frontend/p07.py
```python
import redis
import time
from uuid import getnode as get_mac
from flask import Flask, json, request
import collections
app = Flask(__name__)
cuenta=0  # NOTE(review): unused module-level counter -- confirm it can be removed

#Connect to redis server
# The hostname "redis" is resolved by the container network.
r_server = redis.Redis("redis")

#register worker
# Each frontend instance registers itself in the "workers" set, keyed by
# its MAC address (hex string).
mac=hex(get_mac())
r_server.sadd("workers","worker:"+mac)
@app.route("/")
def hello():
return "<h1 style='color:blue'>Hello there!</h1>"+mac
@app.route('/v1/vote', methods=['GET', 'POST'])
def vote():
    """Register one vote for ?color=... and bump the per-worker counter.

    Returns a JSON list with the color's hit count and this worker's
    hit count.
    """
    color = request.args.get('color')
    color_hits = r_server.hincrby("vote:" + color, "hit", 1)
    r_server.incr("hit_counter")
    worker_hits = r_server.hincrby("worker:" + mac, "hit", 1)
    # Heartbeat-style TTL: a worker expires unless it keeps serving votes.
    r_server.expire("worker:" + mac, 500)
    counts = collections.OrderedDict()
    counts['color:' + color] = color_hits
    counts['worker:' + mac] = worker_hits
    return json.dumps([counts])
@app.route('/v1/listWorkers')
def query():
    """List registered workers with their hit counts, pruning dead entries.

    A worker whose hash has expired (hget returns None) is removed from
    the "workers" set instead of being listed.
    """
    workers = []
    for worker_key in r_server.smembers("workers"):
        hits = r_server.hget(worker_key, "hit")
        if hits is None:
            # Stale registration: its hash TTL expired, drop it.
            r_server.srem("workers", worker_key)
            continue
        entry = collections.OrderedDict()
        entry[worker_key] = hits
        workers.append(entry)
    return json.dumps(workers)


if __name__ == "__main__":
    app.debug = True
    app.run(host='0.0.0.0')
```
#### File: hcs/analyzer/anlz-tar.py
```python
import tarfile
import sys
from datetime import datetime
from os.path import expanduser
import re
import logging
import argparse
import kpi
import kpiCucm
import kpiCube
import superCmd
def lookforKnownCmd(line):
    """Log a debug entry when *line* contains the ":utils ntp status" marker.

    Always returns None. Fixes the non-idiomatic ``not k == None``
    comparison (PEP 8: compare to None with ``is``/``is not``).
    """
    k = re.search(":utils ntp status", line)
    if k is not None:
        logging.debug(k)
    return
def main():
    """Extract KPI data from CUCM/CUBE support bundles.

    Modes:
      * --tar: walk every regular file in the tar; for each path that a
        registered parser recognizes, feed its lines to that parser.
      * --name + stdin: parse stdin as if it were the file at the given
        (simulated) path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--tar", metavar="STRING", help="Tar file to parser",
        default="")
    parser.add_argument("-f", "--filter", metavar="STRING", help="Filer to path",
        default="")
    parser.add_argument("-n", "--name", metavar="STRING", help="Simulated path",
        default="")
    parser.add_argument("--debug", nargs='?', help="Enable debug",
        default="")
    args = parser.parse_args()
    # With nargs='?', a bare "--debug" flag sets the value to None,
    # which is how debug logging is enabled here.
    if (args.debug is None):
        logging.basicConfig(level=logging.DEBUG)
        logging.debug("Debug active.")
    else:
        #logging.basicConfig(level=logging.INFO)
        logging.basicConfig(level=logging.ERROR)
    # Register the known command parsers.
    sc=superCmd.superCmd()
    sc.cmdList.append(kpiCucm.ccmNtpStatus())
    sc.cmdList.append(kpiCucm.ccmLoad())
    sc.cmdList.append(kpiCucm.ccmStatus())
    sc.cmdList.append(kpiCucm.ccmDBreplication())
    #sc.cmdList.append(kpiCucm.ccmPerfCCM())
    sc.cmdList.append(kpiCube.cubeCallStatCurrentDay())
    #Tar mode
    if args.tar:
        srcFile=expanduser(args.tar)
        numFile=0
        with tarfile.open(srcFile) as tar:
            for tarinfo in tar:
                numFile=numFile+1
                # NOTE(review): debugging cut-off left effectively
                # disabled (20e9 files is never reached).
                if numFile > 20000000000:
                    logging.debug("Stop. numFile:"+str(numFile))
                    break
                if not tarinfo.isreg():
                    continue
                if not sc.parseCmdPath(tarinfo.name):
                    continue
                if not sc.knownCmd():
                    continue
                logging.info(sc.cmdFile+","+sc.host)
                # NOTE(review): extractfile() yields bytes lines; the
                # str-argument strip() below assumes text -- confirm on
                # the target Python version.
                f=tar.extractfile(tarinfo)
                for l in f:
                    cmdLine=l.strip('\n').strip('\r').rstrip(' ')
                    sc.parseCmdLine(cmdLine)
        logging.debug("End of tar, numFile:"+str(numFile))
    #Stdin Mode
    if args.name:
        if not sc.parseCmdPath(args.name):
            sys.exit()
        if not sc.knownCmd():
            sys.exit()
        logging.info(sc.cmdFile+","+sc.host)
        f=sys.stdin
        for l in f:
            cmdLine=l.strip('\n').strip('\r').rstrip(' ')
            sc.parseCmdLine(cmdLine)


if __name__ == "__main__":
    main()
```
#### File: gcloud/p001/niceTag.py
```python
tagTpl = """
<svg xmlns="http://www.w3.org/2000/svg" width="90" height="20">
<linearGradient id="a" x2="0" y2="100%">
<stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
<stop offset="1" stop-opacity=".1"/>
</linearGradient>
<rect rx="3" width="90" height="20" fill="#555"/>
<rect rx="3" x="37" width="53" height="20" fill="{{color}}"/>
<path fill="{{color}}" d="M37 0h4v20h-4z"/>
<rect rx="3" width="90" height="20" fill="url(#a)"/>
<g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11">
<text x="19.5" y="15" fill="#010101" fill-opacity=".3">{{text1}}</text>
<text x="19.5" y="14">{{text1}}</text>
<text x="62.5" y="15" fill="#010101" fill-opacity=".3">{{text2}}</text>
<text x="62.5" y="14">{{text2}}</text>
</g>
</svg>
"""
class Tag:
def __init__(self, text1, text2):
self.text1 = text1
self.text2 = text2
self.color = "#4c1"
def selectColor(self,value):
if (value=="na"):
self.color = "#808080"
if (value=="failed"):
self.color = "#FF0000"
if (value=="fail"):
self.color = "#FF0000"
def getSVG(self):
self.selectColor(self.text2)
out = tagTpl.replace("{{text1}}", self.text1).replace("{{text2}}", self.text2)
out = out.replace("{{color}}",self.color)
return out
```
#### File: testing/gPhotos/instagram.py
```python
from instabot import Bot
from PIL import Image
import sys
from config import getConfig
def upload(input_image):
    """Center-crop /tmp/<input_image> to a square and post it to Instagram.

    The image is cropped around its center to side = min(width, height)
    before upload (Instagram feed photos must be square).
    """
    image = Image.open(f"/tmp/{input_image}")
    print(f"Origina size: {image.size}")
    w, h = image.size
    # Square side is the smaller dimension (replaces the manual if/else).
    s = min(w, h)
    box = (int(w/2 - s/2), int(h/2 - s/2), int(w/2 + s/2), int(h/2 + s/2))
    print(f"Crop box: {box}")
    cropped_image = image.crop(box)
    cropped_image.save('/tmp/cropped_image.jpg')

    # Login to Instagram
    bot = Bot()
    bot.login(username=getConfig('instagram.username'))

    # Upload
    bot.upload_photo("/tmp/cropped_image.jpg", caption="")


if __name__ == '__main__':
    upload(sys.argv[1])
```
#### File: Proj03/templates/render.py
```python
import os
from optparse import OptionParser
from jinja2 import Template
from jinja2 import Environment, FileSystemLoader
import yaml
# Parse command-line options: -c/--config (YAML values file) and
# -t/--template (Jinja2 template to render).
parser = OptionParser()
parser.add_option("-c", "--config", dest="conf",
                  help="Config values file")
parser.add_option("-t", "--template", dest="template",
                  help="Config template file")
(options, args) = parser.parse_args()

PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
TEMPLATE_ENVIRONMENT = Environment(
    autoescape=False,
    # loader=FileSystemLoader(os.path.join(PATH, 'templates')),
    loader=FileSystemLoader(".."),
    trim_blocks=False)


def render_template(template_filename, context):
    """Render *template_filename* (relative to the parent dir) with *context*."""
    return TEMPLATE_ENVIRONMENT.get_template(template_filename).render(context)


with open(options.conf, 'r') as stream:
    try:
        # SECURITY/COMPAT FIX: yaml.load() without a Loader can construct
        # arbitrary objects and is rejected by modern PyYAML; safe_load is
        # the right call for plain configuration data.
        y = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)

out = render_template(options.template, y)
# BUG FIX: "print out" was Python-2-only syntax (SyntaxError on Python 3).
print(out)
```
#### File: pandas/clm/getNotasDay.py
```python
from bs4 import BeautifulSoup
import urllib.request
import sys
# Number of press-release index pages to mirror locally.
LAST_PAGES = 3


def getLastPages():
    """Download the most recent press-release index pages into ./notas/."""
    base = "https://sanidad.castillalamancha.es/ciudadanos/enfermedades-infecciosas/coronavirus/notas-prensa?page="
    for index in range(LAST_PAGES):
        print(index)
        urllib.request.urlretrieve(f"{base}{index}", f"./notas/notas-prensa-{index}.html")
def getNotesFromDay(date):
    """Scan every cached index page and download the notes dated *date*."""
    for page_index in range(LAST_PAGES):
        getNotesFromPage(f"./notas/notas-prensa-{page_index}.html", date)
def getNotesFromPage(page, reqDate=None):
    """Extract press-release links from a cached index *page* and download them.

    Args:
        page: path of a previously downloaded index HTML file.
        reqDate: optional ISO date ("YYYY-MM-DD"); when given, only notes
            published on that date are downloaded.
    """
    # with-statement closes the handle (the original leaked it).
    with open(page, "r") as f:
        html = f.read()
    soup = BeautifulSoup(html, features="html.parser")
    noteList = soup.find("div", {"class": "view-content"}).findAll("div", {"class": "group-right"})
    for note in noteList:
        # Dates appear as dd/mm/yyyy; normalize to ISO yyyy-mm-dd.
        date = note.find("span").getText().split("/")
        isodate = f"{date[2]}-{date[1]}-{date[0]}"
        print(f"{isodate}")
        if reqDate and reqDate != isodate:
            continue
        print(isodate + " " + note.getText())
        url = note.find("a")['href']
        filename = url.split("/")[-1]
        # BUG FIX: the computed filename was dropped and every note was
        # saved under the same placeholder name; keep one file per note.
        urllib.request.urlretrieve(url, f"./notas/{isodate}-{filename}")


if __name__ == '__main__':
    #getLastPages()
    #filename = sys.argv[1]
    #getNotesFromPage(filename)
    date = sys.argv[1]
    getNotesFromDay(date)
```
#### File: pandas/clm/workflow.py
```python
import luigi
import json
from time import strftime
import datetime
import getNotasDay
import getText
import bqInsert
# BigQuery destination table for Castilla-La Mancha case records.
BQ_TABLE_CLM = "cases_CLM"
class DownloadLastPages(luigi.Task):
    """Mirror the latest press-release index pages (at most once per day)."""
    # Import-time date snapshot; doubles as the daily de-dup key.
    today = strftime("%Y-%m-%d")
    def output(self):
        # Marker file whose existence tells luigi this already ran today.
        return luigi.LocalTarget(f"./data/task-{self.today}.lastPages.txt")
    def run(self):
        getNotasDay.getLastPages()
        with self.output().open('w') as target:
            target.write(self.today)
class DownloadNotesDay(luigi.Task):
    """Download every press note published on ``date`` (ISO yyyy-mm-dd)."""
    date = luigi.Parameter()
    def requires(self):
        # The index pages must be mirrored before notes can be located.
        return DownloadLastPages()
    def output(self):
        return luigi.LocalTarget(f"./data/task-{self.date}.notes.txt")
    def run(self):
        getNotasDay.getNotesFromDay(self.date)
        with self.output().open('w') as target:
            target.write(self.date)
class ParseNoteDay(luigi.Task):
    """Parse the downloaded notes for ``date`` into a JSON data file."""
    date = luigi.Parameter()
    def requires(self):
        return DownloadNotesDay(self.date)
    def output(self):
        return luigi.LocalTarget(f"./data/task-{self.date}.data.json")
    def run(self):
        data=getText.parseByDate(self.date)
        with self.output().open('w') as target:
            target.write(json.dumps(data))
class CasesUpload(luigi.Task):
    """Upload the parsed case records for ``date`` to BigQuery."""
    date = luigi.Parameter()
    def requires(self):
        return ParseNoteDay(self.date)
    def output(self):
        return luigi.LocalTarget(f"./data/task-{self.date}.uploaded.txt")
    def run(self):
        with self.input().open('r') as infile:
            cases = json.loads(infile.read())
        # NOTE(review): the insert result is not checked -- confirm
        # bqInsert.insert_table raises on failure.
        result = bqInsert.insert_table(BQ_TABLE_CLM, cases)
        with self.output().open('w') as target:
            target.write("OK")
class MasterTask(luigi.Task):
    """Wrapper task: single entry point for one day's full pipeline."""
    date = luigi.Parameter()
    def requires(self):
        return [ CasesUpload(self.date) ]
    def output(self):
        return luigi.LocalTarget(f"./data/task-{self.date}.master.txt")
    def run(self):
        with self.output().open('w') as target:
            target.write('OK')
class M100Task(luigi.Task):
    """Fan-out task: run MasterTask for each of the last ``numdays`` days."""
    numdays = luigi.Parameter()
    def requires(self):
        # Yield one MasterTask per day, counting back from today.
        base = datetime.datetime.today()
        date_list = [base - datetime.timedelta(days=x) for x in range(int(self.numdays))]
        for date in date_list:
            dateiso = date.strftime("%Y-%m-%d")
            print(dateiso)
            yield MasterTask(dateiso)
if __name__ == '__main__':
    # BUG FIX: "luigi.run" referenced the function without calling it, so
    # running this module did nothing; it must be invoked.
    luigi.run()
```
#### File: pandas/espana/getCases.py
```python
import sys
import json
from tableMap import getTableMap, getTableCols
# Spanish regions mapped to their ISO 3166-2:ES codes. Several spellings
# map to the same code because the source reports are inconsistent.
# BUG FIX: restored the region names that had been replaced by "<NAME>"
# placeholders (Comunidad Valenciana, País Vasco).
REGIONS = [
    {"id":"ES", "name":"ESPAÑA", "isoname":"España"},
    {"id":"ES", "name":"TOTAL", "isoname":"España"},
    {"id":"ES", "name":"Total general", "isoname":"España"},
    {"id":"AN", "name":"Andalucía", "isoname":"Andalucía"},
    {"id":"AR", "name":"Aragón", "isoname":"Aragón"},
    {"id":"AS", "name":"Asturias"},
    {"id":"IB", "name":"Baleares","isoname":"Illes Balears"},
    {"id":"IB", "name":"Islas Baleares","isoname":"Illes Balears"},
    {"id":"CN", "name":"Canarias", "isoname":"Canarias"},
    {"id":"CN", "name":"Islas Canarias", "isoname":"Canarias"},
    {"id":"CB", "name":"Cantabria"},
    {"id":"CM", "name":"Castilla La Mancha"},
    {"id":"CL", "name":"Castilla y León","isoname":"Castilla y León"},
    {"id":"CT", "name":"Cataluña"},
    {"id":"CE", "name":"Ceuta"},
    {"id":"VC", "name":"Comunidad Valenciana"},
    {"id":"VC", "name":"C Valenciana"},
    {"id":"EX", "name":"Extremadura"},
    {"id":"GA", "name":"Galicia"},
    {"id":"MD", "name":"Madrid"},
    {"id":"ML", "name":"Melilla"},
    {"id":"MC", "name":"Murcia"},
    {"id":"NC", "name":"Navarra"},
    {"id":"PV", "name":"País Vasco"},
    {"id":"RI", "name":"La Rioja"}
]


def searchLocation(in_name):
    """Return the ISO region code for *in_name*.

    Trailing '*' footnote markers from the source tables are stripped
    before matching. Raises RuntimeError when the name is not in REGIONS.
    """
    id = None
    name = in_name.replace('*','')
    for region in REGIONS:
        if name == region["name"]:
            id = region["id"]
            break
    if not id:
        raise RuntimeError(f"Location: {name}, ISO Id not found")
    return id
def getConfirmedCases(index, cases, date):
    """Flatten per-region case rows into upload-ready records.

    Args:
        index: report number (used to build each record's id).
        cases: list of dicts parsed from the report's cases table.
        date: dict carrying the report's "isodate".
    Returns:
        list of dicts, one per region, with id "<index>.<region_iso>".
    """
    records = []
    for case in cases:
        region_iso = searchLocation(case["region_name"])
        record = {
            "index": index,
            "region_name": case["region_name"],
            "region_iso": region_iso,
            "date": date["isodate"],
            "cases_total": case["cases_total"],
            "cases_1day": case["cases_1day"],
            "cases_14d": case.get("cases_14d"),
            "cases_14d_ai": case["cases_14d_ai"],
            "cases_7d": case.get("cases_7d"),
            "cases_7d_ai": case.get("cases_7d_ai"),
            "symptoms_14d": case.get("symptoms_14d"),
            "symptoms_14d_ai": case.get("symptoms_14d_ai"),
            "symptoms_7d": case.get("symptoms_7d"),
            "symptoms_7d_ai": case.get("symptoms_7d_ai"),
        }
        record["id"] = f"{index}.{record['region_iso']}"
        records.append(record)
    return records
def prepareBedDict(beds):
    """Index bed-capacity rows by ISO region code.

    Keeps only the columns declared for the "beds" table; missing
    columns are stored as None.
    """
    int_cols, float_cols = getTableCols("beds")
    by_region = {}
    for row in beds:
        region = searchLocation(row["region_name"])
        by_region[region] = {col: row.get(col) for col in int_cols + float_cols}
    return by_region
def getHospital(index, hospital, beds, date):
    """Merge per-region hospital rows with bed-capacity data into records.

    Prints, for each region, how many hospital and bed columns were
    actually populated.
    """
    bedDict = prepareBedDict(beds)
    records = []
    for row in hospital:
        region = searchLocation(row["region_name"])
        item = {
            "index": index,
            "region_iso": region,
            "region_name": row["region_name"],
            "date": date["isodate"],
            "id": f"{index}.{region}",
        }
        # Copy the hospital-table columns, counting how many are present.
        count_h = 0
        hosp_int, hosp_float = getTableCols("hospital")
        for col in hosp_int + hosp_float:
            item[col] = row.get(col)
            if item[col] is not None:
                count_h += 1
        # Copy the bed-table columns from the per-region lookup.
        count_b = 0
        bed_int, bed_float = getTableCols("beds")
        for col in bed_int + bed_float:
            item[col] = bedDict[region][col]
            if item[col] is not None:
                count_b += 1
        print(f"{item['id']}, hospital:{count_h}, beds:{count_b}")
        records.append(item)
    return records
if __name__ == '__main__':
    # Ad-hoc driver: merge the hospital + beds JSON artifacts for report
    # number argv[1]. The cases branch is kept commented for reference.
    index = sys.argv[1]
    date = None
    cases = None
    with open(f"./data/task-{index}.date.json", 'r') as file:
        date = json.loads(file.read())
    #with open(f"./data/task-{index}.cases.json", 'r') as file:
    #    cases = json.loads(file.read())
    #t_cases = getConfirmedCases(index,cases,date)
    with open(f"./data/task-{index}.hospital.json", 'r') as file:
        hospital = json.loads(file.read())
    with open(f"./data/task-{index}.beds.json", 'r') as file:
        beds = json.loads(file.read())
    t_hospital = getHospital(index,hospital,beds,date)
```
#### File: pandas/espana/getPdf.py
```python
import urllib.request
def getGovPdf(index):
    """Download official COVID report number *index* into ./data/.

    Returns the downloaded filename.
    """
    baseurl = "https://www.mscbs.gob.es/profesionales/saludPublica/ccayes/alertasActual/nCov/documentos/"
    filename = f"Actualizacion_{index}_COVID-19.pdf"
    url = baseurl + filename
    print(url)
    # BUG FIX: save under the real filename -- the destination had been
    # replaced by a placeholder, so every report overwrote the same file.
    urllib.request.urlretrieve(url, f"./data/{filename}")
    return filename
    #raise RuntimeError("Download failed!")


if __name__ == '__main__':
    pdf = getGovPdf(100)
```
#### File: pandas/espana/pdf2cases.py
```python
import tabula
import sys
import json
import getTable
from tableMap import getTableMap, getTableCols
def processCasesTable(filename):
    """Parse the per-region cases table out of a downloaded report PDF.

    The page/column layout changes between reports, so a per-report
    tableMap (optionally with a tabula template) drives the extraction.
    Returns the table as a list of dict records.
    """
    # Get Index & TableMap
    index = getTable.getIdFromFilename(filename)
    tableName = "cases"
    tableMap = getTableMap(index, tableName)
    colInt, colFloat = getTableCols(tableName)

    # Read pdf into list of DataFrame.
    # BUG FIX: read the actual PDF -- the path had been replaced by an
    # "(unknown)" placeholder, so no report could ever be parsed.
    if tableMap.get("template"):
        print(f"Using template.")
        df = tabula.read_pdf_with_template(f"./data/{filename}", f"./templates/{tableMap['template']}")
    else:
        df = tabula.read_pdf(f"./data/{filename}", pages=tableMap["page"])

    # Select the table on the page (18 data rows expected).
    conf = getTable.checkSize(df, 18, len(tableMap["colNames"]))
    # Remove header
    getTable.rmHeader(conf)
    # Rename columns
    conf.columns = tableMap["colNames"]
    print(conf)

    # Some layouts merge "count incidence" into one column ("..._gN");
    # split those into separate count / accumulated-incidence columns.
    if ("cases_g14" in tableMap["colNames"]):
        c14 = conf["cases_g14"].str.split(" ", n=1, expand=True)
        conf["cases_14d"] = c14[0]
        conf["cases_14d_ai"] = c14[1]
    if ("cases_g7" in tableMap["colNames"]):
        c7 = conf["cases_g7"].str.split(" ", n=1, expand=True)
        conf["cases_7d"] = c7[0]
        conf["cases_7d_ai"] = c7[1]
    if ("symptoms_g14" in tableMap["colNames"]):
        s14 = conf["symptoms_g14"].str.split(" ", n=1, expand=True)
        conf["symptoms_14d"] = s14[0]
        conf["symptoms_14d_ai"] = s14[1]
    if ("symptoms_g7" in tableMap["colNames"]):
        s7 = conf["symptoms_g7"].str.split(" ", n=1, expand=True)
        conf["symptoms_7d"] = s7[0]
        conf["symptoms_7d_ai"] = s7[1]

    # Convert to Int
    getTable.cols2int(conf, colInt)
    # Convert to Float
    getTable.cols2float(conf, colFloat)
    print(conf)

    result = json.loads(conf.to_json(orient="records"))
    return result


if __name__ == '__main__':
    pdf = processCasesTable(sys.argv[1])
```
#### File: pandas/espana/pdf2date.py
```python
import sys
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import XMLConverter, HTMLConverter, TextConverter
from pdfminer.layout import LAParams
import io
import re
import json
def pdfToken(filename):
    """Extract the report number and date from page 1 of a report PDF.

    Scans the first page's text for the header line
    "Actualización nº <N>. Enfermedad por el coronavirus (COVID-19).
    <dd.mm.yyyy> (datos consolidados ...)" and returns
    {"index": N, "isodate": "yyyy-mm-dd"}.
    Raises RuntimeError when the header cannot be found.
    """
    # BUG FIX: open the actual PDF (the path had been replaced by an
    # "(unknown)" placeholder); the with-statement also closes the file
    # handle the original leaked.
    with open(f"./data/{filename}", 'rb') as fp:
        rsrcmgr = PDFResourceManager()
        retstr = io.StringIO()
        laparams = LAParams()
        device = TextConverter(rsrcmgr, retstr, laparams=laparams)
        # Create a PDF interpreter object.
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        # Only page 1 is needed: the header line always appears there.
        page = next(PDFPage.get_pages(fp), None)
        interpreter.process_page(page)
        data = retstr.getvalue()

    index = None
    date = None
    lines = data.split("\n")
    regex = re.compile(r"Actualizaci.n n. (\d+). Enfermedad por el coronavirus .COVID-19.\. (\d+\.\d+\.\d+) .datos consolidados ")
    for line in lines:
        result = regex.match(line)
        if not result:
            continue
        print(line)
        print(result)
        index = result.group(1)
        date = result.group(2)
        print(f"Detected token, index:{index} with date:{date}")
        break

    if not date:
        raise RuntimeError("Token not found in pdf. No index, no date!")

    # Normalize dd.mm.yyyy -> ISO yyyy-mm-dd.
    result = re.match(r"(\d+)\.(\d+)\.(\d+)", date)
    isodate = f"{result.group(3)}-{result.group(2)}-{result.group(1)}"
    print(f"Result, index:{index}, isodate:{isodate}.")
    return {
        "index": index,
        "isodate": isodate
    }


if __name__ == '__main__':
    ret = pdfToken(sys.argv[1])
    print(json.dumps(ret))
``` |
{
"source": "joseiba/SysVetSoft",
"score": 2
} |
#### File: apps/caja/views.py
```python
import json
import math
from django.shortcuts import render, redirect, HttpResponse
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib import messages
from django.db.models import Q
from django.core.paginator import Paginator
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from datetime import datetime
from io import BytesIO
from reportlab.pdfgen import canvas
from django.views.generic import View
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle
from reportlab.lib.units import cm
from reportlab.lib import colors
from apps.caja.models import Caja
from apps.configuracion.models import ConfiEmpresa
from apps.compras.models import FacturaCompra
from apps.ventas.factura.models import FacturaCabeceraVenta
# Create your views here.
# NOTE(review): evaluated once at import time -- in a long-running server
# process "today" goes stale after midnight; confirm whether per-request
# evaluation was intended.
date = datetime.now()
today = date.strftime("%d/%m/%Y")
@login_required()
@permission_required('caja.view_caja')
def list_cajas(request):
    """Render the cash-register (caja) opening page."""
    template_name = 'caja/apertura_caja.html'
    return render(request, template_name)
@login_required()
def list_caja_ajax(request):
    """DataTables endpoint: open cash registers, with search and paging.

    Query params: busqueda (search on opening date), start/length
    (DataTables paging window).
    """
    query = request.GET.get('busqueda')
    if query != "":
        caja = Caja.objects.exclude(apertura_cierre="C").filter(Q(fecha_hora_alta__icontains=query))
    else:
        caja = Caja.objects.exclude(apertura_cierre="C").all()
    total = caja.count()
    _start = request.GET.get('start')
    _length = request.GET.get('length')
    if _start and _length:
        # Slice the queryset to the requested window. (The original also
        # computed an unused page/per_page pair -- removed as dead code.)
        start = int(_start)
        length = int(_length)
        caja = caja[start:start + length]
    data = [{'id': ca.id, 'fecha_alta': ca.fecha_hora_alta, 'fecha_cierre': ca.fecha_cierre, 'saldo_inicial': ca.saldo_inicial,
             'total_ingreso': ca.total_ingreso, 'total_egreso': ca.total_egreso, 'saldo_entregar': ca.saldo_a_entregar, 'estado': ca.apertura_cierre} for ca in caja]
    response = {
        'data': data,
        'recordsTotal': total,
        'recordsFiltered': total,
    }
    return JsonResponse(response)
@login_required()
@permission_required('caja.view_caja')
def list_historico_caja(request):
    """Render the closed-register history page."""
    template_name = 'caja/list_historial_caja.html'
    return render(request, template_name)
@login_required()
def get_list_caja_historico(request):
    """DataTables endpoint: closed cash registers, with search and paging.

    Mirrors list_caja_ajax but excludes open ("A") registers instead of
    closed ones.
    """
    query = request.GET.get('busqueda')
    if query != "":
        caja = Caja.objects.exclude(apertura_cierre="A").filter(Q(fecha_hora_alta__icontains=query))
    else:
        caja = Caja.objects.exclude(apertura_cierre="A").all()
    total = caja.count()
    _start = request.GET.get('start')
    _length = request.GET.get('length')
    if _start and _length:
        # Slice the queryset to the requested window. (The original also
        # computed an unused page/per_page pair -- removed as dead code.)
        start = int(_start)
        length = int(_length)
        caja = caja[start:start + length]
    data = [{'id': ca.id, 'fecha_alta': ca.fecha_hora_alta, 'fecha_cierre': ca.fecha_cierre, 'saldo_inicial': ca.saldo_inicial,
             'total_ingreso': ca.total_ingreso, 'total_egreso': ca.total_egreso, 'saldo_entregar': ca.saldo_a_entregar, 'estado': ca.apertura_cierre} for ca in caja]
    response = {
        'data': data,
        'recordsTotal': total,
        'recordsFiltered': total,
    }
    return JsonResponse(response)
@login_required()
@permission_required('caja.add_caja')
def add_caja(request):
    """Open today's cash register (at most one opening per day).

    Refuses when a register is already open today, or when today's
    register was already opened and closed.
    NOTE(review): get_config() is not defined in this module -- confirm
    it is imported elsewhere. Also relies on the module-level "today"
    snapshot (see the note on that constant).
    """
    monto_initial = get_config()
    caja_abierta = Caja.objects.exclude(apertura_cierre="C").filter(fecha_alta=today)
    if caja_abierta.count() > 0:
        messages.success(request, 'Ya tienes una caja abierta!')
        return redirect('/caja/listCajas/')
    else:
        caja_cerrada = Caja.objects.exclude(apertura_cierre="A").filter(fecha_alta=today)
        if caja_cerrada.count() > 0:
            messages.success(request, 'Ya has hecho una apertura de caja en el dia!')
            return redirect('/caja/listCajas/')
        else:
            apertura = Caja()
            apertura.saldo_inicial = monto_initial
            # Formatted as "Gs. 1.234.567" (thousands separated by dots).
            apertura.saldo_inicial_formateado = "Gs. " + "{:,}".format(int(monto_initial)).replace(",",".")
            apertura.save()
            messages.success(request, 'Apertura de caja correctamente!')
            return redirect('/caja/listCajas/')
def cerrar_caja(request, id):
    """Close cash register ``id``: aggregate today's sales and purchases,
    compute the amount to hand over, stamp the closing time and save.

    The call order below matters: the cash/POS breakdowns must be computed
    BEFORE sum_factura_venta(), because that helper flags each sales invoice
    factura_caja="S" and the breakdown helpers exclude flagged invoices.
    """
    try:
        caja_cierre = Caja.objects.get(id=id)
        if caja_cierre.apertura_cierre != "C":
            # Purchases (egresos) for the day; flags them as processed.
            sum_total_compras = sum_factura_compra()
            # Cash and POS breakdowns — must run before sum_factura_venta().
            caja_cierre.total_efectivo = sum_efectivo_factura_venta()
            caja_cierre.total_efectivo_formateado = "Gs. " + "{:,}".format(int(caja_cierre.total_efectivo)).replace(",",".")
            caja_cierre.total_pos = sum_pos_factura_venta()
            caja_cierre.total_pos_formateado = "Gs. " + "{:,}".format(int(caja_cierre.total_pos)).replace(",",".")
            # Total sales; flags the invoices so they are not counted again.
            sum_total_venta = sum_factura_venta()
            saldo_entregrar = sum_total_venta - caja_cierre.saldo_inicial
            caja_cierre.total_ingreso = sum_total_venta
            caja_cierre.total_ingreso_formateado = "Gs. " + "{:,}".format(int(sum_total_venta)).replace(",",".")
            caja_cierre.total_egreso = sum_total_compras
            caja_cierre.total_egreso_formateado = "Gs. " + "{:,}".format(int(sum_total_compras)).replace(",",".")
            # Never hand over a negative amount; clamp at zero.
            if saldo_entregrar > 0:
                caja_cierre.saldo_a_entregar = saldo_entregrar
                caja_cierre.saldo_a_entregar_formateado = "Gs. " + "{:,}".format(int(saldo_entregrar)).replace(",",".")
            else:
                caja_cierre.saldo_a_entregar = 0
                caja_cierre.saldo_a_entregar_formateado = "Gs. " + "{:,}".format(0).replace(",",".")
            caja_cierre.apertura_cierre = "C"
            caja_cierre.fecha_cierre = date.strftime("%d/%m/%Y %H:%M:%S hs")
            caja_cierre.save()
            messages.success(request, 'Cierre de caja correctamente!')
            return redirect('/caja/listCajas/')
        else:
            messages.success(request, 'Esta caja ya esta cerrada!')
            return redirect('/caja/listCajas/')
    except Exception as e:
        # NOTE(review): broad catch hides the cause and reports the error via
        # messages.success — consider logging ``e`` and using messages.error.
        messages.success(request, 'Ha ocurrido un error!')
        return redirect('/caja/listCajas/')
def sum_factura_compra():
    """Total today's purchase invoices not yet absorbed by a cash register.

    Side effect: each invoice counted is flagged factura_caja="S" so it can
    never be counted twice. Returns 0 on any error.
    """
    try:
        pendientes = FacturaCompra.objects.exclude(factura_caja="S").filter(fecha_alta=today)
        acumulado = 0
        for compra in pendientes:
            acumulado += compra.total
            compra.factura_caja = "S"
            compra.save()
        return acumulado
    except Exception as e:
        return 0
def sum_efectivo_factura_venta():
    """Sum today's still-open sales invoices paid in cash ("C").

    Read-only: does not flag the invoices. Returns 0 on any error.
    """
    try:
        ventas = FacturaCabeceraVenta.objects.exclude(factura_caja="S").filter(fecha_alta=today)
        return sum(venta.total for venta in ventas if venta.contado_pos == "C")
    except Exception as e:
        return 0
def sum_pos_factura_venta():
    """Sum today's still-open sales invoices paid by POS/card ("P").

    Read-only: does not flag the invoices. Returns 0 on any error.
    """
    try:
        ventas = FacturaCabeceraVenta.objects.exclude(factura_caja="S").filter(fecha_alta=today)
        return sum(venta.total for venta in ventas if venta.contado_pos == "P")
    except Exception as e:
        return 0
def sum_factura_venta():
    """Total today's sales invoices not yet absorbed by a cash register.

    Side effect: each invoice counted is flagged factura_caja="S", so call the
    cash/POS breakdown helpers first. Returns 0 on any error.
    """
    try:
        pendientes = FacturaCabeceraVenta.objects.exclude(factura_caja="S").filter(fecha_alta=today)
        acumulado = 0
        for venta in pendientes:
            acumulado += venta.total
            venta.factura_caja = "S"
            venta.save()
        return acumulado
    except Exception as e:
        return 0
def get_config():
    """Return the configured opening cash balance as a float.

    The stored value uses '.' as a thousands separator (e.g. "300.000"), so
    the dots are stripped before conversion. Falls back to 300000 when no
    config row exists or the value is malformed.
    """
    try:
        confi = ConfiEmpresa.objects.get(id=1)
        # Idiom fix: one str.replace instead of a manual split/concat loop.
        return float(confi.apertura_caja_inicial.replace('.', ''))
    except Exception as e:
        return 300000
def reporte_caja_pdf(request, id):
    """Build and stream the daily PDF report for cash register ``id``.

    Draws the company header and the register's totals at absolute X/Y
    coordinates on a ReportLab canvas, renders into an in-memory buffer and
    returns it as an ``application/pdf`` response.
    """
    caja = Caja.objects.get(id=id)
    confi = ConfiEmpresa.objects.get(id=1)
    # Tell the browser the body is a PDF document.
    response = HttpResponse(content_type='application/pdf')
    # BytesIO is the temporary in-memory file the canvas renders into.
    buffer = BytesIO()
    # Canvas draws at absolute X/Y coordinates (origin = bottom-left).
    pdf = canvas.Canvas(buffer)
    # Header helper currently disabled.
    #self.cabecera(pdf)
    # Title in 18pt Helvetica near the top of the page.
    pdf.setFont("Helvetica", 18)
    pdf.drawString(210, 790, u"Detalle Caja del Dia")
    # Company / register metadata in 12pt.
    pdf.setFont("Helvetica", 12)
    pdf.drawString(30, 760, u"Nombre Empresa: " + confi.nombre_empresa)
    pdf.drawString(30, 740, u"Direccion: " + confi.direccion)
    pdf.drawString(30, 720, u"Cuidad: " + confi.cuidad)
    pdf.drawString(300, 760, u"Fecha Apertura: " + caja.fecha_hora_alta)
    pdf.drawString(300, 740, u"Fecha Cierre: " + caja.fecha_cierre)
    # ``y`` is only consumed by the disabled tabla_report call below.
    y = 700
    pdf.drawString(50, 700, u"----------------------------------------------------Detalle--------------------------------------------------")
    # Label column at x=50, pre-formatted "Gs. ..." values at x=200.
    pdf.drawString(50, 670, u"Saldo Inicial: ")
    pdf.drawString(200, 670, u"" + caja.saldo_inicial_formateado)
    pdf.drawString(50, 640, u"Total cobrado en Pos: ")
    pdf.drawString(200, 640, u"" + caja.total_pos_formateado)
    pdf.drawString(50, 610, u"Total cobrado en Efectivo: ")
    pdf.drawString(200, 610, u"" + caja.total_efectivo_formateado)
    pdf.drawString(50, 580, u"Total a Ingreso del Dia: ")
    pdf.drawString(200, 580, u"" + caja.total_ingreso_formateado)
    pdf.drawString(50, 550, u"Saldo a entregar: ")
    pdf.drawString(200, 550, u"" + caja.saldo_a_entregar_formateado)
    # Detail table currently disabled (see tabla_report below).
    #tabla_report(pdf, y, caja)
    # Finish the page, flush the canvas and hand the bytes to the response.
    pdf.showPage()
    pdf.save()
    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
def tabla_report(pdf, y, caja):
    """Draw an order-detail table onto ``pdf`` (currently dead code — its only
    call site in reporte_caja_pdf is commented out).

    NOTE(review): the filter below passes the *builtin* ``id`` function as
    ``id_pedido_cabecera`` — almost certainly a bug (an actual cabecera id was
    intended); the ``caja`` parameter is never used. Confirm intent before
    re-enabling.
    """
    # Column headers for the detail table.
    encabezados = ('Codigo', 'Producto', 'Descripción', 'Cantidad', 'Precio \n Unitario', 'Total')
    pedido_detalle = PedidoDetalle.objects.filter(id_pedido_cabecera=id).order_by('last_modified')
    # Extra blank rows appended after the data rows.
    count_detalle = 2
    # One tuple per detail row; price/total columns left blank.
    detalles = [(pedi.id_pedido.id_producto.codigo_producto, pedi.id_pedido.id_producto.nombre_producto,
                 pedi.id_pedido.id_producto.descripcion, pedi.cantidad, '', '') for pedi in pedido_detalle]
    detalles_extras = [('', '', '', '', '', '') for i in range(count_detalle)]
    detalle_orden = Table([encabezados] + detalles + detalles_extras, colWidths=[2.5 * cm, 3 * cm, 7* cm, 2 * cm, 3 * cm, 3 * cm])
    # Cell styling: centered header row, black 1pt grid, 10pt font.
    detalle_orden.setStyle(TableStyle(
        [
            ('ALIGN',(0,0),(3,0),'CENTER'),
            ('GRID', (0, 0), (-1, -1), 1, colors.black),
            ('FONTSIZE', (0, 0), (-1, -1), 10),
        ]
    ))
    # Vertical offset grows with the number of rows so the table fits.
    position = int(((pedido_detalle.count() + count_detalle) * 50 ) / (2))
    pdf.setFont("Helvetica", 12)
    pdf.drawString(480, ((680 - position)) , u"Total: ",)
    # Reserve the drawing area, then place the table at the computed Y.
    detalle_orden.wrapOn(pdf, 800, 600)
    detalle_orden.drawOn(pdf, 10, 700 - position)
```
#### File: apps/compras/models.py
```python
from django.db import models
from apps.ventas.producto.models import Producto
from datetime import datetime
# Create your models here.
# NOTE(review): evaluated once at import time — CharField defaults derived
# from this below are frozen at server start, not computed per row. Confirm
# that is the intended behavior before relying on these timestamps.
date = datetime.now()
class Proveedor(models.Model):
    """Master data for a supplier: name, address, RUC (tax id) and contacts."""
    nombre_proveedor = models.CharField(max_length=500, help_text="Ingrese nombre del proveedor")
    direccion = models.CharField(max_length=500, help_text="Ingrese la direccion")
    ruc_proveedor = models.CharField(max_length=500, default="-", help_text="Ingrese el ruc del proveedor")
    telefono = models.CharField(max_length = 500, help_text="Ingrese el telefono del proveedor")
    email = models.EmailField(max_length = 500, help_text = "Ingrese email del proveedor", null=True, blank=True, default="-")
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    # Soft-delete flag: "S" = active, anything else = inactive.
    is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
    class Meta:
        verbose_name = "Proveedor"
        verbose_name_plural = "Proveedores"
        default_permissions = ()
        permissions = (
            ('add_proveedor', 'Agregar Proveedor'),
            ('change_proveedor', 'Editar Proveedor'),
            ('delete_proveedor', 'Eliminar Proveedor'),
            ('view_proveedor', 'Listar Proveedores'))
    def __str__(self):
        return 'Proveedor: %s - ruc: %s' % (self.nombre_proveedor, self.ruc_proveedor)
class Pedido(models.Model):
    """A single restocking order line, tied to one product."""
    cantidad_pedido = models.CharField(max_length=500, blank=True, null=True, default="-")
    # NOTE(review): default is computed once at import time, so every row gets
    # the server-start timestamp — confirm before relying on it.
    fecha_alta = models.CharField(max_length = 200, default = date.strftime("%d/%m/%Y %H:%M:%S hs"), editable = False)
    pedido_cargado = models.CharField(max_length=2, default="N", blank=True, null=True)
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    # Soft-delete flag: "S" = active.
    is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
    id_producto = models.ForeignKey(Producto, on_delete=models.PROTECT, null=True)
    class Meta:
        # Bug fix: verbose names were copy-pasted from Proveedor.
        verbose_name = "Pedido"
        verbose_name_plural = "Pedidos"
        default_permissions = ()
        permissions = (
            ('add_pedido', 'Agregar Pedido'),
            ('change_pedido', 'Editar Pedido'),
            ('delete_pedido', 'Eliminar Pedido'),
            ('view_pedido', 'Listar Pedido'))
    def obtener_dict(self):
        """Serialize this line plus its product for the front end."""
        # Renamed local so it no longer shadows the builtin ``dict``.
        datos = {}
        datos['codigo_producto'] = self.id
        datos['codigo_real'] = self.id_producto.id
        datos['nombre'] = self.id_producto.nombre_producto
        datos['description'] = self.id_producto.descripcion
        datos['precio'] = self.id_producto.precio_compra
        datos['cantidad_pedido'] = self.cantidad_pedido
        return datos
    def __str__(self):
        return self.id_producto.nombre_producto
class PedidoCabecera(models.Model):
    """Header grouping a set of order lines (PedidoDetalle) created the same day."""
    # NOTE(review): default computed once at import time (see module-level
    # ``date``) — every row created after startup shares the same value.
    fecha_alta = models.CharField(max_length = 200, default = date.strftime("%d/%m/%Y"), editable = False)
    pedido_cargado = models.CharField(max_length=2, default="N", blank=True, null=True)
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    # Soft-delete flag: "S" = active.
    is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
    class Meta:
        verbose_name = "Pedido Cabecera"
        verbose_name_plural = "Pedido Cabeceras"
        default_permissions = ()
        permissions = (
            ('add_pedidocabecera', 'Agregar Pedido'),
            ('change_pedidocabecera', 'Editar Pedido'),
            ('delete_pedidocabecera', 'Eliminar Pedido'),
            ('view_pedidocabecera', 'Listar Pedido'))
    def __str__(self):
        return self.fecha_alta
class PedidoDetalle(models.Model):
    """One line of a PedidoCabecera: a product, its quantity and description."""
    id_pedido_cabecera = models.ForeignKey('PedidoCabecera', on_delete=models.CASCADE)
    id_pedido = models.ForeignKey('Pedido', on_delete=models.CASCADE, null=True)
    cantidad = models.IntegerField()
    descripcion = models.CharField(max_length=800, blank=True)
    id_producto = models.ForeignKey(Producto, on_delete=models.PROTECT, null=True)
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    class Meta:
        """Meta definition for Pedido Detalle"""
        verbose_name = 'Pedido Detalle'
        verbose_name_plural = 'Pedido Detalle'
        default_permissions = ()
        permissions = (
            ('add_pedidodetalle', 'Agregar Pedido'),
            ('change_pedidodetalle', 'Editar Pedido'),
            ('delete_pedidodetalle', 'Eliminar Pedido'),
            ('view_pedidodetalle', 'Listar Pedido'))
    def __str__(self):
        """Readable representation of the detail line.

        Bug fix: the body was ``pass`` (returned None), which makes
        ``str(obj)`` raise TypeError — e.g. in the admin or shell.
        """
        return 'Pedido Detalle %s' % self.pk
class Pago(models.Model):
    """A payment method (name plus free-form description)."""
    metodo_pago = models.CharField(max_length=100)
    descripcion = models.TextField()
    class Meta:
        verbose_name = "Pago"
        # Bug fix: was the literal placeholder "Plural".
        verbose_name_plural = "Pagos"
# (db_value, human_label) choices for FacturaCompra.estado.
ESTADOS_FACTURA = [
    ('PENDIENTE', 'Pendiente'),
    ('CANCELADO', 'Cancelado'),
    ('FINALIZADO', 'Finalizado'),
]
class FacturaCompra(models.Model):
    """Header of a purchase invoice received from a supplier."""
    nro_factura = models.CharField(max_length=500, null=True)
    nro_timbrado = models.CharField(max_length=500, null=True)
    # NOTE(review): default frozen at import time (module-level ``date``).
    fecha_alta = models.CharField(max_length=500, default = date.strftime("%d/%m/%Y"), null=True)
    fecha_emision_factura = models.CharField(max_length=500, null=True)
    fecha_emision = models.CharField(max_length=500, null=True)
    fecha_vencimiento = models.CharField(max_length=500, null=True)
    tipo_factura = models.BooleanField(default=True)
    # Bug fix: default was ESTADOS_FACTURA[0] (the whole tuple), which stored
    # "('PENDIENTE', 'Pendiente')" in the column instead of the key.
    estado = models.CharField(max_length=500, choices=ESTADOS_FACTURA, default=ESTADOS_FACTURA[0][0])
    total_iva = models.IntegerField(default=0)
    total = models.FloatField(default=0)
    # S/N processing flags consumed by the views.
    factura_cargada_producto = models.CharField(max_length=2, default="N", blank=True, null=True)
    factura_cargada_pedido = models.CharField(max_length=2, default="N", blank=True, null=True)
    # NOTE(review): "pedidod" looks like a typo, but renaming the column needs
    # a migration, so the name is kept.
    pedidod_to_factura = models.CharField(max_length=2, default="N", blank=True, null=True)
    facturado = models.CharField(max_length=2, default="N", blank=True, null=True)
    # "S" once this invoice has been absorbed into a cash-register close.
    factura_caja = models.CharField(max_length=2, default="N", blank=True, null=True)
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
    id_proveedor = models.ForeignKey('Proveedor', on_delete=models.CASCADE, null=True)
    id_pedido_cabecera = models.ForeignKey('PedidoCabecera', on_delete=models.CASCADE, null=True)
    def __str__(self):
        return 'Factura Compra: %s - Proveedor: %s' % (self.nro_factura, self.id_proveedor)
    class Meta:
        verbose_name = 'Factura Compra'
        verbose_name_plural = 'Facturas Compras'
        default_permissions = ()
        permissions = (
            ('add_facturacompra', 'Agregar Factura Compra'),
            ('change_facturacompra', 'Editar Factura Compra'),
            ('delete_facturacompra', 'Eliminar Factura Compra'),
            ('view_facturacompra', 'Listar Factura Compra'))
class FacturaDet(models.Model):
    """One line of a FacturaCompra: product, quantity and purchase price."""
    id_factura = models.ForeignKey('FacturaCompra', on_delete=models.CASCADE)
    id_pedido = models.ForeignKey('Pedido', on_delete=models.CASCADE, null=True)
    cantidad = models.IntegerField()
    precio_compra = models.CharField(max_length=800, blank=True, null=True)
    # S/N flags marking whether this line was already folded into reports.
    detalle_cargado_reporte = models.CharField(max_length=2, default="N", blank=True, null=True)
    detalle_cargado_mes = models.CharField(max_length=2, default="N", blank=True, null=True)
    descripcion = models.CharField(max_length=800, blank=True)
    id_producto = models.ForeignKey(Producto, on_delete=models.PROTECT, null=True)
    class Meta:
        ordering = ['id']
        default_permissions = ()
        permissions = (
            ('add_facturadet', 'Agregar Factura Compra'),
            ('change_facturadet', 'Editar Factura Compra'),
            ('delete_facturadet', 'Eliminar Factura Compra'),
            ('view_facturadet', 'Listar Factura Compra'))
```
#### File: apps/configuracion/models.py
```python
from django.db import models
from datetime import date, datetime, timedelta
from apps.ventas.producto.models import Producto
# Dynamically-built list of selectable hours (populated elsewhere at runtime).
HORAS_SER = []
# Create your models here.
class ConfiEmpresa(models.Model):
    """Singleton-style company configuration (the app always reads id=1):
    opening cash balance, company identity, invoice stamping data and alert
    thresholds.
    """
    # Stored as text with '.' thousands separators; parsed by get_config().
    apertura_caja_inicial = models.CharField(max_length=200, blank=True, null=True)
    ubicacion_deposito_inicial = models.CharField(max_length=200, blank=True, null=True)
    nombre_empresa = models.CharField(max_length=500, blank=True, null=True)
    direccion = models.CharField(max_length=500, blank=True, null=True)
    cuidad = models.CharField(max_length=500, blank=True, null=True)
    telefono = models.CharField(max_length=500, blank=True, null=True)
    nro_timbrado = models.CharField(max_length=500, blank=True, null=True)
    fecha_inicio_timbrado = models.CharField(max_length=500, blank=True, null=True)
    fecha_fin_timbrado = models.CharField(max_length=500, blank=True, null=True)
    ruc_empresa = models.CharField(max_length=500, blank=True, null=True)
    # Alert windows, in days.
    dias_a_vencer = models.IntegerField(blank=True, null=True, default=30)
    dias_alert_vacunas = models.IntegerField(blank=True, null=True, default=30)
    class Meta:
        verbose_name = "Configuracion Empresa"
        verbose_name_plural = "Configuraciones Empresas"
        default_permissions = ()
        permissions = (
            ('add_confiempresa', 'Agregar Configuracion'),
            ('change_confiempresa', 'Editar Configuracion'),
            ('delete_confiempresa', 'Eliminar Configuracion'),
            ('view_confiempresa', 'Listar Configuraciones'))
    def __str__(self):
        """Display the row id (there is normally only one row)."""
        return '{0}'.format(self.id)
# Create your models here.
class Servicio(models.Model):
    """A service offered by the business (name, price, duration in minutes)."""
    nombre_servicio = models.CharField(max_length = 200, help_text = "Ingrese nombre del servicio")
    precio_servicio = models.CharField(max_length = 200, help_text = "Ingrese el precio del servicio")
    min_serv = models.CharField(max_length = 200)
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    # Soft-delete flag: "S" = active.
    is_active = models.CharField(max_length=200, blank=True, null=True, default="S")
    class Meta:
        verbose_name = "Servicio"
        verbose_name_plural = "Servicios"
        default_permissions = ()
        permissions = (
            ('add_servicio', 'Agregar Servicio'),
            ('change_servicio', 'Editar Servicio'),
            ('delete_servicio', 'Eliminar Servicio'),
            ('view_servicio', 'Listar Servicios'))
    def __str__(self):
        """Display name of the service."""
        return '{0}'.format(self.nombre_servicio)
    def get_absolute_url(self):
        """Return the URL of this service's detail view.

        Bug fix: ``reverse`` was never imported in this module, so calling
        this method raised NameError; import it locally.
        """
        from django.urls import reverse
        return reverse('servicio-detail', args=[str(self.id)])
    def obtener_dict(self):
        """Serialize the service for the front end (tipo 'S' = servicio)."""
        # Renamed local so it no longer shadows the builtin ``dict``.
        datos = {}
        datos['codigo_producto'] = self.id
        datos['nombre'] = self.nombre_servicio
        datos['description'] = self.nombre_servicio
        datos['precio'] = self.precio_servicio
        datos['tipo'] = 'S'
        return datos
class Empleado(models.Model):
    """An employee, linked to the single service they perform."""
    nombre_emp = models.CharField(max_length=200)
    apellido_emp = models.CharField(max_length=200)
    # National identity document number.
    ci_empe = models.CharField(max_length=200)
    disponible = models.BooleanField(blank=True, null=True, default=True)
    # "S" while the employee can be assigned to new reservations.
    emp_disponible_reserva = models.CharField(max_length=200, blank=True, null=True, default="S")
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    # Soft-delete flag: "S" = active.
    is_active = models.CharField(max_length=200, blank=True, null=True, default="S")
    id_servicio = models.ForeignKey('Servicio', on_delete=models.CASCADE, null=False)
    class Meta:
        verbose_name = "Empleado"
        verbose_name_plural = "Empleados"
        default_permissions = ()
        permissions = (
            ('add_empleado', 'Agregar Empleado'),
            ('change_empleado', 'Editar Empleado'),
            ('delete_empleado', 'Eliminar Empleado'),
            ('view_empleado', 'Listar Empleados'))
    def __str__(self):
        """Display the employee's first name."""
        return '{0}'.format(self.nombre_emp)
class TipoVacuna(models.Model):
    """A vaccine type, backed by a stock product, with its application period."""
    # Si/No choices for the multi-application flag.
    opciones = (
        ('S', 'Si'),
        ('N', 'No'),
    )
    id_producto = models.ForeignKey(Producto, on_delete=models.CASCADE, null=False)
    nombre_vacuna = models.CharField(max_length = 500, blank=True, null=True)
    periodo_aplicacion = models.CharField(max_length = 500, blank=True, null=True)
    # "S" when the vaccine is applied in several doses.
    multi_aplicaciones = models.CharField(max_length=2, choices=opciones, default="N", blank=True, null=True)
    class Meta:
        verbose_name = "Vacunas"
        verbose_name_plural = "Vacunas"
        default_permissions = ()
        permissions = (
            ('add_tipovacuna', 'Agregar Vacuna'),
            ('change_tipovacuna', 'Editar Vacuna'),
            ('delete_tipovacuna', 'Eliminar Vacuna'),
            ('view_tipovacuna', 'Listar Vacunas'))
    def __str__(self):
        """Display the vaccine name."""
        return '{0}'.format(self.nombre_vacuna)
```
#### File: apps/usuario/forms.py
```python
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm, UserChangeForm, PasswordChangeForm
from django.contrib.auth.models import Group, Permission
from django.forms import *
from django import forms
from apps.usuario.models import User
class FormLogin(AuthenticationForm):
    """Login form; applies the login template's CSS classes and placeholders."""
    def __init__(self, *args, **kwargs):
        super(FormLogin, self).__init__(*args, **kwargs)
        self.fields['username'].widget.attrs['class'] = 'input100'
        self.fields['username'].widget.attrs['placeholder'] = 'Nombre de usuario'
        self.fields['password'].widget.attrs['class'] = 'input100'
        # NOTE(review): '<PASSWORD>' looks like a scrubbing/redaction artifact
        # rather than a real placeholder label — confirm the intended text.
        self.fields['password'].widget.attrs['placeholder'] = '<PASSWORD>'
class UserForm(UserCreationForm):
    """User-creation form: profile fields, group selection and the two
    password inputs, styled for the admin templates.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['password1'].widget.attrs['class'] = 'form-control'
        self.fields['password2'].widget.attrs['class'] = 'form-control'
        # Group picker uses the select2-style widget and is mandatory.
        self.fields['groups'].widget.attrs['class'] = 'group_select'
        self.fields['groups'].widget.attrs['required'] = 'required'
        self.fields['groups'].widget.attrs['style'] = 'width: 100%'
    class Meta:
        model = User
        # Bug fix: this attribute was misspelled ``fiels`` (and the password
        # entries were redacted), so Django silently ignored it.
        fields = ("first_name", "last_name", "email", "username", "groups", "password1", "password2")
        widgets = {
            'first_name': forms.TextInput(
                attrs={
                    'class': 'form-control','name': 'first_name', 'placeholder': 'Ingrese el nombre del usuario','onkeyup':'replaceDirection(this)', 'required': 'required', 'autocomplete':"off"
                }
            ),
            'last_name': forms.TextInput(
                attrs={
                    'class': 'form-control','name': 'last_name', 'placeholder': 'Ingrese el apellido del usuario','onkeyup':'replaceDirection(this)', 'required': 'required', 'autocomplete':"off"
                }
            ),
            'email': forms.TextInput(
                attrs={
                    'class':'form-control optional', 'placeholder': 'Email','name':'email', 'type':'email', 'id':'email', 'autocomplete':"off"
                }
            ),
            'username': forms.TextInput(
                attrs={
                    'class': 'form-control','name': 'username', 'placeholder': 'Nombre de usuario','onkeyup':'replaceDirection(this)', 'required': 'required', 'autocomplete':"off"
                }
            ),
        }
        exclude = ['user_permissions', 'last_login', 'date_joined', 'is_superuser', 'is_active', 'is_staff', 'password', 'profile']
        help_texts = {
            'username' : None,
            'email': None,
            'first_name': None,
            'last_name': None,
            'password1': None,
            'password2': None,
        }
    def save(self, commit=True):
        """Persist the user, then attach the selected groups.

        Groups require a primary key, so they are only attached after the
        user row has actually been saved.
        """
        user = super().save(commit=False)
        if commit:
            user.save()
            for grupo in self.cleaned_data['groups']:
                user.groups.add(grupo)
        return user
class UserFormChange(UserChangeForm):
    """Edit form for an existing user; hides the group picker from editors
    without user-administration rights.
    """
    def __init__(self, user, *args, **kwargs):
        # ``user`` is the *requesting* user, kept only for permission checks.
        self.user = user
        super().__init__(*args, **kwargs)
        self.fields['groups'].widget.attrs['class'] = 'group_select'
        self.fields['groups'].widget.attrs['required'] = 'required'
        self.fields['groups'].widget.attrs['style'] = 'width: 100%'
        for fieldname in ['username']:
            self.fields[fieldname].help_text = None
        try:
            if not self.user.has_perms(['usuario.add_user']):
                # Editors without admin rights see no group selector.
                for fieldname in ['groups']:
                    self.fields[fieldname].widget.attrs['class'] = 'd-none'
                    self.fields[fieldname].label = ''
                    self.fields[fieldname].help_text = None
        except Exception:
            # Narrowed from a bare ``except:`` so system exits are not hidden.
            pass
    # Hide UserChangeForm's read-only password hash field entirely.
    password = None
    class Meta:
        model = User
        fields = ("first_name", "last_name", "email", "username", "groups")
        widgets = {
            'first_name': forms.TextInput(
                attrs={
                    'class': 'form-control','name': 'first_name', 'placeholder': 'Ingrese el nombre del usuario','onkeyup':'replaceDirection(this)', 'required': 'required', 'autocomplete':"off"
                }
            ),
            'last_name': forms.TextInput(
                attrs={
                    'class': 'form-control','name': 'last_name', 'placeholder': 'Ingrese el apellido del usuario','onkeyup':'replaceDirection(this)', 'required': 'required', 'autocomplete':"off"
                }
            ),
            'email': forms.TextInput(
                attrs={
                    'class':'form-control optional', 'placeholder': 'Email','name':'email', 'type':'email', 'id':'email', 'autocomplete':"off"
                }
            ),
            'username': forms.TextInput(
                attrs={
                    'class': 'form-control','name': 'username', 'placeholder': 'Nombre de usuario','onkeyup':'replaceDirection(this)', 'required': 'required', 'autocomplete':"off"
                }
            ),
        }
        exclude = ['user_permissions', 'last_login', 'date_joined', 'is_superuser', 'is_active', 'is_staff', 'password','profile']
        help_texts = {
            'username' : None,
            'email': None,
            'first_name': None,
            'last_name': None,
            # Bug fix: the value here was an invalid placeholder token
            # (a syntax error); None matches the other entries.
            'password': None,
        }
    def save(self, commit=True):
        """Re-attach the selected groups to the edited user.

        NOTE(review): ``commit`` is ignored and the user row itself is never
        saved here — callers (see edit_usuario) save it explicitly. When the
        form is invalid this returns None; confirm that is intended.
        """
        form = super()
        if form.is_valid():
            user = form.save(commit=False)
            user.groups.clear()
            for grupo in self.cleaned_data['groups']:
                user.groups.add(grupo)
            return user
# Model labels whose permissions may be granted when building roles below.
# NOTE(review): despite the name this is a plain list of content-type model
# names, not a Django QuerySet.
queryset = [
    "user",
    "producto",
    "especie",
    "raza",
    "cliente",
    "mascota",
    "reserva",
    "confiempresa",
    "servicio",
    "empleado",
    "facturacompra",
    "proveedor",
    "pedidocabecera",
    "facturacabeceraventa",
    "inventario",
    "tipovacuna",
    "reporte"]
class GroupForm(ModelForm):
    """Create a role (auth Group), offering only the whitelisted permissions."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict selectable permissions to the project's own models.
        self.fields['permissions'].queryset = Permission.objects.filter(content_type__model__in=queryset)
    class Meta:
        model = Group
        fields = ('name', 'permissions')
        widgets = {
            'name': forms.TextInput(attrs={
                'class':'form-control', 'autocomplete': 'off',
            }),
            'permissions': forms.CheckboxSelectMultiple(),
        }
class GroupChangeForm(ModelForm):
    """Edit a role (auth Group).

    NOTE(review): currently identical to GroupForm; kept separate so the two
    flows can diverge, but could be collapsed into one class.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict selectable permissions to the project's own models.
        self.fields['permissions'].queryset = Permission.objects.filter(content_type__model__in=queryset)
    class Meta:
        model = Group
        fields = ('name', 'permissions')
        widgets = {
            'name': forms.TextInput(attrs={
                'class':'form-control', 'autocomplete': 'off',
            }),
            'permissions': forms.CheckboxSelectMultiple(),
        }
class ContraseñaChangeForm(PasswordChangeForm):
    """Password-change form with Bootstrap styling and a Spanish error
    message for a wrong current password.
    """
    def __init__(self , user, *args, **kwargs):
        self.user = user
        super().__init__(user, *args, **kwargs)
        self.fields['old_password'].widget.attrs['class'] = 'form-control'
        self.fields['new_password1'].widget.attrs['class'] = 'form-control'
        self.fields['new_password2'].widget.attrs['class'] = 'form-control'
        self.fields['old_password'].widget.attrs['required'] = 'required'
        self.fields['new_password1'].widget.attrs['required'] = 'required'
        self.fields['new_password2'].widget.attrs['required'] = 'required'
    def clean_old_password(self):
        """
        Validate that the old_password field is correct.
        """
        # Overrides the parent's check only to localize the error message.
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise ValidationError("La contraseña actual no coinciden!")
        return old_password
```
#### File: apps/usuario/models.py
```python
from django.contrib.auth.models import AbstractUser
from django.db import models
from sysvet.settings import STATIC_URL, MEDIA_URL
class User(AbstractUser):
    """Project user: Django's AbstractUser plus an optional avatar image."""
    profile = models.ImageField(upload_to='user/fotos', null=True, blank=True)
    def get_profile(self):
        """Return the avatar URL, falling back to the bundled default image."""
        if not self.profile:
            return '{}{}'.format(STATIC_URL, 'img/profile.png')
        return '{}{}'.format(MEDIA_URL, self.profile)
    class Meta:
        verbose_name = 'Usuario'
        verbose_name_plural = 'Usuarios'
        default_permissions = ()
        permissions = (
            ('add_user', 'Agregar Usuario'),
            ('change_user', 'Editar Usuario'),
            ('delete_user', 'Eliminar Usuario'),
            ('view_user', 'Listar Usuarios'))
```
#### File: apps/usuario/views.py
```python
from django.shortcuts import render, redirect
from django.views.generic.edit import FormView
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth import login, logout, authenticate, update_session_auth_hash
from django.http import HttpResponseRedirect, JsonResponse
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib import messages
from datetime import time, datetime
from django.db.models import Q
from django.contrib.auth.models import Group
from django.contrib.auth.forms import PasswordChangeForm
import json
from apps.usuario.forms import FormLogin, UserForm, UserFormChange, GroupForm, GroupChangeForm, ContraseñaChangeForm
from apps.usuario.models import User
from apps.configuracion.models import ConfiEmpresa
from apps.utiles.views import *
# Create your views here.
class Login(FormView):
    """Login view: redirects authenticated users straight to the index and
    shows a localized error message on bad credentials.
    """
    template_name = 'registration/login.html'
    form_class = FormLogin
    success_url = reverse_lazy('index')
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self,request,*args, **kwargs):
        # Already-authenticated users skip the form entirely.
        if request.user.is_authenticated:
            return HttpResponseRedirect(self.get_success_url())
        else:
            if request.method == 'POST':
                # Pre-check credentials only to queue the Spanish error
                # message; the form itself still performs validation.
                username = request.POST['username']
                password = request.POST['password']
                usuario = authenticate(username=username, password=password)
                if usuario is None:
                    messages.error(request,'El nombre de usuario y/o contraseña son incorrectos')
            return super(Login, self).dispatch(request, *args, **kwargs)
    def form_valid(self,form):
        login(self.request,form.get_user())
        # Bootstrap the singleton company-config row (id=1) on first login.
        confi = ConfiEmpresa.objects.filter()
        if confi.count() == 0:
            confi_initial = ConfiEmpresa()
            confi_initial.id = 1
            confi_initial.save()
        return super(Login,self).form_valid(form)
@login_required()
def logoutUser(request):
    """End the current session and send the user back to the login page."""
    logout(request)
    return redirect('/accounts/login/')
@login_required()
def home_user(request):
    """Render the dashboard with the aggregate counters shown on the index
    page (users, clients, pets, stock and vaccine alerts, today's bookings).

    Counter helpers come from apps.utiles.views (star-imported above).
    """
    context = {
        'total_user': total_user(),
        'total_cliente': total_cliente(),
        'total_mascotas': total_mascotas(),
        'total_productos': total_producto(),
        'total_stock_minimo': total_stock_minimo(),
        'total_pro_vencer': total_productos_a_vencer(),
        'total_vacunas_aplicadas' : total_vacunas_aplicadas(),
        'total_reservas_hoy': total_reservas_hoy(),
        'total_proximas_vacunas': total_vacunas_proximas()
    }
    return render(request, "home/index.html", context)
@login_required()
@permission_required('usuario.view_user')
def list_usuarios(request):
    """Render the active-users listing page (rows are fetched via AJAX)."""
    return render(request, "usuario/list_usuarios.html")
@login_required()
def list_usuarios_ajax(request):
    """AJAX endpoint: active, non-superuser accounts as DataTables JSON.

    Query params: ``busqueda`` (matches first name, last name or username),
    ``start``/``length`` (pagination window).
    """
    # Bug fix: a missing ``busqueda`` param used to reach icontains=None and
    # raise; treat absent the same as an empty search.
    query = request.GET.get('busqueda') or ""
    if query != "":
        usuario = User.objects.exclude(is_active=False).filter(Q(first_name__icontains=query) | Q(last_name__icontains=query) | Q(username__icontains=query))
        usuario = usuario.exclude(is_superuser=True)
    else:
        usuario = User.objects.exclude(is_active=False).all()
        usuario = usuario.exclude(is_superuser=True)
    total = usuario.count()
    _start = request.GET.get('start')
    _length = request.GET.get('length')
    if _start and _length:
        start = int(_start)
        length = int(_length)
        # Removed unused page/per_page computation (it also referenced
        # ``math``, which is not imported in this module's visible imports).
        usuario = usuario[start:start + length]
    data = [{'id': usu.id,'nombre': usu.first_name, 'apellido': usu.last_name, 'email': usu.email, 'username': usu.username} for usu in usuario]
    response = {
        'data': data,
        'recordsTotal': total,
        'recordsFiltered': total,
    }
    return JsonResponse(response)
@login_required()
@permission_required('usuario.view_user')
def list_usuarios_baja(request):
    """Render the deactivated-users listing page (rows fetched via AJAX)."""
    return render(request, "usuario/list_usuarios_baja.html")
@login_required()
def list_usuarios_baja_ajax(request):
    """AJAX endpoint: deactivated, non-superuser accounts as DataTables JSON.

    Mirrors list_usuarios_ajax but excludes active accounts instead.
    """
    # Bug fix: a missing ``busqueda`` param used to reach icontains=None and
    # raise; treat absent the same as an empty search.
    query = request.GET.get('busqueda') or ""
    if query != "":
        usuario = User.objects.exclude(is_active=True).filter(Q(first_name__icontains=query) | Q(last_name__icontains=query) | Q(username__icontains=query))
        usuario = usuario.exclude(is_superuser=True)
    else:
        usuario = User.objects.exclude(is_active=True).all()
        usuario = usuario.exclude(is_superuser=True)
    total = usuario.count()
    _start = request.GET.get('start')
    _length = request.GET.get('length')
    if _start and _length:
        start = int(_start)
        length = int(_length)
        # Removed unused page/per_page computation (it also referenced
        # ``math``, which is not imported in this module's visible imports).
        usuario = usuario[start:start + length]
    data = [{'id': usu.id,'nombre': usu.first_name, 'apellido': usu.last_name, 'email': usu.email, 'username': usu.username} for usu in usuario]
    response = {
        'data': data,
        'recordsTotal': total,
        'recordsFiltered': total,
    }
    return JsonResponse(response)
@login_required()
@permission_required('usuario.add_user')
def add_usuario(request):
    """Create a user account; on success, redirect back to an empty form."""
    form = UserForm()
    # Removed unused ``group = Group.objects.all()`` query (never referenced).
    if request.method == 'POST':
        form = UserForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, "Se ha agregado correctamente!")
            return redirect('/usuario/add/')
    context = {'form': form}
    return render(request, 'usuario/add_usuario.html', context)
@login_required()
@permission_required('usuario.change_user')
def edit_usuario(request, id):
    """Edit user ``id``; a no-op submit shows an info notice instead of saving."""
    try:
        usuario = User.objects.get(id=id)
        # The requesting user is passed so the form can hide the group picker
        # from editors without user-admin rights.
        form = UserFormChange(request.user, instance=usuario)
        if request.method == 'POST':
            form = UserFormChange(request.user, request.POST, instance=usuario)
            if not form.has_changed():
                messages.info(request, "No ha hecho ningun cambio")
                return redirect('/usuario/edit/' + str(id))
            if form.is_valid():
                # form.save handles group reassignment; the row itself is
                # persisted here.
                user = form.save(commit=False)
                user.save()
                messages.add_message(request, messages.SUCCESS, 'Se ha editado correctamente!')
                return redirect('/usuario/edit/' + str(id))
        context = {'form': form, 'usuario': usuario}
        return render(request, 'usuario/edit_usuario.html', context)
    except Exception as e:
        # NOTE(review): an error is reported with level SUCCESS — probably
        # should be messages.ERROR; confirm before changing the UI level.
        messages.add_message(request, messages.SUCCESS, 'ha ocurrido un error, intentelo mas tarde!')
        return redirect('/usuario/listUsuarios/')
@login_required()
@permission_required('usuario.delete_user')
def baja_usuario(request, id):
    """Soft-delete (deactivate) user ``id``; users cannot deactivate themselves.

    GET renders the confirmation modal; POST performs the deactivation.
    """
    user = User.objects.get(id=id)
    # Removed dead local ``confirm`` (assigned but never read anywhere).
    if request.method == 'POST':
        if request.user == user:
            messages.error(request, "¡No puedes eliminar este usuario! intentelo mas tarde.")
            return redirect('/usuario/listUsuarios/')
        else:
            user.is_active = False
            user.save()
            # NOTE(review): success notice sent via messages.error — likely
            # should be messages.success; kept to preserve behavior.
            messages.error(request, "Se ha dado de baja correctamente!.")
            return redirect('/usuario/listUsuarios/')
    context = {"user": user}
    return render(request, 'usuario/dar_baja_usuario_modal.html', context)
@login_required()
@permission_required('usuario.delete_user')
def alta_usuario(request, id):
    """Reactivate a previously deactivated user.

    Fix: the success message was flashed with messages.error.
    NOTE(review): guards against acting on oneself, although an active user
    would not normally appear in the deactivated list — confirm intent.
    """
    user = User.objects.get(id=id)
    if request.user == user:
        messages.error(request, "¡No puedes eliminar este usuario! intentelo mas tarde.")
        return redirect('/usuario/listUsuariosBaja/')
    else:
        user.is_active = True
        user.save()
        messages.success(request, "Se ha dado de alta correctamente!.")
        return redirect('/usuario/listUsuariosBaja/')
@login_required()
@permission_required('usuario.change_user')
def change_password(request, id):
    """Let the logged-in user change their password without being logged out."""
    if request.method != 'POST':
        form = ContraseñaChangeForm(request.user)
        return render(request, 'usuario/edit_password.html', {'form': form, 'id': id})
    form = ContraseñaChangeForm(request.user, request.POST)
    if form.is_valid():
        usuario_actualizado = form.save(commit=False)
        usuario_actualizado.save()
        # Re-hash the session key so the password change keeps the session alive.
        update_session_auth_hash(request, usuario_actualizado)
        messages.add_message(request, messages.SUCCESS, 'Se ha editado correctamente!')
        return redirect('/usuario/editPassword/' + str(id))
    return render(request, 'usuario/edit_password.html', {'form': form, 'id': id})
# Roles
def get_group_list(request):
    """DataTables JSON endpoint listing auth Groups (roles).

    Filters by ?busqueda= (name icontains) and pages with start/length.
    (Removed the dead `page`/`per_page` locals — they were never used.)
    NOTE(review): a missing 'busqueda' param yields None != "" and still
    takes the filter branch — confirm callers always send the parameter.
    """
    query = request.GET.get('busqueda')
    if query != "":
        group = Group.objects.filter(Q(name__icontains=query))
    else:
        group = Group.objects.all()
    total = group.count()
    _start = request.GET.get('start')
    _length = request.GET.get('length')
    if _start and _length:
        start = int(_start)
        length = int(_length)
        group = group[start:start + length]
    data = [{'id': g.id, 'rol': g.name} for g in group]
    response = {
        'data': data,
        'recordsTotal': total,
        'recordsFiltered': total,
    }
    return JsonResponse(response)
@login_required()
@permission_required('usuario.add_user')
def add_rol(request):
    """Create a Group (role); re-renders the same page with all groups listed."""
    form = GroupForm()
    if request.method == 'POST':
        form = GroupForm(request.POST)
        if not form.is_valid():
            messages.error(request, form.errors)
        else:
            form.save()
            messages.success(request, "Se ha agregado correctamente!")
            return redirect('/usuario/addRol/')
    return render(request, 'usuario/add_rol.html', {'form': form, 'groups': Group.objects.all()})
@login_required()
@permission_required('usuario.add_user')
def edit_rol(request, id):
    """Edit a Group (role); shows validation errors and the full group list."""
    rol = Group.objects.get(id=id)
    if request.method != 'POST':
        form = GroupChangeForm(instance=rol)
    else:
        form = GroupChangeForm(request.POST, instance=rol)
        if not form.has_changed():
            messages.info(request, "No ha hecho ningun cambio")
            return redirect('/usuario/addRol/')
        if form.is_valid():
            form.save()
            messages.success(request, 'Se ha editado correctamente!')
            return redirect('/usuario/addRol/')
        messages.error(request, form.errors)
    context = {'form': form, 'groups': Group.objects.all()}
    return render(request, 'usuario/add_rol.html', context)
@login_required()
@permission_required('usuario.delete_user')
def delete_rol(request, id):
    """Hard-delete the Group with the given id, then return to the roles page."""
    Group.objects.get(id=id).delete()
    return redirect('/usuario/addRol/')
```
#### File: apps/utiles/models.py
```python
from django.db import models
from datetime import datetime
from apps.ventas.producto.models import Producto
from apps.ventas.factura.models import FacturaCabeceraVenta
date = datetime.now()
# Create your models here.
class Timbrado(models.Model):
    """Fiscal stamp (timbrado) with its validity window.

    `vencido` is "S" once the stamp is expired, "N" while current.
    NOTE(review): dates are stored as free-form strings, not DateFields.
    """
    nro_timbrado = models.CharField(max_length=500, null=True, blank=True)
    fecha_inicio_timbrado = models.CharField(max_length=500, null=True, blank=True)
    fecha_fin_timbrado = models.CharField(max_length=500, null=True, blank=True)
    vencido = models.CharField(max_length=2, default="N", blank=True, null=True)

    def __str__(self):
        # Bug fix: "'Timbrado: ' % (x)" had no %s placeholder and raised
        # TypeError ("not all arguments converted") whenever str() was called.
        return 'Timbrado: %s' % (self.nro_timbrado)
class Cedula(models.Model):
    """Registry of national ID (cedula) numbers.

    NOTE(review): `fecha_alta` is a formatted string whose default is frozen
    at import time (module-level `date`) — confirm intended.
    """
    nro_cedula = models.CharField(max_length=500, null=True, blank=True)
    fecha_alta = models.CharField(max_length=500, default = date.strftime("%d/%m/%Y %H:%M:%S hs"), null=True, blank=True)
class Ruc(models.Model):
    """Registry of tax IDs (RUC).

    NOTE(review): same stale string-default pattern as Cedula — confirm.
    """
    nro_ruc = models.CharField(max_length=500, null=True, blank=True)
    fecha_alta = models.CharField(max_length=500, default = date.strftime("%d/%m/%Y %H:%M:%S hs"), null=True, blank=True)
class ProductoVendido(models.Model):
    """Reporting aggregate: running total of units sold per product."""
    id_producto = models.ForeignKey(Producto, on_delete=models.CASCADE, null=True)
    cantidad_vendida_total = models.FloatField(null=True, blank=True, default=0)
class ProductoComprados(models.Model):
    """Reporting aggregate: running total of units purchased per product."""
    id_producto = models.ForeignKey(Producto, on_delete=models.CASCADE, null=True)
    cantidad_comprada_total = models.FloatField(null=True, blank=True, default=0)
class ProductoVendidoMes(models.Model):
    """Reporting aggregate: units sold per month/year.

    `numero_mes` is 1-12; `label_mes` is the Spanish month name; `anho` is the
    year as a string (it comes from splitting a dd/mm/yyyy date string).
    """
    id_producto = models.ForeignKey(Producto, on_delete=models.CASCADE, null=True)
    numero_mes = models.IntegerField(null=True, blank=True)
    label_mes = models.CharField(max_length=500, null=True, blank=True)
    anho = models.CharField(max_length=500, null=True, blank=True)
    cantidad_vendida_total = models.FloatField(null=True, blank=True, default=0)
class ProductoCompradoMes(models.Model):
    """Reporting aggregate: units purchased per month/year (see ProductoVendidoMes)."""
    id_producto = models.ForeignKey(Producto, on_delete=models.CASCADE, null=True)
    numero_mes = models.IntegerField(null=True, blank=True)
    label_mes = models.CharField(max_length=500, null=True, blank=True)
    anho = models.CharField(max_length=500, null=True, blank=True)
    cantidad_comprada_total = models.FloatField(null=True, blank=True, default=0)
class ServicioVendido(models.Model):
    """Reporting aggregate: running total of services sold.

    NOTE(review): still keyed by Producto — services appear to be stored in
    the Producto table; confirm.
    """
    id_producto = models.ForeignKey(Producto, on_delete=models.CASCADE, null=True)
    cantidad_vendida_total = models.FloatField(null=True, blank=True, default=0)
class VacunasAplicadas(models.Model):
    """Reporting aggregate: how many times each vaccine (a Producto) was applied."""
    id_producto = models.ForeignKey(Producto, on_delete=models.CASCADE, null=True)
    cantidad_aplicadas = models.FloatField(null=True, blank=True, default=0)
class GananciaPorMes(models.Model):
    """Reporting aggregate: sales revenue per month/year.

    `total_mes_formateado` caches a "Gs. 1,234" display string.
    NOTE(review): the FK points at whichever invoice created the row, not at
    all invoices folded into the total — confirm that is acceptable.
    """
    numero_mes = models.IntegerField(null=True, blank=True)
    label_mes = models.CharField(max_length=500, null=True, blank=True)
    anho = models.CharField(max_length=500, null=True, blank=True)
    total_mes = models.FloatField(null=True, blank=True)
    total_mes_formateado = models.CharField(max_length=600, null=True, blank=True)
    id_factura_venta = models.ForeignKey(FacturaCabeceraVenta, on_delete=models.CASCADE, null=True)
```
#### File: apps/utiles/views.py
```python
import json
from django.shortcuts import render
from django.http import JsonResponse
from datetime import datetime, date
from apps.utiles.models import (Timbrado, ProductoVendido, ProductoComprados,ProductoVendidoMes,
ProductoCompradoMes,ServicioVendido,GananciaPorMes, Cedula, Ruc, VacunasAplicadas)
from apps.ventas.factura.models import FacturaCabeceraVenta, FacturaDetalleVenta
from apps.compras.models import FacturaCompra, FacturaDet, Proveedor
from apps.ventas.producto.models import Producto
from apps.ventas.cliente.models import Cliente
from apps.ventas.mascota.models import Mascota
from apps.configuracion.models import Empleado, ConfiEmpresa
from apps.reserva.models import Reserva
from apps.ventas.mascota.models import HistoricoFichaMedica
from apps.usuario.models import User
# Snapshot of "today" taken at import time; long-running processes will see a
# stale value — NOTE(review): confirm this is acceptable for the dashboards.
hoy = date.today()
# Create your views here.
# Spanish month names indexed by month number - 1 (used as label_mes[mes - 1]).
label_mes = ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio', 'Julio', 'Agosto',
             'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']
def reset_nro_timbrado(nro_timbrado):
    """Return the next invoice sequence number under a fiscal stamp.

    Counts sale invoices already issued with *nro_timbrado* and returns
    count + 1; falls back to 1 when the stamp is None, has no invoices yet,
    or the lookup fails.
    """
    if nro_timbrado is None:
        return 1
    try:
        emitidas = FacturaCabeceraVenta.objects.filter(nro_timbrado=nro_timbrado).count()
        return emitidas + 1 if emitidas > 0 else 1
    except Exception:
        return 1
def poner_vencido_timbrado(request):
    """AJAX endpoint: mark the stamp given in ?nro_timbrado= as expired.

    Returns {"mensaje": "OK"} on success, {"mensaje": "ERROR"} when the
    lookup or save fails (e.g. no such stamp).
    """
    mensaje = ""
    try:
        timbrado = Timbrado.objects.get(nro_timbrado=request.GET.get('nro_timbrado'))
        timbrado.vencido = "S"  # "S" == expired (see Timbrado model)
        timbrado.save()
        mensaje = "OK"
        response = {"mensaje": mensaje}
        return JsonResponse(response)
    except Exception as e:
        response = {"mensaje": "ERROR"}
        return JsonResponse(response)
def validate_nro_timbrado(request):
    """AJAX endpoint: check whether the stamp in ?nro_timbrado= is expired.

    Returns {"mensaje": "OK"} when the stamp exists and is marked expired,
    and {"mensaje": ""} otherwise (not expired, or lookup failed).
    """
    mensaje = ""
    try:
        timbrado_vencido = Timbrado.objects.get(nro_timbrado=request.GET.get('nro_timbrado'))
        if timbrado_vencido.vencido == "S":
            mensaje = "OK"
        # NOTE(review): flattened source makes it ambiguous whether this
        # return sits inside the if; as written, a non-expired stamp returns
        # mensaje == "" — confirm against the original file.
        response = {"mensaje": mensaje}
        return JsonResponse(response)
    except Exception as e:
        response = {"mensaje": mensaje}
        return JsonResponse(response)
def cargar_productos_vendidos():
    """Fold unprocessed sale-invoice lines into ProductoVendido totals.

    Only product lines (tipo != 'S') are counted; each detail row is flagged
    detalle_cargado_reporte='S' so it is folded exactly once. Errors abort
    the whole batch silently.
    """
    # Annulled invoices are excluded from reporting.
    facturaVenta = FacturaCabeceraVenta.objects.exclude(factura_anulada='S')
    try:
        if facturaVenta is not None:  # NOTE(review): a queryset is never None; always true
            for fv in facturaVenta:
                facturaDetalle = FacturaDetalleVenta.objects.filter(id_factura_venta=fv.id)
                for factDet in facturaDetalle:
                    if factDet.tipo != 'S':  # skip service lines
                        if factDet.detalle_cargado_reporte == 'N':
                            factDet.detalle_cargado_reporte = "S"
                            factDet.save()
                            try:
                                # Accumulate onto the existing per-product row...
                                produc = ProductoVendido.objects.get(id_producto=factDet.id_producto.id)
                                produc.cantidad_vendida_total += factDet.cantidad
                                produc.save()
                            except Exception as e:
                                # ...or create it on the first sale of this product.
                                produc = ProductoVendido()
                                pro_id = Producto.objects.get(id=factDet.id_producto.id)
                                produc.id_producto = pro_id
                                produc.cantidad_vendida_total = factDet.cantidad
                                produc.save()
    except Exception as e:
        pass  # best-effort batch job: errors are swallowed by design (TODO: log)
def cargar_productos_comprados():
    """Fold unprocessed purchase-invoice lines into ProductoComprados totals.

    Mirrors cargar_productos_vendidos but over FacturaCompra/FacturaDet and
    with no service-line filter. Each detail row is flagged
    detalle_cargado_reporte='S' so it is folded exactly once.
    """
    facturaCompra = FacturaCompra.objects.all()
    try:
        if facturaCompra is not None:  # NOTE(review): a queryset is never None; always true
            for fc in facturaCompra:
                facturaDetalle = FacturaDet.objects.filter(id_factura=fc.id)
                for factDet in facturaDetalle:
                    if factDet.detalle_cargado_reporte == 'N':
                        factDet.detalle_cargado_reporte = "S"
                        factDet.save()
                        try:
                            # Accumulate onto the existing per-product row...
                            produc = ProductoComprados.objects.get(id_producto=factDet.id_producto.id)
                            produc.cantidad_comprada_total += factDet.cantidad
                            produc.save()
                        except Exception as e:
                            # ...or create it on the first purchase of this product.
                            produc = ProductoComprados()
                            pro_id = Producto.objects.get(id=factDet.id_producto.id)
                            produc.id_producto = pro_id
                            produc.cantidad_comprada_total = factDet.cantidad
                            produc.save()
    except Exception as e:
        pass  # best-effort batch job: errors are swallowed by design (TODO: log)
def cargar_producto_vendido_mes():
    """Fold unprocessed sale lines into per-month ProductoVendidoMes rows.

    The invoice date is a 'dd/mm/yyyy' string; fecha_split[1]/[2] are the
    month/year. The `if count() == 0: get(...)` call deliberately raises on
    an empty queryset so the except branch creates the first row for that
    year/month.
    """
    facturaVenta = FacturaCabeceraVenta.objects.exclude(factura_anulada='S')
    try:
        if facturaVenta is not None:
            for fv in facturaVenta:
                fecha_split = fv.fecha_alta.split('/')  # [dd, mm, yyyy]
                facturaDetalle = FacturaDetalleVenta.objects.filter(id_factura_venta=fv.id)
                for factDet in facturaDetalle:
                    if factDet.tipo != 'S':  # products only
                        if factDet.detalle_cargado_mes == 'N':
                            factDet.detalle_cargado_mes = "S"
                            factDet.save()
                            try:
                                produc = ProductoVendidoMes.objects.filter(anho=fecha_split[2])
                                if produc.count() == 0:
                                    # Forces DoesNotExist -> except creates the row.
                                    produc = produc.get(anho=fecha_split[2])
                                # NOTE(review): lookup ignores id_producto, so all
                                # products of a month share one row (and several
                                # matches would raise, creating a duplicate) —
                                # confirm this matches the report's intent.
                                produc = produc.get(numero_mes=int(fecha_split[1]))
                                produc.cantidad_vendida_total += factDet.cantidad
                                produc.save()
                            except Exception as e:
                                produc = ProductoVendidoMes()
                                pro_id = Producto.objects.get(id=factDet.id_producto.id)
                                produc.id_producto = pro_id
                                produc.label_mes = label_mes[int(fecha_split[1]) - 1]
                                produc.numero_mes = int(fecha_split[1])
                                produc.anho = fecha_split[2]
                                produc.cantidad_vendida_total = factDet.cantidad
                                produc.save()
    except Exception as e:
        pass  # best-effort batch job (TODO: log)
def cargar_productos_comprado_mes():
    """Fold unprocessed purchase lines into per-month ProductoCompradoMes rows.

    Mirrors cargar_producto_vendido_mes (see its notes about the forced
    DoesNotExist and the per-month row sharing) for purchase invoices.
    """
    facturaCompra = FacturaCompra.objects.all()
    try:
        if facturaCompra is not None:
            for fc in facturaCompra:
                fecha_split = fc.fecha_alta.split('/')  # [dd, mm, yyyy]
                facturaDetalle = FacturaDet.objects.filter(id_factura=fc.id)
                for factDet in facturaDetalle:
                    if factDet.detalle_cargado_mes == 'N':
                        factDet.detalle_cargado_mes = "S"
                        factDet.save()
                        try:
                            produc = ProductoCompradoMes.objects.filter(anho=fecha_split[2])
                            if produc.count() == 0:
                                # Forces DoesNotExist -> except creates the row.
                                produc = produc.get(anho=fecha_split[2])
                            produc = produc.get(numero_mes=int(fecha_split[1]))
                            produc.cantidad_comprada_total += factDet.cantidad
                            produc.save()
                        except Exception as e:
                            produc = ProductoCompradoMes()
                            pro_id = Producto.objects.get(id=factDet.id_producto.id)
                            produc.id_producto = pro_id
                            produc.label_mes = label_mes[int(fecha_split[1]) - 1]
                            produc.numero_mes = int(fecha_split[1])
                            produc.anho = fecha_split[2]
                            produc.cantidad_comprada_total = factDet.cantidad
                            produc.save()
    except Exception as e:
        pass  # best-effort batch job (TODO: log)
def cargar_servicios_vendidos():
    """Fold unprocessed sale lines for services into ServicioVendido totals.

    Counts lines whose tipo != 'P' (i.e. service lines), flagging each with
    detalle_cargado_servicio='S' so it is folded exactly once.
    """
    facturaVenta = FacturaCabeceraVenta.objects.exclude(factura_anulada='S')
    try:
        if facturaVenta is not None:  # NOTE(review): a queryset is never None; always true
            for fv in facturaVenta:
                facturaDetalle = FacturaDetalleVenta.objects.filter(id_factura_venta=fv.id)
                for factDet in facturaDetalle:
                    if factDet.tipo != 'P':  # services only
                        if factDet.detalle_cargado_servicio == 'N':
                            factDet.detalle_cargado_servicio = "S"
                            factDet.save()
                            try:
                                # Accumulate onto the existing per-service row...
                                produc = ServicioVendido.objects.get(id_producto=factDet.id_producto.id)
                                produc.cantidad_vendida_total += factDet.cantidad
                                produc.save()
                            except Exception as e:
                                # ...or create it on the first sale of this service.
                                produc = ServicioVendido()
                                pro_id = Producto.objects.get(id=factDet.id_producto.id)
                                produc.id_producto = pro_id
                                produc.cantidad_vendida_total = factDet.cantidad
                                produc.save()
    except Exception as e:
        pass  # best-effort batch job (TODO: log)
def cargar_ganacias_por_mes():
    """Fold unprocessed sale invoices into per-month GananciaPorMes revenue rows.

    Each invoice is flagged factura_to_reporte='S' so it is folded exactly
    once; totals keep a thousands-formatted "Gs. ..." display string in sync.
    Uses the same forced-DoesNotExist creation trick as the *_mes loaders.
    """
    facturaVenta = FacturaCabeceraVenta.objects.exclude(factura_anulada='S').all()
    try:
        for fv in facturaVenta:
            if fv.factura_to_reporte == 'N':
                fv.factura_to_reporte = 'S'
                fv.save()
                fecha_split = fv.fecha_alta.split('/')  # [dd, mm, yyyy]
                try:
                    reporte_ga = GananciaPorMes.objects.filter(anho=fecha_split[2])
                    if reporte_ga.count() == 0:
                        # Forces DoesNotExist -> except creates the row.
                        reporte_ga = reporte_ga.get(anho=fecha_split[2])
                    reporte_ga = reporte_ga.get(numero_mes=int(fecha_split[1]))
                    reporte_ga.total_mes += int(fv.total)
                    reporte_ga.total_mes_formateado = "Gs. " + '{0:,}'.format(reporte_ga.total_mes)
                    reporte_ga.save()
                except Exception as e:
                    reporte_ga = GananciaPorMes()
                    reporte_ga.id_factura_venta = fv
                    reporte_ga.label_mes = label_mes[int(fecha_split[1]) - 1]
                    reporte_ga.numero_mes = int(fecha_split[1])
                    reporte_ga.anho = fecha_split[2]
                    reporte_ga.total_mes = int(fv.total)
                    reporte_ga.total_mes_formateado = "Gs. " + '{0:,}'.format(reporte_ga.total_mes)
                    reporte_ga.save()
    except Exception as e:
        pass  # best-effort batch job (TODO: log)
def validar_cedula(request):
    """AJAX check whether the cedula in ?cedula= is free.

    Responds 'EX' when it already belongs to a Cliente or Empleado, 'OK'
    when free, 'ER' on unexpected errors.
    (Removed the unused `list_cedula` queryset local.)
    """
    cedula = request.GET.get('cedula')
    try:
        if Cliente.objects.filter(cedula=cedula).exists():
            mensaje = 'EX'
        elif Empleado.objects.filter(ci_empe=cedula).exists():
            mensaje = 'EX'
        else:
            mensaje = 'OK'
        return JsonResponse({'mensaje': mensaje})
    except Exception as e:
        return JsonResponse({'mensaje': 'ER'})
def validar_ruc(request):
    """AJAX check whether the RUC in ?ruc= is free.

    Responds 'EX' when it already belongs to a Cliente, Proveedor, or the
    company itself (ConfiEmpresa), 'OK' when free, 'ER' on unexpected errors.
    Fixes: removed the unused `list_validaciones` local and added the missing
    `.exists()` on the ConfiEmpresa branch (same truthiness, consistent style).
    """
    obj_validar = request.GET.get('ruc')
    try:
        if Cliente.objects.filter(ruc=obj_validar).exists():
            mensaje = 'EX'
        elif Proveedor.objects.filter(ruc_proveedor=obj_validar).exists():
            mensaje = 'EX'
        elif ConfiEmpresa.objects.filter(ruc_empresa=obj_validar).exists():
            mensaje = 'EX'
        else:
            mensaje = 'OK'
        return JsonResponse({'mensaje': mensaje})
    except Exception as e:
        return JsonResponse({'mensaje': 'ER'})
def get_reserva_today(request):
    """DataTables JSON endpoint: today's reservations, excluding finished
    ('FIN') and cancelled ('CAN') ones; supports start/length paging.

    Bug fix: removed the dead `page = math.ceil(...)`/`per_page` locals —
    this module never imports math, so every paged request raised NameError,
    which the blanket except silently turned into an empty result set.
    """
    data = []
    try:
        fecha_hoy = date(hoy.year, hoy.month, hoy.day)
        reservas = Reserva.objects.exclude(estado_re='FIN').filter(fecha_reserva=fecha_hoy)
        reservas = reservas.exclude(estado_re='CAN').all()
        total = reservas.count()
        _start = request.GET.get('start')
        _length = request.GET.get('length')
        if _start and _length:
            start = int(_start)
            length = int(_length)
            reservas = reservas[start:start + length]
        data = [{'cliente': r.id_cliente.nombre_cliente + '\n' + r.id_cliente.apellido_cliente,
                 'mascota': r.id_mascota.nombre_mascota,
                 'evento': 'Servicio: ' + r.id_servicio.nombre_servicio + '\n' + 'Horario: ' + r.hora_reserva} for r in reservas]
        response = {
            'data': data,
            'recordsTotal': total,
            'recordsFiltered': total,
        }
        return JsonResponse(response)
    except Exception as e:
        response = {
            'data': data,
            'recordsTotal': 0,
            'recordsFiltered': 0,
        }
        return JsonResponse(response)
def get_vacunas_today(request):
    """DataTables JSON endpoint: vaccinations due today (dd/mm/yyyy match
    on fecha_proxima_aplicacion); supports start/length paging.

    Bug fix: removed the dead `page = math.ceil(...)`/`per_page` locals —
    math is not imported in this module, so paged requests raised NameError
    and the blanket except returned an empty result set.
    """
    data = []
    try:
        fecha_hoy = hoy.strftime("%d/%m/%Y")
        historico = HistoricoFichaMedica.objects.filter(fecha_proxima_aplicacion=fecha_hoy)
        total = historico.count()
        _start = request.GET.get('start')
        _length = request.GET.get('length')
        if _start and _length:
            start = int(_start)
            length = int(_length)
            historico = historico[start:start + length]
        data = [{'cliente': h.id_mascota.id_cliente.nombre_cliente + '\n' + h.id_mascota.id_cliente.apellido_cliente,
                 'mascota': h.id_mascota.nombre_mascota, 'evento': 'Vacuna: ' + h.proxima_vacunacion} for h in historico]
        response = {
            'data': data,
            'recordsTotal': total,
            'recordsFiltered': total,
        }
        return JsonResponse(response)
    except Exception as e:
        response = {
            'data': data,
            'recordsTotal': 0,
            'recordsFiltered': 0,
        }
        return JsonResponse(response)
def total_user():
    """Count active, non-superuser accounts; 0 when the query fails."""
    try:
        activos = User.objects.exclude(is_active=False).exclude(is_superuser=True)
        return activos.count()
    except Exception:
        return 0
def total_cliente():
    """Count clients not soft-deleted (is_active != "N"); 0 on error."""
    try:
        return Cliente.objects.exclude(is_active="N").count()
    except Exception:
        return 0
def total_mascotas():
    """Count all registered pets; 0 on error."""
    try:
        return Mascota.objects.all().count()
    except Exception:
        return 0
def total_producto():
    """Count active, non-service, non-expired products; 0 on error."""
    try:
        vigentes = (Producto.objects.exclude(is_active="N")
                    .exclude(servicio_o_producto="S")
                    .exclude(producto_vencido="S"))
        return vigentes.count()
    except Exception:
        return 0
def total_stock_minimo():
    """Count active, unexpired products whose stock is at or below the minimum."""
    try:
        vigentes = (Producto.objects.exclude(is_active="N")
                    .order_by('-last_modified')
                    .exclude(servicio_o_producto="S")
                    .exclude(producto_vencido="S"))
        return sum(1 for p in vigentes if p.stock_minimo >= p.stock_total)
    except Exception:
        return 0
def total_productos_a_vencer():
    """Count active products expiring within the configured window.

    The window comes from ConfiEmpresa.dias_a_vencer, defaulting to 30 days
    when the config row is missing. Returns 0 on any other failure.
    """
    try:
        dias_compare = ConfiEmpresa.objects.get(id=1).dias_a_vencer
    except Exception:
        dias_compare = 30
    try:
        vigentes = (Producto.objects.exclude(is_active="N")
                    .order_by('-last_modified')
                    .exclude(servicio_o_producto="S"))
        proximos = [p for p in vigentes
                    if p.fecha_vencimiento is not None
                    and rest_dates(p.fecha_vencimiento) <= dias_compare]
        return len(proximos)
    except Exception:
        return 0
def total_vacunas_aplicadas():
    """Count all medical-history entries (applied vaccinations); 0 on error."""
    try:
        return HistoricoFichaMedica.objects.all().count()
    except Exception:
        return 0
def total_reservas_hoy():
    """Count reservations scheduled for today; 0 on error."""
    try:
        fecha_hoy = date(hoy.year, hoy.month, hoy.day)
        return Reserva.objects.filter(fecha_reserva=fecha_hoy).count()
    except Exception:
        return 0
def total_vacunas_proximas():
    """Count medical-history entries with a next vaccination due within the
    configured alert window (ConfiEmpresa.dias_alert_vacunas, default 30)."""
    try:
        dias_compare = ConfiEmpresa.objects.get(id=1).dias_alert_vacunas
    except Exception:
        dias_compare = 30
    try:
        proximas = [f for f in HistoricoFichaMedica.objects.all()
                    if f.fecha_proxima_aplicacion is not None
                    and rest_dates(f.fecha_proxima_aplicacion) <= dias_compare]
        return len(proximas)
    except Exception:
        return 0
def cargar_vacunas_aplicadas():
    """Fold unprocessed medical-history rows into VacunasAplicadas counters.

    Each history row is flagged historico_cargado_reporte='S' so it is
    counted exactly once; the counter is keyed by the vaccine's Producto.
    """
    vacunas_aplicadas = HistoricoFichaMedica.objects.all()
    try:
        for va in vacunas_aplicadas:
            if va.historico_cargado_reporte == 'N':
                va.historico_cargado_reporte = 'S'
                va.save()
                try:
                    # Increment the existing per-vaccine counter...
                    produc = VacunasAplicadas.objects.get(id_producto=va.vacuna.id_producto.id)
                    produc.cantidad_aplicadas += 1
                    produc.save()
                except Exception as e:
                    # ...or create it on the first application of this vaccine.
                    produc = VacunasAplicadas()
                    pro_id = Producto.objects.get(id=va.vacuna.id_producto.id)
                    produc.id_producto = pro_id
                    produc.cantidad_aplicadas = 1
                    produc.save()
    except Exception as e:
        pass  # best-effort batch job (TODO: log)
def rest_dates(fecha_vencimiento):
    """Return the number of days until *fecha_vencimiento* ('dd/mm/yyyy').

    Returns 0 when the date is in the past or the string cannot be parsed.
    Fixes: removed the unused `fechaDate` local and the duplicate
    subtraction (the delta is now computed once).
    """
    try:
        d, m, y = fecha_vencimiento.split('/')
        vencimiento = date(int(y), int(m), int(d))
        dias_restantes = (vencimiento - hoy).days
        return dias_restantes if dias_restantes >= 0 else 0
    except Exception:
        return 0
```
#### File: ventas/mascota/views.py
```python
from django.shortcuts import render, redirect, HttpResponse
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib import messages
from django.db.models import Q
from django.core.paginator import Paginator
from django.http import JsonResponse
from datetime import date, datetime
import json
import math
from apps.ventas.mascota.models import Mascota, Especie, Raza, Raza, FichaMedica, Vacuna, Consulta, Antiparasitario, HistoricoFichaMedica
from .form import MascotaForm, EspecieForm, RazaForm, FichaMedicaForm, VacunaForm, ConsultaForm, AntiparasitarioForm
from apps.configuracion.models import TipoVacuna
from apps.ventas.producto.models import Producto
date = datetime.now()
# Create your views here.
@login_required()
@permission_required('mascota.add_mascota')
def add_mascota(request):
    """Create a Mascota (pet); accepts uploads via request.FILES.

    Fix: `form` was bound to the MascotaForm *class* rather than an instance
    (it only rendered because Django templates call callables); it is now
    instantiated, matching the other add views (e.g. add_usuario).
    """
    form = MascotaForm()
    if request.method == 'POST':
        form = MascotaForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            messages.success(request, 'Se ha agregado correctamente!')
            return redirect('/mascota/add')
    context = {'form': form}
    return render(request, 'ventas/mascota/add_mascota.html', context)
# Edit an existing pet (Mascota).
@login_required()
@permission_required('mascota.view_mascota')
def edit_mascota(request, id):
    """Edit the pet with the given id; POST with no changes is rejected."""
    mascota = Mascota.objects.get(id=id)
    if request.method == 'POST':
        form = MascotaForm(request.POST, request.FILES, instance=mascota)
        if not form.has_changed():
            messages.info(request, "No has hecho ningun cambio!")
            return redirect('/mascota/edit/' + str(id))
        if form.is_valid():
            mascota = form.save(commit=False)
            mascota.save()
            messages.success(request, 'Se ha editado correctamente!')
            return redirect('/mascota/edit/' + str(id))
    else:
        form = MascotaForm(instance=mascota)
    return render(request, 'ventas/mascota/edit_mascota.html', {'form': form, 'mascota': mascota})
@login_required()
@permission_required('mascota.view_mascota')
def list_mascotas(request):
    """Paginated pet listing, most recently modified first (8 per page)."""
    todas = Mascota.objects.all().order_by('-last_modified')
    pagina = Paginator(todas, 8).get_page(request.GET.get('page'))
    return render(request, "ventas/mascota/list_mascotas.html", {'page_obj': pagina})
@login_required()
def search_mascota(request):
    """Search pets by pet or owner name (?q=); falls back to the full list."""
    termino = request.GET.get('q')
    if termino:
        resultados = Mascota.objects.filter(
            Q(nombre_mascota__icontains=termino) |
            Q(id_cliente__nombre_cliente__icontains=termino) |
            Q(id_cliente__apellido_cliente__icontains=termino))
    else:
        resultados = Mascota.objects.all().order_by('-last_modified')
    pagina = Paginator(resultados, 8).get_page(request.GET.get('page'))
    return render(request, "ventas/mascota/list_mascotas.html", {'page_obj': pagina})
"""
Functions of Epecies
"""
@login_required()
@permission_required('mascota.add_especie')
def add_especie(request):
    """Create an Especie (species) from the modal form.

    Fixes: instantiate the form class (it was bound to the class itself),
    and flash the success message only after the save actually happens
    (consistent with add_raza).
    """
    form = EspecieForm()
    if request.method == 'POST':
        form = EspecieForm(request.POST or None)
        if form.is_valid():
            form.save()
            messages.success(request, 'Se ha agregado correctamente!')
            return redirect('/mascota/listEspecie/')
    context = {'form': form}
    return render(request, 'ventas/mascota/especie/add_especie_modal.html', context)
@login_required()
@permission_required('mascota.change_especie')
def edit_especie(request, id):
    """Edit an Especie via the modal form; no-op POSTs are rejected."""
    registro = Especie.objects.get(id=id)
    if request.method == 'POST':
        form = EspecieForm(request.POST, instance=registro)
        if not form.has_changed():
            messages.info(request, "No has hecho ningun cambio!")
            return redirect('/mascota/listEspecie/')
        if form.is_valid():
            registro = form.save(commit=False)
            registro.save()
            messages.success(request, 'Se ha editado correctamente!')
            return redirect('/mascota/listEspecie/')
    else:
        form = EspecieForm(instance=registro)
    return render(request, 'ventas/mascota/especie/edit_especie_modal.html', {'form': form, 'especie': registro})
@login_required()
@permission_required('mascota.view_especie')
def list_especie(request):
    """Paginated species list, most recently modified first (10 per page)."""
    todas = Especie.objects.all().order_by('-last_modified')
    pagina = Paginator(todas, 10).get_page(request.GET.get('page'))
    return render(request, "ventas/mascota/especie/list_especie.html", {'page_obj': pagina})
@login_required()
def list_especie_ajax(request):
    """DataTables JSON endpoint for species: filters by ?busqueda= (name
    icontains) and pages with start/length.

    (Removed the dead `page`/`per_page` locals — they were never used.)
    """
    query = request.GET.get('busqueda')
    if query != "":
        especie = Especie.objects.filter(Q(nombre_especie__icontains=query))
    else:
        especie = Especie.objects.all().order_by('-last_modified')
    total = especie.count()
    _start = request.GET.get('start')
    _length = request.GET.get('length')
    if _start and _length:
        start = int(_start)
        length = int(_length)
        especie = especie[start:start + length]
    data = [{'id': espe.id, 'nombre': espe.nombre_especie} for espe in especie]
    response = {
        'data': data,
        'recordsTotal': total,
        'recordsFiltered': total,
    }
    return JsonResponse(response)
@login_required()
def search_especie(request):
    """Search species by name (?q=); falls back to the full list."""
    termino = request.GET.get('q')
    resultados = (Especie.objects.filter(Q(nombre_especie__icontains=termino))
                  if termino else Especie.objects.all().order_by('-last_modified'))
    pagina = Paginator(resultados, 10).get_page(request.GET.get('page'))
    return render(request, "ventas/mascota/especie/list_especie.html", {'page_obj': pagina})
"""
Functions of Razas
"""
@login_required()
@permission_required('mascota.add_raza')
def add_raza(request):
    """Create a Raza (breed) from the modal form.

    Fix: instantiate the form class (`form = RazaForm` bound the class
    itself; it only rendered because templates call callables).
    """
    form = RazaForm()
    if request.method == 'POST':
        form = RazaForm(request.POST or None)
        if form.is_valid():
            form.save()
            messages.success(request, 'Se ha agregado correctamente!')
            return redirect('/mascota/listRaza/')
    context = {'form': form}
    return render(request, 'ventas/mascota/raza/add_raza_modal.html', context)
@login_required()
@permission_required('mascota.change_raza')
def edit_raza(request, id):
    """Edit a Raza via the modal form; no-op POSTs are rejected."""
    registro = Raza.objects.get(id=id)
    if request.method == 'POST':
        form = RazaForm(request.POST, instance=registro)
        if not form.has_changed():
            messages.info(request, "No has hecho ningun cambio!")
            return redirect('/mascota/listRaza/')
        if form.is_valid():
            registro = form.save(commit=False)
            registro.save()
            messages.success(request, 'Se ha editado correctamente!')
            return redirect('/mascota/listRaza/')
    else:
        form = RazaForm(instance=registro)
    return render(request, 'ventas/mascota/raza/edit_raza_modal.html', {'form': form, 'raza': registro})
@login_required()
@permission_required('mascota.view_raza')
def list_raza(request):
    """Paginated breed list plus a RazaForm (class, as the template expects)
    for the add modal."""
    pagina = Paginator(Raza.objects.all().order_by('-last_modified'), 10).get_page(request.GET.get('page'))
    return render(request, "ventas/mascota/raza/list_raza.html", {'page_obj': pagina, 'form': RazaForm})
@login_required()
def get_list_raza_ajax(request):
    """DataTables JSON endpoint for breeds: filters by ?busqueda= on breed
    or species name and pages with start/length.

    (Removed the dead `page`/`per_page` locals — they were never used.)
    """
    query = request.GET.get('busqueda')
    if query != "":
        raza = Raza.objects.filter(Q(nombre_raza__icontains=query) | Q(id_especie__nombre_especie__icontains=query))
    else:
        raza = Raza.objects.all().order_by('-last_modified')
    total = raza.count()
    _start = request.GET.get('start')
    _length = request.GET.get('length')
    if _start and _length:
        start = int(_start)
        length = int(_length)
        raza = raza[start:start + length]
    data = [{'id': ra.id, 'nombre_raza': ra.nombre_raza, 'nombre_especie': ra.id_especie.nombre_especie} for ra in raza]
    response = {
        'data': data,
        'recordsTotal': total,
        'recordsFiltered': total,
    }
    return JsonResponse(response)
@login_required()
def search_raza(request):
    """Search breeds by name (?q=); falls back to the full list."""
    termino = request.GET.get('q')
    resultados = (Raza.objects.filter(Q(nombre_raza__icontains=termino))
                  if termino else Raza.objects.all().order_by('-last_modified'))
    pagina = Paginator(resultados, 10).get_page(request.GET.get('page'))
    return render(request, "ventas/mascota/raza/list_raza.html", {'page_obj': pagina})
# Medical record (ficha medica) views.
@login_required()
@permission_required('mascota.view_mascota')
def edit_ficha_medica(request, id):
    """Edit a pet's medical record: ficha, vaccine, consultation and
    antiparasitic forms are posted together on one page.

    On a valid POST the four model forms are saved and a history row is
    appended via create_historico_ficha_medica(); the GET path also builds
    the lists of vaccines available for "applied" / "next" dropdowns.
    NOTE(review): the blanket try/except around the POST branch swallows
    save errors silently — confirm intended.
    """
    mascota = Mascota.objects.get(id=id)
    fichaMedicaGet = FichaMedica.objects.get(id_mascota=id)
    vacunaGet = Vacuna.objects.get(id_ficha_medica=fichaMedicaGet.id)
    consultaGet = Consulta.objects.get(id_ficha_medica=fichaMedicaGet.id)
    antiparasitarioGet = Antiparasitario.objects.get(id_ficha_medica=fichaMedicaGet.id)
    historicoFichaMedica = HistoricoFichaMedica  # NOTE(review): class ref, rebound below; never used afterwards
    vacuna_aplicado = []   # choices for the "applied vaccine" dropdown
    vacuna_proxima = []    # choices for the "next vaccine" dropdown
    vacunas = TipoVacuna.objects.all()
    try:
        if request.method == 'POST':
            formFichaMedica = FichaMedicaForm(request.POST, instance=fichaMedicaGet)
            formVacuna = VacunaForm(request.POST, instance=vacunaGet)
            formConsulta = ConsultaForm(request.POST, instance=consultaGet)
            formAntiparasitario = AntiparasitarioForm(request.POST, instance=antiparasitarioGet)
            if not formVacuna.has_changed() and not formConsulta.has_changed() and not formAntiparasitario.has_changed():
                messages.info(request, "No has hecho ningun cambio!")
                return redirect('/mascota/editFichaMedica/' + str(id))
            # NOTE(review): `or` means one valid form lets all four save — confirm.
            if formVacuna.is_valid() or formConsulta.is_valid() or formAntiparasitario.is_valid():
                # Raw POST fields not covered by the model forms.
                proxima_vacuna = request.POST.get('proxima_vacunacion')
                fecha_proxima_vacuna = request.POST.get('fecha_proxima_aplicacion')
                antiparasitario_aplicado = request.POST.get('antipara')
                proximo_antiparasitario_aplicado = request.POST.get('proximo_antipara')
                consulta = formConsulta.save(commit=False)
                vacuna = formVacuna.save(commit=False)
                antiparasitario = formAntiparasitario.save(commit=False)
                fichaMedica = formFichaMedica.save(commit=False)
                fichaMedica.save()
                vacuna.save()
                antiparasitario.save()
                consulta.save()
                historicoFichaMedica = create_historico_ficha_medica(id, proxima_vacuna, antiparasitario_aplicado, proximo_antiparasitario_aplicado)
                messages.success(request, 'Se ha editado correctamente!')
                return redirect('/mascota/editFichaMedica/' + str(id))
    except Exception as e:
        pass  # NOTE(review): save failures are silently ignored here
    # Build the dropdown choices: single-application vaccines already in the
    # history are excluded; multi-application ('S') ones always remain.
    if vacunas.count() > 0:
        list_historico = HistoricoFichaMedica.objects.filter(id_ficha_medica=id)
        if list_historico.count() > 0:
            for vacu in vacunas:
                try:
                    vacu_historico = list_historico.get(vacuna=vacu)
                    if vacu.multi_aplicaciones == 'S':
                        vacuna_aplicado.append(vacu)
                        vacuna_proxima.append(vacu)
                except Exception as e:
                    # Not in history yet -> still offered.
                    vacuna_aplicado.append(vacu)
                    vacuna_proxima.append(vacu)
        else:
            for va in vacunas:
                vacuna_aplicado.append(va)
                vacuna_proxima.append(va)
    formFichaMedica = FichaMedicaForm(instance=fichaMedicaGet)
    formVacuna = VacunaForm(instance=vacunaGet)
    formConsulta = ConsultaForm(instance=consultaGet)
    formAntiparasitario = AntiparasitarioForm(instance=antiparasitarioGet)
    context = {
        'mascota': mascota,
        'formFichaMedica': formFichaMedica,
        'formVacuna': formVacuna,
        'formConsulta': formConsulta,
        'formAntiparasitario': formAntiparasitario,
        'fichaMedicaGet': fichaMedicaGet,
        'vacunas_proxima': vacuna_proxima,
        'vacunas_aplicada': vacuna_aplicado
    }
    return render(request, "ventas/mascota/ficha_medica/edit_ficha_medica.html", context)
def get_prox_vacuna(request):
    """AJAX endpoint: given an applied vaccine (?id_vacuna=) and a medical
    record (?ficha_id=), return the JSON list of candidate *next* vaccines.

    Candidates are vaccines of the same product; single-application ('N')
    vaccines already applied (in the history or earlier in the schedule by
    periodo_aplicacion) are removed; multi-application ('S') ones remain.
    """
    vacuna_aplicada = TipoVacuna.objects.get(id=request.GET.get('id_vacuna'))
    list_vacunas = TipoVacuna.objects.filter(id_producto=vacuna_aplicada.id_producto)
    vacuna_proxima = []
    data = []
    list_historico = HistoricoFichaMedica.objects.filter(id_ficha_medica=request.GET.get('ficha_id'))
    if list_historico.count() > 0:
        try:
            # Single-application vaccines can't be offered again.
            if vacuna_aplicada.multi_aplicaciones == 'N':
                vacunas = list_vacunas.exclude(nombre_vacuna=vacuna_aplicada.nombre_vacuna)
            else:
                vacunas = list_vacunas
            for vacu in vacunas:
                try:
                    vacu_historico = list_historico.get(vacuna=vacu)
                    if vacu.multi_aplicaciones == 'S':
                        vacuna_proxima.append(vacu)
                except Exception as e:
                    # Not applied yet -> still a candidate.
                    vacuna_proxima.append(vacu)
        except Exception as e:
            # Fallback: offer everything for this product.
            for va in list_vacunas:
                vacuna_proxima.append(va)
    else:
        if vacuna_aplicada.multi_aplicaciones == 'N':
            vacunas = list_vacunas.exclude(nombre_vacuna=vacuna_aplicada.nombre_vacuna)
        else:
            vacunas = list_vacunas
        for va in vacunas:
            vacuna_proxima.append(va)
    try:
        # Drop single-application vaccines scheduled at or before the one
        # just applied (periodo_aplicacion ordering).
        for priodad_vacuna in list_vacunas:
            if int(priodad_vacuna.periodo_aplicacion) <= int(vacuna_aplicada.periodo_aplicacion):
                if priodad_vacuna.multi_aplicaciones == 'N':
                    try:
                        vacuna_proxima.remove(priodad_vacuna)
                    except:
                        pass  # already absent from the candidate list
    except Exception as e:
        pass
    data = [{'id': v.id, 'nombre_vacuna': v.nombre_vacuna } for v in vacuna_proxima]
    list_vacunas_proximas = json.dumps(data)
    response = {'proximas_vacunas': list_vacunas_proximas}
    return JsonResponse(response)
# Historico de Ficha Medica (medical-record history views)
def list_historial(request, id):
    """Render the medical-record history page for pet *id*.

    `id` shadows the builtin, but the name is kept: it is bound by the URLconf.
    """
    context = {'id_mascota': id}
    return render(request, "ventas/mascota/ficha_medica/list_historico.html", context)
def get_list_historico_vacunas_aplicadas(request):
    """DataTables endpoint: page of applied vaccines for one medical record.

    Query params: busqueda (medical-record id), start, length (paging).
    Returns the standard DataTables JSON envelope.
    """
    query = request.GET.get('busqueda')
    vacunas_aplicadas = HistoricoFichaMedica.objects.filter(id_ficha_medica=query)
    total = vacunas_aplicadas.count()
    _start = request.GET.get('start')
    _length = request.GET.get('length')
    if _start and _length:
        start = int(_start)
        length = int(_length)
        # Slice the page requested by DataTables.
        # (The original also computed an unused page number; removed.)
        vacunas_aplicadas = vacunas_aplicadas[start:start + length]
    data = [{'fecha_aplicada': va.fecha_aplicacion, 'vacuna_aplicada': va.vacuna.nombre_vacuna,
             'peso': va.peso} for va in vacunas_aplicadas]
    response = {
        'data': data,
        'recordsTotal': total,
        'recordsFiltered': total,
    }
    return JsonResponse(response)
def get_list_historico_vacunas_proximas(request):
    """DataTables endpoint: page of upcoming (scheduled) vaccines for one record.

    Only history rows with a non-null fecha_proxima_aplicacion are included.
    Query params: busqueda (medical-record id), start, length (paging).
    """
    query = request.GET.get('busqueda')
    vacunas_proximas = HistoricoFichaMedica.objects.filter(id_ficha_medica=query)
    # Keep only rows that still have a pending next-application date.
    vacunas_object = [vp for vp in vacunas_proximas
                      if vp.fecha_proxima_aplicacion is not None]
    total = len(vacunas_object)
    _start = request.GET.get('start')
    _length = request.GET.get('length')
    if _start and _length:
        start = int(_start)
        length = int(_length)
        # Slice the page requested by DataTables.
        # (The original also computed an unused page number; removed.)
        vacunas_object = vacunas_object[start:start + length]
    data = [{'id': va.id, 'fecha_proxima': va.fecha_proxima_aplicacion,
             'proxima_vacuna': va.proxima_vacunacion} for va in vacunas_object]
    response = {
        'data': data,
        'recordsTotal': total,
        'recordsFiltered': total,
    }
    return JsonResponse(response)
def create_historico_ficha_medica(id, proxima_vacunacion, antiparasitario_aplicado, proximo_antiparasitario_aplicado):
    """Snapshot the pet's current medical record into HistoricoFichaMedica.

    Side effects: clears pending dates on previous history rows, decrements
    product stock for applied vaccine/antiparasitics, and resets the live
    Vacuna/Consulta/Antiparasitario records for the next cycle.

    Args:
        id: pet / medical-record id (used for both lookups, as the original did).
        proxima_vacunacion: TipoVacuna id of the next vaccine, or '-------' for none.
        antiparasitario_aplicado: Producto id of the applied antiparasitic, or 'Buscar'.
        proximo_antiparasitario_aplicado: Producto id of the next one, or 'Buscar'.

    Returns the (possibly unsaved, on error) HistoricoFichaMedica instance.
    """
    historico = HistoricoFichaMedica()
    try:
        # Clear the "next application" date on all previous history rows.
        historico_anteriores = HistoricoFichaMedica.objects.filter(id_ficha_medica=id)
        if historico_anteriores is not None:  # filter() never returns None; harmless guard kept
            for hist in historico_anteriores:
                hist.fecha_proxima_aplicacion = None
                hist.save()
        # Resolve the next vaccine, if one was selected ('-------' is the empty option).
        proxima_vacuna = None
        if proxima_vacunacion != '-------':
            try:
                proxima_vacuna = TipoVacuna.objects.get(id=proxima_vacunacion)
            except Exception:
                # BUG FIX: previously left `proxima_vacuna` unbound, so a bad id
                # raised NameError below and the whole snapshot was silently lost.
                proxima_vacuna = None
        mascota = Mascota.objects.get(id=id)
        fichaMedicaGet = FichaMedica.objects.get(id_mascota=id)
        vacunaGet = Vacuna.objects.get(id_ficha_medica=fichaMedicaGet.id)
        consultaGet = Consulta.objects.get(id_ficha_medica=fichaMedicaGet.id)
        antiparasitarioGet = Antiparasitario.objects.get(id_ficha_medica=fichaMedicaGet.id)
        # Discount stock for the applied vaccine product.
        if vacunaGet.id_vacuna is not None:
            producto = Producto.objects.get(id=vacunaGet.id_vacuna.id_producto.id)
            producto.stock_total = producto.stock_total - 1
            producto.stock = producto.stock - 1
            producto.save()
        historico.fecha_alta = date.strftime("%d/%m/%Y")
        historico.vacuna = vacunaGet.id_vacuna
        if proxima_vacuna is not None:
            historico.proxima_vacunacion = proxima_vacuna.nombre_vacuna
        else:
            historico.proxima_vacunacion = "-"
        # Applied antiparasitic: discount stock ('Buscar' is the empty option).
        anti_aplicado = "-"
        if antiparasitario_aplicado != "Buscar":
            try:
                producto_anti = Producto.objects.get(id=antiparasitario_aplicado)
                producto_anti.stock_total = producto_anti.stock_total - 1
                producto_anti.stock = producto_anti.stock - 1
                producto_anti.save()
                anti_aplicado = producto_anti.nombre_producto
            except Exception:
                anti_aplicado = "-"
        # Next antiparasitic: same treatment.
        anti_proximo_aplicado = "-"
        if proximo_antiparasitario_aplicado != "Buscar":
            try:
                producto_anti_proximo = Producto.objects.get(id=proximo_antiparasitario_aplicado)
                producto_anti_proximo.stock_total = producto_anti_proximo.stock_total - 1
                producto_anti_proximo.stock = producto_anti_proximo.stock - 1
                producto_anti_proximo.save()
                anti_proximo_aplicado = producto_anti_proximo.nombre_producto
            except Exception:
                anti_proximo_aplicado = "-"
        # NOTE(review): anti_aplicado / anti_proximo_aplicado are computed (and
        # stock is discounted) but the row stores hard-coded '-' below, exactly
        # as the original did — confirm whether they should be persisted instead.
        historico.diagnostico = consultaGet.diagnostico
        # NOTE(review): original copied proximo_tratamiento into both fields —
        # `tratamiento` possibly should be consultaGet.tratamiento; kept as-is.
        historico.tratamiento = consultaGet.proximo_tratamiento
        historico.proximo_tratamiento = consultaGet.proximo_tratamiento
        historico.medicamento = consultaGet.medicamento
        historico.fecha_aplicacion = date.strftime("%d/%m/%Y")
        historico.fecha_proxima_aplicacion = vacunaGet.fecha_proxima_aplicacion
        historico.antiparasitario = '-'
        historico.proximo_antiparasitario = "-"
        historico.peso = mascota.peso
        historico.last_modified = fichaMedicaGet.fecha_create
        historico.id_ficha_medica = id
        historico.id_mascota = mascota
        historico.save()
        # Reset the live record so a fresh cycle can begin.
        vacunaGet.id_vacuna = None
        vacunaGet.proxima_vacuna = "-"
        consultaGet.diagnostico = "-"
        consultaGet.tratamiento = "-"
        consultaGet.proximo_tratamiento = "-"
        consultaGet.medicamento = "-"
        vacunaGet.fecha_aplicacion = None
        vacunaGet.fecha_proxima_aplicacion = None
        antiparasitarioGet.antiparasitario = "-"
        antiparasitarioGet.proximo_antiparasitario = "-"
        # (Original built three unused Form instances here; removed.)
        vacunaGet.save()
        consultaGet.save()
        antiparasitarioGet.save()
    except Exception:
        # Best-effort: the original swallowed every error; kept for behavior
        # parity, but note this hides real failures from callers.
        pass
    return historico
```
#### File: ventas/producto/models.py
```python
from datetime import datetime

from django.db import models
from django.urls import reverse
# Create your models here.
# Module-load timestamp used for CharField date defaults below.
# NOTE(review): evaluated once at import time, not per save — confirm intended.
date = datetime.now()
class TipoProducto(models.Model):
    """Product-type master data (e.g. whether products of this type expire)."""
    # Choices for the 'vence' (expires) flag.
    opciones = (
        ('S', 'Si'),
        ('N', 'No'),
    )
    nombre_tipo = models.CharField(max_length=200, help_text="Ingrese nombre del tipo de producto")
    fecha_alta = models.CharField(max_length=200, default=datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M:%S hs"), editable=False)
    fecha_baja = models.CharField(max_length=200, default='-', null=True, blank=True)
    vence = models.CharField(max_length=2, choices=opciones, default="S", blank=True, null=True, help_text='El producto vence?')
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    is_active = models.CharField(max_length=2, default="S", blank=True, null=True)

    class Meta:  # BUG FIX: was misspelled "Mwta", so Django ignored these options
        verbose_name = "Tipo Producto"
        verbose_name_plural = "Tipo Productos"
        default_permissions = ()
        permissions = (
            ('add_tipoproducto', 'Agregar Tipo Producto'),
            ('change_tipoproducto', 'Editar Tipo Producto'),
            ('delete_tipoproducto', 'Eliminar Tipo Producto'),
            ('view_tipoproducto', 'Listar Tipo Productos'))

    def __str__(self):
        """Display name of the product type."""
        return '{0}'.format(self.nombre_tipo)

    def get_absolute_url(self):
        """Return the URL of this product type's detail view."""
        return reverse('tipoProducto-detail', args=[str(self.id)])
class Deposito(models.Model):
    """Warehouse / storage location for products."""
    descripcion = models.CharField(max_length=200, help_text="Ingrese descripcion del deposito")
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    is_active = models.CharField(max_length=2, default="S", blank=True, null=True)

    class Meta:  # BUG FIX: was misspelled "Mwta", so Django ignored these options
        verbose_name = "Deposito"
        verbose_name_plural = "Depositos"
        default_permissions = ()
        permissions = (
            ('add_deposito', 'Agregar Deposito'),
            ('change_deposito', 'Editar Deposito'),
            ('delete_deposito', 'Eliminar Deposito'),
            ('view_deposito', 'Listar Depositos'))

    def __str__(self):
        """Display name of the warehouse."""
        return '{0}'.format(self.descripcion)

    def get_absolute_url(self):
        """Return the URL of this warehouse's detail view."""
        return reverse('deposito-detail', args=[str(self.id)])
class Producto(models.Model):
    """A sellable product (or service) with pricing, stock and expiry data."""
    nombre_producto = models.CharField(max_length=500, help_text="Ingrese nombre del producto")
    descripcion = models.CharField(max_length=500, help_text="Ingrese descripcion del producto")
    fecha_vencimiento = models.CharField(max_length=200, null=True, blank=True)
    fecha_baja = models.CharField(max_length=200, default='-', null=True, blank=True)
    fecha_movimiento = models.CharField(max_length=200, null=True, blank=True)
    tipo_producto = models.ForeignKey('TipoProducto', on_delete=models.CASCADE, null=True)
    fecha_compra = models.CharField(max_length=200, default=date.strftime("%d/%m/%Y"), editable=False)
    precio_compra = models.CharField(max_length=500, help_text='Ingrese precio de compra', blank=True, null=True, default="0")
    precio_venta = models.CharField(max_length=500, help_text='Ingrese precio de venta')
    stock_minimo = models.IntegerField(help_text='Ingrese stock minimo')
    lote = models.CharField(max_length=200, null=True, blank=True)
    stock = models.IntegerField(help_text='Ingrese stock minimo')
    stock_total = models.IntegerField(null=True, blank=True)
    stock_movido = models.IntegerField(blank=True, null=True, default=0)
    # 'P' = product, otherwise service (see id_servicio).
    servicio_o_producto = models.CharField(max_length=2, default="P", blank=True, null=True)
    producto_vencido = models.CharField(max_length=2, default="N", blank=True, null=True)
    id_servicio = models.IntegerField(blank=True, null=True)
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
    id_deposito = models.ForeignKey('Deposito', on_delete=models.CASCADE, null=True)

    class Meta:  # BUG FIX: was misspelled "Mwta", so Django ignored these options
        verbose_name = "Producto"
        verbose_name_plural = "Productos"
        default_permissions = ()
        permissions = (
            ('add_producto', 'Agregar Producto'),
            ('change_producto', 'Editar Producto'),
            ('delete_producto', 'Eliminar Producto'),
            ('view_producto', 'Listar Productos'))

    def __str__(self):
        """Display name of the product."""
        return '{0}'.format(self.nombre_producto)

    def get_absolute_url(self):
        """Return the URL of this product's detail view."""
        return reverse('producto-detail', args=[str(self.id)])

    def obtener_dict(self):
        """Return a plain-dict summary of the product (keys kept verbatim)."""
        info = {}  # renamed from `dict`, which shadowed the builtin
        info['codigo_producto'] = self.id
        info['nombre'] = self.nombre_producto
        info['description'] = self.descripcion
        info['precio'] = self.precio_venta
        info['precio_compra'] = self.precio_compra
        info['stock_sistema'] = self.stock_total
        info['tipo'] = self.servicio_o_producto
        return info
class ProductoStock(models.Model):
    """Per-warehouse stock detail for products that have been moved."""
    producto_stock = models.IntegerField(help_text='Ingrese stock')
    id_deposito = models.ForeignKey('Deposito', on_delete=models.CASCADE, null=False)
    id_producto = models.ForeignKey('Producto', on_delete=models.CASCADE, null=False)
    last_modified = models.DateTimeField(auto_now=True, blank=True)
    is_active = models.CharField(max_length=2, default="S", blank=True, null=True)

    def __str__(self):  # BUG FIX: was `_str_` (single underscores), never invoked
        """Display name: the underlying product's name."""
        return '{0}'.format(self.id_producto.nombre_producto)

    def get_absolute_url(self):
        """Return the URL of the related warehouse detail view (as original)."""
        return reverse('deposito-detail', args=[str(self.id)])
class Inventario(models.Model):
    """Inventory count record: system stock vs. physical stock for a product."""
    stock_viejo = models.IntegerField(blank=True, null=True, default=0)
    stock_fisico = models.IntegerField(blank=True, null=True, default=0)
    diferencia = models.IntegerField(blank=True, null=True, default=0)
    id_producto = models.ForeignKey('Producto', on_delete=models.CASCADE, null=False)
    fecha_alta = models.CharField(max_length=500, default=datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M:%S hs"), null=True)

    class Meta:  # BUG FIX: was misspelled "Mwta", so Django ignored these options
        verbose_name = "Inventario"
        verbose_name_plural = "Inventarios"
        default_permissions = ()
        permissions = (
            ('add_inventario', 'Agregar Inventario'),
            ('change_inventario', 'Editar Inventario'),
            ('delete_inventario', 'Eliminar Inventario'),
            ('view_inventario', 'Listar Inventarios'))
class HistoricoProductoPrecio(models.Model):
    """Purchase-price history entry for a product."""
    id_producto = models.ForeignKey('Producto', on_delete=models.CASCADE, null=False)
    fecha_alta = models.CharField(max_length=500, default=date.strftime("%d/%m/%Y"), null=True)
    precio_compra = models.CharField(max_length=500, null=True, blank=True)

    class Meta:  # BUG FIX: was misspelled "Mwta", so Django ignored these options
        verbose_name = "Historico Producto"
        verbose_name_plural = "Historicos Productos"
``` |
{
"source": "joseignaciodg/theHOC-Application",
"score": 3
} |
#### File: SO3_THEHOC_2021-22_code-folder/Scripts Python Rsapberry/cliente_mod.py
```python
import socket
import signal
from datetime import datetime
import RPi.GPIO as GPIO
import mfrc522
import signal
# Flag polled by the RFID read logic; cleared by the SIGINT handler below.
continue_reading = True
# TCP client socket used to talk to the hub server.
ClientMultiSocket = socket.socket()
host = '192.168.138.45'  # hub server address on the local network
port = 1234
# Capture SIGINT for cleanup when the script is aborted
def end_read(signal, frame):
    """SIGINT handler: stop the RFID read loop and release the GPIO pins."""
    global continue_reading
    print("Ctrl+C captured, ending read.")
    continue_reading = False
    GPIO.cleanup()
print('Waiting for server response...')
try:
    ClientMultiSocket.connect((host, port))
except socket.error as e:
    # Connection failure is only reported, as in the original.
    print(str(e))
res = ClientMultiSocket.recv(1024)

signal.signal(signal.SIGINT, end_read)

# Create an object of the class MFRC522
MIFAREReader = mfrc522.MFRC522()

# Scan for cards
(status, TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)

# If a card is found, send its UID to the hub and act on the reply.
# NOTE(review): original indentation was lost; everything below is assumed to
# run only when a card was detected (uid is undefined otherwise) — confirm.
if status == MIFAREReader.MI_OK:
    print("RFID Sticker detected ")
    (status, uid) = MIFAREReader.MFRC522_Anticoll()
    # Welcome message
    print("MFRC522 data read")
    print("Press Ctrl-C to stop.")
    str_id = str(uid[0]) + "," + str(uid[1]) + "," + str(uid[2]) + "," + str(uid[3])
    ClientMultiSocket.send(str_id.encode('utf-8'))
    # BUG FIX: recv() returns bytes; comparing bytes against str literals like
    # '0'/'1' never matched, so no action branch could ever run. Decode once.
    action = ClientMultiSocket.recv(1024).decode('utf-8')
    if action == '0':
        print('Phone Calls')
    elif action == '1':
        print('Play Music')
        exec(open("music.py").read())
    elif action == '2':
        print('Weather Info')
        exec(open("weather.py").read())
        exec(open("texttovoice_weather.py").read())
    elif action == '4':
        print('Alarm Clock')
        exec(open("alarm.py").read())
    elif action == '5':
        # NOTE(review): prints 'Current Time' but runs the weather scripts,
        # exactly as the original did — confirm intended.
        print('Current Time')
        exec(open("weather.py").read())
        exec(open("texttovoice_weather.py").read())
    else:
        # Anything else is a task payload; strip the 2-char prefix and speak it.
        print(action)
        with open("tasks.txt", "w") as text_file:
            text_file.write(action[2:])
        exec(open("texttovoice_tasks.py").read())  # text-to-voice runs the script
ClientMultiSocket.close()
```
#### File: SO3_THEHOC_2021-22_code-folder/Scripts Python Rsapberry/server_mod.py
```python
import socket
import os
from _thread import *
import random
import time
import firebase_admin
from firebase_admin import firestore
from firebase_admin import credentials
from datetime import datetime
# Initialize the Firebase Admin SDK from the service-account key file
# and open a Firestore client shared by the handlers below.
cred = credentials.Certificate("secret_key.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
def getDataDB_Functionalities(data):
    """Look up the action code configured for an RFID sticker id in Firestore.

    :param data: sticker document id (the UID string sent by the reader).
    :return: the 'action' field of that sticker document.
    """
    db = firestore.client()  # NOTE(review): shadows the module-level db; kept for parity
    sticker = data
    action = db.collection(u'sticker').document(sticker).get().to_dict().get('action')
    return action
def calendar():
    """Return the name of a task scheduled for today, or None if none exist.

    Scans every 'task' document and keeps the last one whose dateTime
    date-part (YYYY/MM/DD) equals today. The name shadows the stdlib
    `calendar` module but is kept for interface compatibility.
    """
    # Zero-padded YYYY/MM/DD, equivalent to the original manual padding.
    today = datetime.now().strftime("%Y/%m/%d")
    # BUG FIX: `everything` was unbound when no task matched today,
    # raising NameError at the return; initialize it to None.
    everything = None
    docs = db.collection(u'task').stream()
    for doc in docs:
        if doc.to_dict().get('dateTime').split()[0] == today:
            everything = doc.to_dict().get('name')
    return everything
# Listening socket for the hub; Raspberry readers connect as clients.
ServerSideSocket = socket.socket()
host = '192.168.138.45'
port = 1234
CounterConnections = 0
try:
    ServerSideSocket.bind((host, port))
except socket.error as e:
    # Bind failure (port in use, bad address) is only reported, as before.
    print(str(e))
print('Server is listening on IP ' + host)
ServerSideSocket.listen(5)
def multi_threaded_client(connection):
    """Serve one reader connection: map its sticker UID to an action reply.

    Protocol: send a greeting, receive the sticker UID, look up its action in
    Firestore, then reply with the action code (or the task text for '3').
    Action '0' (phone call) also pokes the 'meet' document to trigger a call.
    """
    connection.send(str.encode('Server connected'))
    data = connection.recv(2048)
    info = getDataDB_Functionalities(data.decode('utf-8'))
    print(info)
    info = str(info)
    if info != '3' and info != '0':
        # Plain action code: forward it verbatim.
        connection.sendall(info.encode('utf-8'))
    elif info == '3':  # calendar events
        ev = calendar()
        print(ev)
        if ev is None:
            # (Message text kept byte-for-byte, grammar included.)
            info = "There is not tasks for today"
        else:
            info = ev
        connection.sendall(info.encode('utf-8'))
    elif info == '0':  # phone call
        connection.send(info.encode('utf-8'))
        # Touching this field triggers the call on the app side.
        # NOTE(review): 'ramdom' is misspelled but is the live Firestore field
        # name — do not "fix" without migrating the consumer.
        db.collection(u'meet').document(u'0').update({u'ramdom': random.random()})
    connection.close()
# Accept connections forever, one handler thread per client.
while True:
    Client, address = ServerSideSocket.accept()
    print('Connected to: ' + address[0] + ':' + str(address[1]))
    start_new_thread(multi_threaded_client, (Client, ))
    CounterConnections += 1
    print('Connection Number: ' + str(CounterConnections))
# NOTE: unreachable (the loop never exits); kept for parity with the original.
ServerSideSocket.close()
```
#### File: SO3_THEHOC_2021-22_code-folder/Scripts Python Rsapberry/texttovoice.py
```python
import os
import sys
# Read the text file named on the command line and flatten it to one line.
name = sys.argv[1]
with open(name, 'r') as file:
    data = file.read().replace('\n', '')
def robot(text):
    """Speak *text* aloud via the espeak command-line tool."""
    import subprocess  # local import: keeps the file's import block untouched
    # SECURITY FIX: the original built a shell string with os.system, allowing
    # shell injection via the file contents. Pass argv as a list, no shell.
    # The surrounding spaces reproduce the original " ' text ' " argument.
    subprocess.run(["espeak", "-s", "130", " " + text + " "], check=False)
# Speak the file contents read above.
robot(data)
``` |
{
"source": "JoseIgnacioRetamalThomsen/Emerging-Technologies-Assessment-2019",
"score": 2
} |
#### File: Emerging-Technologies-Assessment-2019/webapp/app.py
```python
import flask as fl
from flask import escape, request
from keras.models import load_model
import numpy as np
import json
from flask_cors import CORS
import tensorflow as tf
import sys
import logging
import urllib.request
import base64 as b64
from PIL import Image
from io import BytesIO
import re
import imagehelper as ih
from collections import deque
import os
app = fl.Flask(__name__)
CORS(app)
# import the model
tf.keras.backend.set_learning_phase(0) # Ignore dropout at interface
model = load_model('../model/static/model.h5')
# Add index route
@app.route('/')
def home():
"""
Main route return static index.
"""
return app.send_static_file('index.html')
@app.route("/imgs", methods=["POST", "GET"])
def predict():
"""
Main end point get, get a array of images in json format
Responde wiht a json array compose by predictions.
"""
if request.method == "POST":
try:
# Parse request into a list of images
imgs = processRequestData(request.data)
# Create single number images from list
rs = ih.divedeQueue(imgs)
# Make a prediction for each single image
prediction = predictFromQueue(rs)
# Response json list
return json.dumps(prediction)
except:
return "Not posible to process request", 400
def processRequestData(jsonRequest):
"""
Process request from client.
Request is a json array with pictures,
Parse pictures and append them into a queue.
:param jsonRequest: Json binary request.
:return: queue with images.
"""
imgs = deque([])
y = json.loads(jsonRequest)
for x in y:
image64 = x # .decode("utf-8")
# remove header
data = re.sub('data:image/png;base64,', '', image64)
# open image as grayscale
img = Image.open(BytesIO(b64.b64decode(data))).convert('LA')
# add image to queue
imgs.append(img)
return imgs
def predictFromQueue(imgQueue):
"""
Generate preditions from a list of images.
:param imgQueue: List of images.
:return: list with prediction, first element in the list is all predictions on a strins.
"""
response = []
response.append("")
numb = ""
while True:
# end loop if queue empty
if(len(imgQueue) == 0):
break
# get first image
img = imgQueue.popleft()
# crop image
img, x1, x2, x4, x4 = ih.cropImage(img, 255)
# add simulated raster and put image in pixel center of amss
img = ih.simulateMnist(img)
# reshape for use in model
img = img.reshape(1, 28, 28, 1)
# predict actual number
result = model.predict(img)
response.append(result.tolist())
# get value from result vector
num = np.argmax(result, axis=-1)[0]
# add actual number to result
numb += str(num)
response[0] = numb
return response
``` |
{
"source": "JoseIgnacioRetamalThomsen/Thompson-s-construction-",
"score": 4
} |
#### File: re/com/Shunting.py
```python
class Converter:
"""
Methods for converting string.
toPofix() implement, method that convert a infix string into a
postfix string.
"""
def toPofix(self, infix):
"""
Convert infix string to postfix string.
:param infix: infix string to convert
:return: postfix string.
"""
# Order of preference for specials characters
specials = {'-': 60, '*': 50, '+': 46, '?': 43, '.': 40, '|': 30}
# Stack for convert.
stack = list()
# For create the postfix result string.
pofix = ""
isEscape =False
for c in infix:
if isEscape:
pofix = pofix + c;
isEscape = False
else:
if c == '(':
# Push to stack.
# Will server as a marker.
stack.append(c)
elif c == ')':
# Look at the stack.
# stack[-1] works as stack.peek().
while stack[-1] is not '(':
# pop from stack and append it to postfix result
pofix = pofix + stack.pop()
# Remove '(' from the stack.
stack.pop()
elif c == '/':
# escape character
pofix = pofix + c
# next character will be just throw into the stack
isEscape = True
elif c in specials:
# While there is something on the stack
# and C (actual) precedence is less or equals of the last special on the stack
# pop from stack and put into pofix.
# get(c,0) look for c and if is not in returns 0.
while stack and specials.get(c, 0) <= specials.get(stack[-1], 0):
# pop from stack and then add it to postfix result
pofix = pofix + stack.pop()
# add character to stack
stack.append(c)
else:
# Normal character just added to postfix regular expression.
pofix = pofix + c;
# Push anything left in the stack to the end of the pofix.
while stack:
# Push character from stack.
pofix = pofix + stack.pop()
# return result
return pofix
```
#### File: re/com/Test.py
```python
import unittest
import Shunting
import Thomsons
import ThomsonsMap
class Test(unittest.TestCase):
@unittest.skip("feature not implemented")
def test_no_dot_shunting(self):
"""
Test the use of no dot for concatenation on shunting algorithm.
:return: Nothing.
"""
testCases = [ ("ab", "a.b"), ("abc", "a.b.c"), ("a*b*", "a*.b*"), ("(a-z)b?", "(a-z).b?"), (
"a?b+c*(a-z)*t", "a?.b+.c*.(a-z)*.t"), ("(0|(1(01*(00)*0)*1)*)*", "(0|(1.(0.1*.(0.0)*.0)*.1)*)*"),
(
"((a-z)|(A-Z)|(0-9)).((a-z)|(A-Z)|(0-9)|_|/.)*.@.((a-z)|(A-Z)|/.)*./..(((a-z)|(A-Z)).((a-z)|(A-Z)).((a-z)|(A-Z))|((a-z)|(A-Z)).((a-z)|(A-Z)))",
"((a-z)|(A-Z)|(0-9))((a-z)|(A-Z)|(0-9)|_|/.)*@((a-z)|(A-Z)|/.)*/.(((a-z)|(A-Z))((a-z)|(A-Z))((a-z)|(A-Z))|((a-z)|(A-Z))((a-z)|(A-Z)))"),
("abc","abc")]
for case in testCases:
print(case[0])
self.assertEqual(Shunting.Converter().toPofix(case[0]), Shunting.Converter().toPofix(case[1]))
matchtestcases = [ ("a.b.c","",False),
("a.b.c", "abc",True),
("a.b.c","abbc",False),
("a.b.c", "abcc", False),
("a.b.c", "abad", False),
("a.b.c", "abbbc", False),
("a.b.c", "adc", False),
("a.(b|d).c", "", False),
("a.(b|d).c", "abc", True),
("a.(b|d).c", "abbc", False),
("a.(b|d).c", "abcc", False),
("a.(b|d).c", "abad", False),
("a.(b|d).c", "abbbc", False),
("a.(b|d).c", "adc", True),
("a.(b|d)*", "", False),
("a.(b|d)*", "abc", False),
("a.(b|d)*", "abbc", False),
("a.(b|d)*", "abcc", False),
("a.(b|d)*", "abad", False),
("a.(b|d)*", "abbbc", False),
("a.(b|d)*", "adc", False),
("a.(b.b)*.c", "", False),
("a.(b.b)*.c", "abc", False),
("a.(b.b)*.c", "abbc", True),
("a.(b.b)*.c", "abcc", False),
("a.(b.b)*.c", "abad", False),
("a.(b.b)*.c", "abbbc", False),
("a.(b.b)*.c", "adc", False),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "00", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "11", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "000", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "011", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "110", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0000", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0011", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0110", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "1001", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "00000", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "1", False),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "10", False),
]
# fail becuase overflow for multiple of 3 regex test
@unittest.expectedFailure
def test_thomsons(self):
for case in self.matchtestcases:
self.assertEqual(Thomsons.match(case[0],case[1]),case[2])
# def test_isupper(self):
# self.assertTrue('FOO'.isupper())
# self.assertFalse('Foo'.isupper())
#
# def test_split(self):
# s = 'hello world'
# self.assertEqual(s.split(), ['hello', 'world'])
# # check that s.split fails when the separator is not a string
# with self.assertRaises(TypeError):
# s.split(2)
def test_thomsonsMap(self):
for case in self.matchtestcases:
self.assertEqual(ThomsonsMap.compile(Shunting.Converter().toPofix(case[0])).run(case[1]),case[2])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JoseIgnacioTamayo/gnxi",
"score": 2
} |
#### File: oc_config_validate/models/lacp.py
```python
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
    # On Python 3, alias builtins under the PY2 name and map long -> int.
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
class yc_config_openconfig_lacp__lacp_config(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-lacp - based on the path /lacp/config. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Configuration data for LACP
  """
  # Restrict instance attributes to the generated set (pyangbind convention).
  __slots__ = ('_path_helper', '_extmethods', '__system_priority',)

  _yang_name = 'config'
  _yang_namespace = 'http://openconfig.net/yang/lacp'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Leaf storage; the public accessor is the `system_priority` property below.
    self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)

    load = kwargs.pop("load", None)
    if args:
      # Copy-construct from another object exposing the same elements.
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # Path of this container within the YANG data tree.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['lacp', 'config']

  def _get_system_priority(self):
    """
    Getter method for system_priority, mapped from YANG variable /lacp/config/system_priority (uint16)

    YANG Description: System priority used by the node on this LAG interface.
    Lower value is higher priority for determining which node
    is the controlling system.
    """
    return self.__system_priority

  def _set_system_priority(self, v, load=False):
    """
    Setter method for system_priority, mapped from YANG variable /lacp/config/system_priority (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_system_priority is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system_priority() directly.

    YANG Description: System priority used by the node on this LAG interface.
    Lower value is higher priority for determining which node
    is the controlling system.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """system_priority must be of a type compatible with uint16""",
          'defined-type': "uint16",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)""",
        })

    self.__system_priority = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_system_priority(self):
    self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)

  # Read/write leaf: config node, so both getter and setter are exposed.
  system_priority = __builtin__.property(_get_system_priority, _set_system_priority)

  _pyangbind_elements = OrderedDict([('system_priority', system_priority), ])
class yc_state_openconfig_lacp__lacp_state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-lacp - based on the path /lacp/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for LACP
"""
__slots__ = ('_path_helper', '_extmethods', '__system_priority',)
_yang_name = 'state'
_yang_namespace = 'http://openconfig.net/yang/lacp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['lacp', 'state']
def _get_system_priority(self):
"""
Getter method for system_priority, mapped from YANG variable /lacp/state/system_priority (uint16)
YANG Description: Sytem priority used by the node on this LAG interface.
Lower value is higher priority for determining which node
is the controlling system.
"""
return self.__system_priority
def _set_system_priority(self, v, load=False):
"""
Setter method for system_priority, mapped from YANG variable /lacp/state/system_priority (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_priority() directly.
YANG Description: Sytem priority used by the node on this LAG interface.
Lower value is higher priority for determining which node
is the controlling system.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """system_priority must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)""",
})
self.__system_priority = t
if hasattr(self, '_set'):
self._set()
  def _unset_system_priority(self):
    # Reset the leaf to a fresh, unset YANGDynClass wrapper (drops any value).
    self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)

  # This leaf is operational state (config: false), so only a getter is
  # exposed publicly; backends populate it via _set_system_priority directly.
  system_priority = __builtin__.property(_get_system_priority)


  _pyangbind_elements = OrderedDict([('system_priority', system_priority), ])
class yc_config_openconfig_lacp__lacp_interfaces_interface_config(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-lacp - based on the path /lacp/interfaces/interface/config. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Configuration data for each LACP aggregate interface
  """
  # __slots__ avoids a per-instance __dict__ and fixes the attribute set to
  # exactly the leaves of this YANG container (plus pyangbind plumbing).
  __slots__ = ('_path_helper', '_extmethods', '__name','__interval','__lacp_mode','__system_id_mac','__system_priority',)

  _yang_name = 'config'
  _yang_namespace = 'http://openconfig.net/yang/lacp'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False

    self._extmethods = False
    # Each leaf is initialised as a YANGDynClass wrapper that enforces its
    # YANG type/restrictions; all leaves here are configurable (is_config=True).
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)
    self.__interval = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)
    self.__lacp_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)
    self.__system_id_mac = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)
    self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)

    load = kwargs.pop("load", None)
    # Copy-constructor support: a single positional argument must be another
    # object exposing the same element attributes; changed leaves are copied
    # through the corresponding _set_* methods (honouring `load` if given).
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # Path is built recursively from the parent when attached to a tree;
    # otherwise fall back to this container's absolute YANG path.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['lacp', 'interfaces', 'interface', 'config']

  def _get_name(self):
    """
    Getter method for name, mapped from YANG variable /lacp/interfaces/interface/config/name (oc-if:base-interface-ref)

    YANG Description: Reference to the interface on which LACP should be
    configured. The type of the target interface must be
    ieee8023adLag
    """
    return self.__name

  def _set_name(self, v, load=False):
    """
    Setter method for name, mapped from YANG variable /lacp/interfaces/interface/config/name (oc-if:base-interface-ref)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name() directly.

    YANG Description: Reference to the interface on which LACP should be
    configured. The type of the target interface must be
    ieee8023adLag
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """name must be of a type compatible with oc-if:base-interface-ref""",
          'defined-type': "oc-if:base-interface-ref",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)""",
        })

    self.__name = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_name(self):
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)


  def _get_interval(self):
    """
    Getter method for interval, mapped from YANG variable /lacp/interfaces/interface/config/interval (lacp-period-type)

    YANG Description: Set the period between LACP messages -- uses
    the lacp-period-type enumeration.
    """
    return self.__interval

  def _set_interval(self, v, load=False):
    """
    Setter method for interval, mapped from YANG variable /lacp/interfaces/interface/config/interval (lacp-period-type)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interval() directly.

    YANG Description: Set the period between LACP messages -- uses
    the lacp-period-type enumeration.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Enumeration restricted to FAST/SLOW; defaults to SLOW per the model.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """interval must be of a type compatible with lacp-period-type""",
          'defined-type': "openconfig-lacp:lacp-period-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)""",
        })

    self.__interval = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_interval(self):
    self.__interval = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)


  def _get_lacp_mode(self):
    """
    Getter method for lacp_mode, mapped from YANG variable /lacp/interfaces/interface/config/lacp_mode (lacp-activity-type)

    YANG Description: ACTIVE is to initiate the transmission of LACP packets.
    PASSIVE is to wait for peer to initiate the transmission of
    LACP packets.
    """
    return self.__lacp_mode

  def _set_lacp_mode(self, v, load=False):
    """
    Setter method for lacp_mode, mapped from YANG variable /lacp/interfaces/interface/config/lacp_mode (lacp-activity-type)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_mode is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_mode() directly.

    YANG Description: ACTIVE is to initiate the transmission of LACP packets.
    PASSIVE is to wait for peer to initiate the transmission of
    LACP packets.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Enumeration restricted to ACTIVE/PASSIVE; defaults to ACTIVE.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lacp_mode must be of a type compatible with lacp-activity-type""",
          'defined-type': "openconfig-lacp:lacp-activity-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)""",
        })

    self.__lacp_mode = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_lacp_mode(self):
    self.__lacp_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)


  def _get_system_id_mac(self):
    """
    Getter method for system_id_mac, mapped from YANG variable /lacp/interfaces/interface/config/system_id_mac (oc-yang:mac-address)

    YANG Description: The MAC address portion of the node's System ID. This is
    combined with the system priority to construct the 8-octet
    system-id
    """
    return self.__system_id_mac

  def _set_system_id_mac(self, v, load=False):
    """
    Setter method for system_id_mac, mapped from YANG variable /lacp/interfaces/interface/config/system_id_mac (oc-yang:mac-address)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_system_id_mac is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system_id_mac() directly.

    YANG Description: The MAC address portion of the node's System ID. This is
    combined with the system priority to construct the 8-octet
    system-id
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # MAC address validated against the colon-separated hex-octet pattern.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """system_id_mac must be of a type compatible with oc-yang:mac-address""",
          'defined-type': "oc-yang:mac-address",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)""",
        })

    self.__system_id_mac = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_system_id_mac(self):
    self.__system_id_mac = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)


  def _get_system_priority(self):
    """
    Getter method for system_priority, mapped from YANG variable /lacp/interfaces/interface/config/system_priority (uint16)

    YANG Description: System priority used by the node on this LAG interface.
    Lower value is higher priority for determining which node
    is the controlling system.
    """
    return self.__system_priority

  def _set_system_priority(self, v, load=False):
    """
    Setter method for system_priority, mapped from YANG variable /lacp/interfaces/interface/config/system_priority (uint16)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_system_priority is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system_priority() directly.

    YANG Description: System priority used by the node on this LAG interface.
    Lower value is higher priority for determining which node
    is the controlling system.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """system_priority must be of a type compatible with uint16""",
          'defined-type': "uint16",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)""",
        })

    self.__system_priority = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_system_priority(self):
    self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)

  # config container: every leaf is is_config=True, so each property exposes
  # both a getter and a setter.
  name = __builtin__.property(_get_name, _set_name)
  interval = __builtin__.property(_get_interval, _set_interval)
  lacp_mode = __builtin__.property(_get_lacp_mode, _set_lacp_mode)
  system_id_mac = __builtin__.property(_get_system_id_mac, _set_system_id_mac)
  system_priority = __builtin__.property(_get_system_priority, _set_system_priority)


  _pyangbind_elements = OrderedDict([('name', name), ('interval', interval), ('lacp_mode', lacp_mode), ('system_id_mac', system_id_mac), ('system_priority', system_priority), ])
class yc_state_openconfig_lacp__lacp_interfaces_interface_state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-lacp - based on the path /lacp/interfaces/interface/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Operational state data for each LACP aggregate
  interface
  """
  # __slots__ avoids a per-instance __dict__ and fixes the attribute set to
  # exactly the leaves of this YANG container (plus pyangbind plumbing).
  __slots__ = ('_path_helper', '_extmethods', '__name','__interval','__lacp_mode','__system_id_mac','__system_priority',)

  _yang_name = 'state'
  _yang_namespace = 'http://openconfig.net/yang/lacp'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False

    self._extmethods = False
    # Same leaves as the sibling config container, but all are operational
    # state here (is_config=False).
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=False)
    self.__interval = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=False)
    self.__lacp_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=False)
    self.__system_id_mac = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)
    self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)

    load = kwargs.pop("load", None)
    # Copy-constructor support: a single positional argument must be another
    # object exposing the same element attributes; changed leaves are copied
    # through the corresponding _set_* methods (honouring `load` if given).
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # Path is built recursively from the parent when attached to a tree;
    # otherwise fall back to this container's absolute YANG path.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['lacp', 'interfaces', 'interface', 'state']

  def _get_name(self):
    """
    Getter method for name, mapped from YANG variable /lacp/interfaces/interface/state/name (oc-if:base-interface-ref)

    YANG Description: Reference to the interface on which LACP should be
    configured. The type of the target interface must be
    ieee8023adLag
    """
    return self.__name

  def _set_name(self, v, load=False):
    """
    Setter method for name, mapped from YANG variable /lacp/interfaces/interface/state/name (oc-if:base-interface-ref)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name() directly.

    YANG Description: Reference to the interface on which LACP should be
    configured. The type of the target interface must be
    ieee8023adLag
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """name must be of a type compatible with oc-if:base-interface-ref""",
          'defined-type': "oc-if:base-interface-ref",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=False)""",
        })

    self.__name = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_name(self):
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=False)


  def _get_interval(self):
    """
    Getter method for interval, mapped from YANG variable /lacp/interfaces/interface/state/interval (lacp-period-type)

    YANG Description: Set the period between LACP messages -- uses
    the lacp-period-type enumeration.
    """
    return self.__interval

  def _set_interval(self, v, load=False):
    """
    Setter method for interval, mapped from YANG variable /lacp/interfaces/interface/state/interval (lacp-period-type)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interval() directly.

    YANG Description: Set the period between LACP messages -- uses
    the lacp-period-type enumeration.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Enumeration restricted to FAST/SLOW; defaults to SLOW per the model.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """interval must be of a type compatible with lacp-period-type""",
          'defined-type': "openconfig-lacp:lacp-period-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=False)""",
        })

    self.__interval = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_interval(self):
    self.__interval = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=False)


  def _get_lacp_mode(self):
    """
    Getter method for lacp_mode, mapped from YANG variable /lacp/interfaces/interface/state/lacp_mode (lacp-activity-type)

    YANG Description: ACTIVE is to initiate the transmission of LACP packets.
    PASSIVE is to wait for peer to initiate the transmission of
    LACP packets.
    """
    return self.__lacp_mode

  def _set_lacp_mode(self, v, load=False):
    """
    Setter method for lacp_mode, mapped from YANG variable /lacp/interfaces/interface/state/lacp_mode (lacp-activity-type)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_mode is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_mode() directly.

    YANG Description: ACTIVE is to initiate the transmission of LACP packets.
    PASSIVE is to wait for peer to initiate the transmission of
    LACP packets.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Enumeration restricted to ACTIVE/PASSIVE; defaults to ACTIVE.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lacp_mode must be of a type compatible with lacp-activity-type""",
          'defined-type': "openconfig-lacp:lacp-activity-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=False)""",
        })

    self.__lacp_mode = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_lacp_mode(self):
    self.__lacp_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=False)


  def _get_system_id_mac(self):
    """
    Getter method for system_id_mac, mapped from YANG variable /lacp/interfaces/interface/state/system_id_mac (oc-yang:mac-address)

    YANG Description: The MAC address portion of the node's System ID. This is
    combined with the system priority to construct the 8-octet
    system-id
    """
    return self.__system_id_mac

  def _set_system_id_mac(self, v, load=False):
    """
    Setter method for system_id_mac, mapped from YANG variable /lacp/interfaces/interface/state/system_id_mac (oc-yang:mac-address)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_system_id_mac is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system_id_mac() directly.

    YANG Description: The MAC address portion of the node's System ID. This is
    combined with the system priority to construct the 8-octet
    system-id
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # MAC address validated against the colon-separated hex-octet pattern.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """system_id_mac must be of a type compatible with oc-yang:mac-address""",
          'defined-type': "oc-yang:mac-address",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)""",
        })

    self.__system_id_mac = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_system_id_mac(self):
    self.__system_id_mac = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)


  def _get_system_priority(self):
    """
    Getter method for system_priority, mapped from YANG variable /lacp/interfaces/interface/state/system_priority (uint16)

    YANG Description: System priority used by the node on this LAG interface.
    Lower value is higher priority for determining which node
    is the controlling system.
    """
    return self.__system_priority

  def _set_system_priority(self, v, load=False):
    """
    Setter method for system_priority, mapped from YANG variable /lacp/interfaces/interface/state/system_priority (uint16)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_system_priority is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system_priority() directly.

    YANG Description: System priority used by the node on this LAG interface.
    Lower value is higher priority for determining which node
    is the controlling system.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """system_priority must be of a type compatible with uint16""",
          'defined-type': "uint16",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)""",
        })

    self.__system_priority = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_system_priority(self):
    self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)

  # state container: every leaf is is_config=False, so properties are
  # read-only; backends populate values via the private _set_* methods.
  name = __builtin__.property(_get_name)
  interval = __builtin__.property(_get_interval)
  lacp_mode = __builtin__.property(_get_lacp_mode)
  system_id_mac = __builtin__.property(_get_system_id_mac)
  system_priority = __builtin__.property(_get_system_priority)


  _pyangbind_elements = OrderedDict([('name', name), ('interval', interval), ('lacp_mode', lacp_mode), ('system_id_mac', system_id_mac), ('system_priority', system_priority), ])
# NOTE: auto-generated by the pyangbind PYANG plugin -- manual edits here will be
# lost on regeneration.  Each YANG leaf is stored as a private YANGDynClass
# instance with a generated _get_/_set_/_unset_ accessor triple; because this
# container is operational state (is_config=False), only the getters are
# exposed as read-only properties at the bottom of the class.
class yc_counters_openconfig_lacp__lacp_interfaces_interface_members_member_state_counters(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-lacp - based on the path /lacp/interfaces/interface/members/member/state/counters. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: LACP protocol counters
  """
  # __slots__ suppresses the per-instance __dict__; the double-underscore leaf
  # names are name-mangled, so external access must go through the accessors.
  __slots__ = ('_path_helper', '_extmethods', '__lacp_in_pkts','__lacp_out_pkts','__lacp_rx_errors','__lacp_tx_errors','__lacp_unknown_errors','__lacp_errors','__lacp_timeout_transitions',)
  _yang_name = 'counters'
  _yang_namespace = 'http://openconfig.net/yang/lacp'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # path_helper/extmethods default to disabled; a parent binding may
    # presumably replace them after construction -- TODO confirm against the
    # pyangbind runtime.
    self._path_helper = False
    self._extmethods = False
    # Every counter leaf is a YANG oc-yang:counter64: a range-restricted
    # unsigned 64-bit integer (0..2^64-1) wrapped in YANGDynClass.
    self.__lacp_in_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    self.__lacp_out_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    self.__lacp_rx_errors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-rx-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    self.__lacp_tx_errors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-tx-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    self.__lacp_unknown_errors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-unknown-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    self.__lacp_errors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    self.__lacp_timeout_transitions = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-timeout-transitions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    load = kwargs.pop("load", None)
    # Copy-constructor protocol: at most one positional argument is accepted,
    # and it must expose every element in _pyangbind_elements.  Only leaves
    # whose _changed() flag is set are copied, via the generated setters
    # (propagating the optional load flag).
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # Absolute YANG path of this container: derived from the parent when this
    # instance is attached to one, otherwise the static schema path.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['lacp', 'interfaces', 'interface', 'members', 'member', 'state', 'counters']
  def _get_lacp_in_pkts(self):
    """
    Getter method for lacp_in_pkts, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_in_pkts (oc-yang:counter64)
    YANG Description: Number of LACPDUs received
    """
    return self.__lacp_in_pkts
  def _set_lacp_in_pkts(self, v, load=False):
    """
    Setter method for lacp_in_pkts, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_in_pkts (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_in_pkts is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_in_pkts() directly.
    YANG Description: Number of LACPDUs received
    """
    # Unwrap a previously-typed value back to its base type before
    # revalidating it; the remaining setters below follow the same pattern.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value, which enforces the counter64 range restriction.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lacp_in_pkts must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__lacp_in_pkts = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lacp_in_pkts(self):
    # Reset the leaf to a fresh default-valued instance.
    self.__lacp_in_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
  def _get_lacp_out_pkts(self):
    """
    Getter method for lacp_out_pkts, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_out_pkts (oc-yang:counter64)
    YANG Description: Number of LACPDUs transmitted
    """
    return self.__lacp_out_pkts
  def _set_lacp_out_pkts(self, v, load=False):
    """
    Setter method for lacp_out_pkts, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_out_pkts (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_out_pkts is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_out_pkts() directly.
    YANG Description: Number of LACPDUs transmitted
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lacp_out_pkts must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__lacp_out_pkts = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lacp_out_pkts(self):
    self.__lacp_out_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
  def _get_lacp_rx_errors(self):
    """
    Getter method for lacp_rx_errors, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_rx_errors (oc-yang:counter64)
    YANG Description: Number of LACPDU receive packet errors
    """
    return self.__lacp_rx_errors
  def _set_lacp_rx_errors(self, v, load=False):
    """
    Setter method for lacp_rx_errors, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_rx_errors (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_rx_errors is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_rx_errors() directly.
    YANG Description: Number of LACPDU receive packet errors
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-rx-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lacp_rx_errors must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-rx-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__lacp_rx_errors = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lacp_rx_errors(self):
    self.__lacp_rx_errors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-rx-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
  def _get_lacp_tx_errors(self):
    """
    Getter method for lacp_tx_errors, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_tx_errors (oc-yang:counter64)
    YANG Description: Number of LACPDU transmit packet errors
    """
    return self.__lacp_tx_errors
  def _set_lacp_tx_errors(self, v, load=False):
    """
    Setter method for lacp_tx_errors, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_tx_errors (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_tx_errors is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_tx_errors() directly.
    YANG Description: Number of LACPDU transmit packet errors
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-tx-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lacp_tx_errors must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-tx-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__lacp_tx_errors = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lacp_tx_errors(self):
    self.__lacp_tx_errors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-tx-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
  def _get_lacp_unknown_errors(self):
    """
    Getter method for lacp_unknown_errors, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_unknown_errors (oc-yang:counter64)
    YANG Description: Number of LACPDU unknown packet errors
    """
    return self.__lacp_unknown_errors
  def _set_lacp_unknown_errors(self, v, load=False):
    """
    Setter method for lacp_unknown_errors, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_unknown_errors (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_unknown_errors is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_unknown_errors() directly.
    YANG Description: Number of LACPDU unknown packet errors
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-unknown-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lacp_unknown_errors must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-unknown-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__lacp_unknown_errors = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lacp_unknown_errors(self):
    self.__lacp_unknown_errors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-unknown-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
  def _get_lacp_errors(self):
    """
    Getter method for lacp_errors, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_errors (oc-yang:counter64)
    YANG Description: Number of LACPDU illegal packet errors
    """
    return self.__lacp_errors
  def _set_lacp_errors(self, v, load=False):
    """
    Setter method for lacp_errors, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_errors (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_errors is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_errors() directly.
    YANG Description: Number of LACPDU illegal packet errors
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lacp_errors must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__lacp_errors = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lacp_errors(self):
    self.__lacp_errors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
  def _get_lacp_timeout_transitions(self):
    """
    Getter method for lacp_timeout_transitions, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_timeout_transitions (oc-yang:counter64)
    YANG Description: Number of times the LACP state has transitioned
    with a timeout since the time the device restarted
    or the interface was brought up, whichever is most
    recent. The last state change of the LACP timeout
    is defined as what is reported as the operating state
    to the system. The state change is both a timeout
    event and when the timeout event is no longer active.
    """
    return self.__lacp_timeout_transitions
  def _set_lacp_timeout_transitions(self, v, load=False):
    """
    Setter method for lacp_timeout_transitions, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters/lacp_timeout_transitions (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_timeout_transitions is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_timeout_transitions() directly.
    YANG Description: Number of times the LACP state has transitioned
    with a timeout since the time the device restarted
    or the interface was brought up, whichever is most
    recent. The last state change of the LACP timeout
    is defined as what is reported as the operating state
    to the system. The state change is both a timeout
    event and when the timeout event is no longer active.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-timeout-transitions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lacp_timeout_transitions must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-timeout-transitions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__lacp_timeout_transitions = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lacp_timeout_transitions(self):
    self.__lacp_timeout_transitions = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="lacp-timeout-transitions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:counter64', is_config=False)
  # Read-only properties: no setters are bound because every leaf in this
  # container is operational state (config: false) in the YANG model.
  lacp_in_pkts = __builtin__.property(_get_lacp_in_pkts)
  lacp_out_pkts = __builtin__.property(_get_lacp_out_pkts)
  lacp_rx_errors = __builtin__.property(_get_lacp_rx_errors)
  lacp_tx_errors = __builtin__.property(_get_lacp_tx_errors)
  lacp_unknown_errors = __builtin__.property(_get_lacp_unknown_errors)
  lacp_errors = __builtin__.property(_get_lacp_errors)
  lacp_timeout_transitions = __builtin__.property(_get_lacp_timeout_transitions)
  # Ordered registry of this container's child elements; drives the
  # copy-constructor logic in __init__ and serialization in PybindBase.
  _pyangbind_elements = OrderedDict([('lacp_in_pkts', lacp_in_pkts), ('lacp_out_pkts', lacp_out_pkts), ('lacp_rx_errors', lacp_rx_errors), ('lacp_tx_errors', lacp_tx_errors), ('lacp_unknown_errors', lacp_unknown_errors), ('lacp_errors', lacp_errors), ('lacp_timeout_transitions', lacp_timeout_transitions), ])
class yc_state_openconfig_lacp__lacp_interfaces_interface_members_member_state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-lacp - based on the path /lacp/interfaces/interface/members/member/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for aggregate members
"""
__slots__ = ('_path_helper', '_extmethods', '__interface','__activity','__timeout','__synchronization','__aggregatable','__collecting','__distributing','__system_id','__oper_key','__partner_id','__partner_key','__port_num','__partner_port_num','__last_change','__counters',)
_yang_name = 'state'
_yang_namespace = 'http://openconfig.net/yang/lacp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=False)
self.__activity = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), is_leaf=True, yang_name="activity", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=False)
self.__timeout = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'LONG': {}, 'SHORT': {}},), is_leaf=True, yang_name="timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-timeout-type', is_config=False)
self.__synchronization = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IN_SYNC': {}, 'OUT_SYNC': {}},), is_leaf=True, yang_name="synchronization", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-synchronization-type', is_config=False)
self.__aggregatable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="aggregatable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)
self.__collecting = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="collecting", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)
self.__distributing = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="distributing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)
self.__system_id = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)
self.__oper_key = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="oper-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
self.__partner_id = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="partner-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)
self.__partner_key = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="partner-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
self.__port_num = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="port-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
self.__partner_port_num = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="partner-port-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
self.__last_change = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="last-change", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-types:timeticks64', is_config=False)
self.__counters = YANGDynClass(base=yc_counters_openconfig_lacp__lacp_interfaces_interface_members_member_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['lacp', 'interfaces', 'interface', 'members', 'member', 'state']
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /lacp/interfaces/interface/members/member/state/interface (oc-if:base-interface-ref)
YANG Description: Reference to interface member of the LACP aggregate
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /lacp/interfaces/interface/members/member/state/interface (oc-if:base-interface-ref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Reference to interface member of the LACP aggregate
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface must be of a type compatible with oc-if:base-interface-ref""",
'defined-type': "oc-if:base-interface-ref",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=False)""",
})
self.__interface = t
if hasattr(self, '_set'):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=False)
def _get_activity(self):
"""
Getter method for activity, mapped from YANG variable /lacp/interfaces/interface/members/member/state/activity (lacp-activity-type)
YANG Description: Indicates participant is active or passive
"""
return self.__activity
def _set_activity(self, v, load=False):
    """
    Setter method for activity, mapped from YANG variable /lacp/interfaces/interface/members/member/state/activity (lacp-activity-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_activity is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_activity() directly.

    YANG Description: Indicates participant is active or passive
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), is_leaf=True, yang_name="activity", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """activity must be of a type compatible with lacp-activity-type""",
            'defined-type': "openconfig-lacp:lacp-activity-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), is_leaf=True, yang_name="activity", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=False)""",
        })
    self.__activity = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_activity(self):
    """Reset activity to its default (unset) value."""
    self.__activity = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), is_leaf=True, yang_name="activity", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=False)
def _get_timeout(self):
    """
    Getter method for timeout, mapped from YANG variable /lacp/interfaces/interface/members/member/state/timeout (lacp-timeout-type)

    YANG Description: The timeout type (short or long) used by the
    participant
    """
    # Backing store is populated by _set_timeout/_unset_timeout.
    return self.__timeout
def _set_timeout(self, v, load=False):
    """
    Setter method for timeout, mapped from YANG variable /lacp/interfaces/interface/members/member/state/timeout (lacp-timeout-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_timeout is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_timeout() directly.

    YANG Description: The timeout type (short or long) used by the
    participant
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'LONG': {}, 'SHORT': {}},), is_leaf=True, yang_name="timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-timeout-type', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """timeout must be of a type compatible with lacp-timeout-type""",
            'defined-type': "openconfig-lacp:lacp-timeout-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'LONG': {}, 'SHORT': {}},), is_leaf=True, yang_name="timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-timeout-type', is_config=False)""",
        })
    self.__timeout = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_timeout(self):
    """Reset timeout to its default (unset) value."""
    self.__timeout = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'LONG': {}, 'SHORT': {}},), is_leaf=True, yang_name="timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-timeout-type', is_config=False)
def _get_synchronization(self):
    """
    Getter method for synchronization, mapped from YANG variable /lacp/interfaces/interface/members/member/state/synchronization (lacp-synchronization-type)

    YANG Description: Indicates whether the participant is in-sync or
    out-of-sync
    """
    # Backing store is populated by _set_synchronization/_unset_synchronization.
    return self.__synchronization
def _set_synchronization(self, v, load=False):
    """
    Setter method for synchronization, mapped from YANG variable /lacp/interfaces/interface/members/member/state/synchronization (lacp-synchronization-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_synchronization is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_synchronization() directly.

    YANG Description: Indicates whether the participant is in-sync or
    out-of-sync
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IN_SYNC': {}, 'OUT_SYNC': {}},), is_leaf=True, yang_name="synchronization", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-synchronization-type', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """synchronization must be of a type compatible with lacp-synchronization-type""",
            'defined-type': "openconfig-lacp:lacp-synchronization-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IN_SYNC': {}, 'OUT_SYNC': {}},), is_leaf=True, yang_name="synchronization", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-synchronization-type', is_config=False)""",
        })
    self.__synchronization = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_synchronization(self):
    """Reset synchronization to its default (unset) value."""
    self.__synchronization = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IN_SYNC': {}, 'OUT_SYNC': {}},), is_leaf=True, yang_name="synchronization", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-synchronization-type', is_config=False)
def _get_aggregatable(self):
    """
    Getter method for aggregatable, mapped from YANG variable /lacp/interfaces/interface/members/member/state/aggregatable (boolean)

    YANG Description: A true value indicates that the participant will allow
    the link to be used as part of the aggregate. A false
    value indicates the link should be used as an individual
    link
    """
    # Backing store is populated by _set_aggregatable/_unset_aggregatable.
    return self.__aggregatable
def _set_aggregatable(self, v, load=False):
    """
    Setter method for aggregatable, mapped from YANG variable /lacp/interfaces/interface/members/member/state/aggregatable (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_aggregatable is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_aggregatable() directly.

    YANG Description: A true value indicates that the participant will allow
    the link to be used as part of the aggregate. A false
    value indicates the link should be used as an individual
    link
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="aggregatable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """aggregatable must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="aggregatable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)""",
        })
    self.__aggregatable = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_aggregatable(self):
    """Reset aggregatable to its default (unset) value."""
    self.__aggregatable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="aggregatable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)
def _get_collecting(self):
    """
    Getter method for collecting, mapped from YANG variable /lacp/interfaces/interface/members/member/state/collecting (boolean)

    YANG Description: If true, the participant is collecting incoming frames
    on the link, otherwise false
    """
    # Backing store is populated by _set_collecting/_unset_collecting.
    return self.__collecting
def _set_collecting(self, v, load=False):
    """
    Setter method for collecting, mapped from YANG variable /lacp/interfaces/interface/members/member/state/collecting (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_collecting is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_collecting() directly.

    YANG Description: If true, the participant is collecting incoming frames
    on the link, otherwise false
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="collecting", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """collecting must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="collecting", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)""",
        })
    self.__collecting = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_collecting(self):
    """Reset collecting to its default (unset) value."""
    self.__collecting = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="collecting", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)
def _get_distributing(self):
    """
    Getter method for distributing, mapped from YANG variable /lacp/interfaces/interface/members/member/state/distributing (boolean)

    YANG Description: When true, the participant is distributing outgoing
    frames; when false, distribution is disabled
    """
    # Backing store is populated by _set_distributing/_unset_distributing.
    return self.__distributing
def _set_distributing(self, v, load=False):
    """
    Setter method for distributing, mapped from YANG variable /lacp/interfaces/interface/members/member/state/distributing (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_distributing is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_distributing() directly.

    YANG Description: When true, the participant is distributing outgoing
    frames; when false, distribution is disabled
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="distributing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """distributing must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="distributing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)""",
        })
    self.__distributing = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_distributing(self):
    """Reset distributing to its default (unset) value."""
    self.__distributing = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="distributing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='boolean', is_config=False)
def _get_system_id(self):
    """
    Getter method for system_id, mapped from YANG variable /lacp/interfaces/interface/members/member/state/system_id (oc-yang:mac-address)

    YANG Description: MAC address that defines the local system ID for the
    aggregate interface
    """
    # Backing store is populated by _set_system_id/_unset_system_id.
    return self.__system_id
def _set_system_id(self, v, load=False):
    """
    Setter method for system_id, mapped from YANG variable /lacp/interfaces/interface/members/member/state/system_id (oc-yang:mac-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_system_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system_id() directly.

    YANG Description: MAC address that defines the local system ID for the
    aggregate interface
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """system_id must be of a type compatible with oc-yang:mac-address""",
            'defined-type': "oc-yang:mac-address",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)""",
        })
    self.__system_id = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_system_id(self):
    """Reset system_id to its default (unset) value."""
    self.__system_id = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="system-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)
def _get_oper_key(self):
    """
    Getter method for oper_key, mapped from YANG variable /lacp/interfaces/interface/members/member/state/oper_key (uint16)

    YANG Description: Current operational value of the key for the aggregate
    interface
    """
    # Backing store is populated by _set_oper_key/_unset_oper_key.
    return self.__oper_key
def _set_oper_key(self, v, load=False):
    """
    Setter method for oper_key, mapped from YANG variable /lacp/interfaces/interface/members/member/state/oper_key (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_oper_key is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_oper_key() directly.

    YANG Description: Current operational value of the key for the aggregate
    interface
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="oper-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """oper_key must be of a type compatible with uint16""",
            'defined-type': "uint16",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="oper-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)""",
        })
    self.__oper_key = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_oper_key(self):
    """Reset oper_key to its default (unset) value."""
    self.__oper_key = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="oper-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
def _get_partner_id(self):
    """
    Getter method for partner_id, mapped from YANG variable /lacp/interfaces/interface/members/member/state/partner_id (oc-yang:mac-address)

    YANG Description: MAC address representing the protocol partner's interface
    system ID
    """
    # Backing store is populated by _set_partner_id/_unset_partner_id.
    return self.__partner_id
def _set_partner_id(self, v, load=False):
    """
    Setter method for partner_id, mapped from YANG variable /lacp/interfaces/interface/members/member/state/partner_id (oc-yang:mac-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_partner_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_partner_id() directly.

    YANG Description: MAC address representing the protocol partner's interface
    system ID
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="partner-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """partner_id must be of a type compatible with oc-yang:mac-address""",
            'defined-type': "oc-yang:mac-address",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="partner-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)""",
        })
    self.__partner_id = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_partner_id(self):
    """Reset partner_id to its default (unset) value."""
    self.__partner_id = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="partner-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=False)
def _get_partner_key(self):
    """
    Getter method for partner_key, mapped from YANG variable /lacp/interfaces/interface/members/member/state/partner_key (uint16)

    YANG Description: Operational value of the protocol partner's key
    """
    # Backing store is populated by _set_partner_key/_unset_partner_key.
    return self.__partner_key
def _set_partner_key(self, v, load=False):
    """
    Setter method for partner_key, mapped from YANG variable /lacp/interfaces/interface/members/member/state/partner_key (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_partner_key is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_partner_key() directly.

    YANG Description: Operational value of the protocol partner's key
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="partner-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """partner_key must be of a type compatible with uint16""",
            'defined-type': "uint16",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="partner-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)""",
        })
    self.__partner_key = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_partner_key(self):
    """Reset partner_key to its default (unset) value."""
    self.__partner_key = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="partner-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
def _get_port_num(self):
    """
    Getter method for port_num, mapped from YANG variable /lacp/interfaces/interface/members/member/state/port_num (uint16)

    YANG Description: Port number of the local (actor) aggregation member
    """
    # Backing store is populated by _set_port_num/_unset_port_num.
    return self.__port_num
def _set_port_num(self, v, load=False):
    """
    Setter method for port_num, mapped from YANG variable /lacp/interfaces/interface/members/member/state/port_num (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_port_num is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_port_num() directly.

    YANG Description: Port number of the local (actor) aggregation member
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="port-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """port_num must be of a type compatible with uint16""",
            'defined-type': "uint16",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="port-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)""",
        })
    self.__port_num = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_port_num(self):
    """Reset port_num to its default (unset) value."""
    self.__port_num = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="port-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
def _get_partner_port_num(self):
    """
    Getter method for partner_port_num, mapped from YANG variable /lacp/interfaces/interface/members/member/state/partner_port_num (uint16)

    YANG Description: Port number of the partner (remote) port for this member
    port
    """
    # Backing store is populated by _set_partner_port_num/_unset_partner_port_num.
    return self.__partner_port_num
def _set_partner_port_num(self, v, load=False):
    """
    Setter method for partner_port_num, mapped from YANG variable /lacp/interfaces/interface/members/member/state/partner_port_num (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_partner_port_num is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_partner_port_num() directly.

    YANG Description: Port number of the partner (remote) port for this member
    port
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="partner-port-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """partner_port_num must be of a type compatible with uint16""",
            'defined-type': "uint16",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="partner-port-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)""",
        })
    self.__partner_port_num = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_partner_port_num(self):
    """Reset partner_port_num to its default (unset) value."""
    self.__partner_port_num = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="partner-port-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=False)
def _get_last_change(self):
    """
    Getter method for last_change, mapped from YANG variable /lacp/interfaces/interface/members/member/state/last_change (oc-types:timeticks64)

    YANG Description: The timestamp indicates the absolute time of the last state
    change of a LACP timeout. The last state change of the LACP
    timeout is defined as what is reported as the operating state
    to the system. The state change is both a timeout event and
    when the timeout event is no longer active. The value is the
    timestamp in nanoseconds relative to the Unix Epoch
    (Jan 1, 1970 00:00:00 UTC).
    """
    # Backing store is populated by _set_last_change/_unset_last_change.
    return self.__last_change
def _set_last_change(self, v, load=False):
    """
    Setter method for last_change, mapped from YANG variable /lacp/interfaces/interface/members/member/state/last_change (oc-types:timeticks64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_last_change is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_last_change() directly.

    YANG Description: The timestamp indicates the absolute time of the last state
    change of a LACP timeout. The last state change of the LACP
    timeout is defined as what is reported as the operating state
    to the system. The state change is both a timeout event and
    when the timeout event is no longer active. The value is the
    timestamp in nanoseconds relative to the Unix Epoch
    (Jan 1, 1970 00:00:00 UTC).
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # NOTE: `long` is expected to be provided at module scope (py2/py3 compat shim) — not a builtin on py3.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="last-change", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-types:timeticks64', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """last_change must be of a type compatible with oc-types:timeticks64""",
            'defined-type': "oc-types:timeticks64",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="last-change", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-types:timeticks64', is_config=False)""",
        })
    self.__last_change = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_last_change(self):
    """Reset last_change to its default (unset) value."""
    self.__last_change = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="last-change", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-types:timeticks64', is_config=False)
def _get_counters(self):
    """
    Getter method for counters, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters (container)

    YANG Description: LACP protocol counters
    """
    # Backing store is populated by _set_counters/_unset_counters.
    return self.__counters
def _set_counters(self, v, load=False):
    """
    Setter method for counters, mapped from YANG variable /lacp/interfaces/interface/members/member/state/counters (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_counters is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_counters() directly.

    YANG Description: LACP protocol counters
    """
    # Normalize values that expose a _utype coercion hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=yc_counters_openconfig_lacp__lacp_interfaces_interface_members_member_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error naming the expected YANG type.
        raise ValueError({
            'error-string': """counters must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=yc_counters_openconfig_lacp__lacp_interfaces_interface_members_member_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=False)""",
        })
    self.__counters = t
    # Invoke the _set hook when the instance defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_counters(self):
    """Reset the counters container to a fresh default instance."""
    self.__counters = YANGDynClass(base=yc_counters_openconfig_lacp__lacp_interfaces_interface_members_member_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=False)
# Read-only properties for each state leaf: this container is config: false,
# so only getters are published (no property setters).
interface = __builtin__.property(_get_interface)
activity = __builtin__.property(_get_activity)
timeout = __builtin__.property(_get_timeout)
synchronization = __builtin__.property(_get_synchronization)
aggregatable = __builtin__.property(_get_aggregatable)
collecting = __builtin__.property(_get_collecting)
distributing = __builtin__.property(_get_distributing)
system_id = __builtin__.property(_get_system_id)
oper_key = __builtin__.property(_get_oper_key)
partner_id = __builtin__.property(_get_partner_id)
partner_key = __builtin__.property(_get_partner_key)
port_num = __builtin__.property(_get_port_num)
partner_port_num = __builtin__.property(_get_partner_port_num)
last_change = __builtin__.property(_get_last_change)
counters = __builtin__.property(_get_counters)
# Ordered registry of this container's child elements, consumed by
# PyangBind's iteration/serialization machinery.
_pyangbind_elements = OrderedDict([('interface', interface), ('activity', activity), ('timeout', timeout), ('synchronization', synchronization), ('aggregatable', aggregatable), ('collecting', collecting), ('distributing', distributing), ('system_id', system_id), ('oper_key', oper_key), ('partner_id', partner_id), ('partner_key', partner_key), ('port_num', port_num), ('partner_port_num', partner_port_num), ('last_change', last_change), ('counters', counters), ])
class yc_member_openconfig_lacp__lacp_interfaces_interface_members_member(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-lacp - based on the path /lacp/interfaces/interface/members/member. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: List of member interfaces and their associated status for
    a LACP-controlled aggregate interface. Member list is not
    configurable here -- each interface indicates items
    its participation in the LAG.
    """
    # NOTE(review): auto-generated pyangbind binding class. Do not hand-edit;
    # regenerate from the openconfig-lacp YANG module instead.
    __slots__ = ('_path_helper', '_extmethods', '__interface','__state',)

    _yang_name = 'member'
    _yang_namespace = 'http://openconfig.net/yang/lacp'

    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        # Instantiate each child node with its generated default; an optional
        # single positional argument copies changed elements from a compatible
        # object (duck-typed: it must expose every _pyangbind_elements name).
        self._path_helper = False

        self._extmethods = False

        self.__interface = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='leafref', is_config=False)

        self.__state = YANGDynClass(base=yc_state_openconfig_lacp__lacp_interfaces_interface_members_member_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=False)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Only copy elements the source object has actually changed.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Instance path when attached to a parent; otherwise the static
        # schema path of this node.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['lacp', 'interfaces', 'interface', 'members', 'member']

    def _get_interface(self):
        """
        Getter method for interface, mapped from YANG variable /lacp/interfaces/interface/members/member/interface (leafref)

        YANG Description: Reference to aggregate member interface
        """
        return self.__interface

    def _set_interface(self, v, load=False):
        """
        Setter method for interface, mapped from YANG variable /lacp/interfaces/interface/members/member/interface (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_interface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_interface() directly.

        YANG Description: Reference to aggregate member interface
        """
        # 'interface' is the list key; keys may not be reassigned once the
        # entry is attached to an instantiated list (except during load).
        parent = getattr(self, "_parent", None)
        if parent is not None and load is False:
            raise AttributeError("Cannot set keys directly when" +
                                 " within an instantiated list")

        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='leafref', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """interface must be of a type compatible with leafref""",
                'defined-type': "leafref",
                'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='leafref', is_config=False)""",
            })

        self.__interface = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_interface(self):
        # Restore the leaf to a fresh default instance.
        self.__interface = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='leafref', is_config=False)

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /lacp/interfaces/interface/members/member/state (container)

        YANG Description: Operational state data for aggregate members
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /lacp/interfaces/interface/members/member/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: Operational state data for aggregate members
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=yc_state_openconfig_lacp__lacp_interfaces_interface_members_member_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """state must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_state_openconfig_lacp__lacp_interfaces_interface_members_member_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=False)""",
            })

        self.__state = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_state(self):
        # Restore the container to a fresh default instance.
        self.__state = YANGDynClass(base=yc_state_openconfig_lacp__lacp_interfaces_interface_members_member_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=False)

    # Read-only properties (no setter exposed: config false in the model).
    interface = __builtin__.property(_get_interface)
    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([('interface', interface), ('state', state), ])
class yc_members_openconfig_lacp__lacp_interfaces_interface_members(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-lacp - based on the path /lacp/interfaces/interface/members. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Enclosing container for the list of members interfaces of
    the aggregate. This list is considered operational state
    only so is labeled config false and has no config container
    """
    # NOTE(review): auto-generated pyangbind binding class. Do not hand-edit;
    # regenerate from the openconfig-lacp YANG module instead.
    __slots__ = ('_path_helper', '_extmethods', '__member',)

    _yang_name = 'members'
    _yang_namespace = 'http://openconfig.net/yang/lacp'

    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        # Instantiate the child list with its generated default; an optional
        # single positional argument copies changed elements from a compatible
        # object (duck-typed: it must expose every _pyangbind_elements name).
        self._path_helper = False

        self._extmethods = False

        self.__member = YANGDynClass(base=YANGListType("interface",yc_member_openconfig_lacp__lacp_interfaces_interface_members_member, yang_name="member", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface', extensions=None), is_container='list', yang_name="member", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='list', is_config=False)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Only copy elements the source object has actually changed.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Instance path when attached to a parent; otherwise the static
        # schema path of this node.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['lacp', 'interfaces', 'interface', 'members']

    def _get_member(self):
        """
        Getter method for member, mapped from YANG variable /lacp/interfaces/interface/members/member (list)

        YANG Description: List of member interfaces and their associated status for
        a LACP-controlled aggregate interface. Member list is not
        configurable here -- each interface indicates items
        its participation in the LAG.
        """
        return self.__member

    def _set_member(self, v, load=False):
        """
        Setter method for member, mapped from YANG variable /lacp/interfaces/interface/members/member (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_member is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_member() directly.

        YANG Description: List of member interfaces and their associated status for
        a LACP-controlled aggregate interface. Member list is not
        configurable here -- each interface indicates items
        its participation in the LAG.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=YANGListType("interface",yc_member_openconfig_lacp__lacp_interfaces_interface_members_member, yang_name="member", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface', extensions=None), is_container='list', yang_name="member", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='list', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """member must be of a type compatible with list""",
                'defined-type': "list",
                'generated-type': """YANGDynClass(base=YANGListType("interface",yc_member_openconfig_lacp__lacp_interfaces_interface_members_member, yang_name="member", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface', extensions=None), is_container='list', yang_name="member", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='list', is_config=False)""",
            })

        self.__member = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_member(self):
        # Restore the list to a fresh, empty default instance.
        self.__member = YANGDynClass(base=YANGListType("interface",yc_member_openconfig_lacp__lacp_interfaces_interface_members_member, yang_name="member", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface', extensions=None), is_container='list', yang_name="member", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='list', is_config=False)

    # Read-only property (no setter exposed: config false in the model).
    member = __builtin__.property(_get_member)

    _pyangbind_elements = OrderedDict([('member', member), ])
class yc_interface_openconfig_lacp__lacp_interfaces_interface(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-lacp - based on the path /lacp/interfaces/interface. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: List of aggregate interfaces managed by LACP
    """
    # NOTE(review): auto-generated pyangbind binding class. Do not hand-edit;
    # regenerate from the openconfig-lacp YANG module instead.
    __slots__ = ('_path_helper', '_extmethods', '__name','__config','__state','__members',)

    _yang_name = 'interface'
    _yang_namespace = 'http://openconfig.net/yang/lacp'

    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        # Instantiate each child node with its generated default; an optional
        # single positional argument copies changed elements from a compatible
        # object (duck-typed: it must expose every _pyangbind_elements name).
        self._path_helper = False

        self._extmethods = False

        self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='leafref', is_config=True)

        self.__config = YANGDynClass(base=yc_config_openconfig_lacp__lacp_interfaces_interface_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)

        self.__state = YANGDynClass(base=yc_state_openconfig_lacp__lacp_interfaces_interface_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)

        self.__members = YANGDynClass(base=yc_members_openconfig_lacp__lacp_interfaces_interface_members, is_container='container', yang_name="members", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Only copy elements the source object has actually changed.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Instance path when attached to a parent; otherwise the static
        # schema path of this node.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['lacp', 'interfaces', 'interface']

    def _get_name(self):
        """
        Getter method for name, mapped from YANG variable /lacp/interfaces/interface/name (leafref)

        YANG Description: Reference to the list key
        """
        return self.__name

    def _set_name(self, v, load=False):
        """
        Setter method for name, mapped from YANG variable /lacp/interfaces/interface/name (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_name is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_name() directly.

        YANG Description: Reference to the list key
        """
        # 'name' is the list key; keys may not be reassigned once the entry
        # is attached to an instantiated list (except during load).
        parent = getattr(self, "_parent", None)
        if parent is not None and load is False:
            raise AttributeError("Cannot set keys directly when" +
                                 " within an instantiated list")

        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='leafref', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """name must be of a type compatible with leafref""",
                'defined-type': "leafref",
                'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='leafref', is_config=True)""",
            })

        self.__name = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_name(self):
        # Restore the leaf to a fresh default instance.
        self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='leafref', is_config=True)

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /lacp/interfaces/interface/config (container)

        YANG Description: Configuration data for each LACP aggregate interface
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /lacp/interfaces/interface/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration data for each LACP aggregate interface
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=yc_config_openconfig_lacp__lacp_interfaces_interface_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """config must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_config_openconfig_lacp__lacp_interfaces_interface_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)""",
            })

        self.__config = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_config(self):
        # Restore the container to a fresh default instance.
        self.__config = YANGDynClass(base=yc_config_openconfig_lacp__lacp_interfaces_interface_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /lacp/interfaces/interface/state (container)

        YANG Description: Operational state data for each LACP aggregate
        interface
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /lacp/interfaces/interface/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: Operational state data for each LACP aggregate
        interface
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=yc_state_openconfig_lacp__lacp_interfaces_interface_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """state must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_state_openconfig_lacp__lacp_interfaces_interface_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)""",
            })

        self.__state = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_state(self):
        # Restore the container to a fresh default instance.
        self.__state = YANGDynClass(base=yc_state_openconfig_lacp__lacp_interfaces_interface_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)

    def _get_members(self):
        """
        Getter method for members, mapped from YANG variable /lacp/interfaces/interface/members (container)

        YANG Description: Enclosing container for the list of members interfaces of
        the aggregate. This list is considered operational state
        only so is labeled config false and has no config container
        """
        return self.__members

    def _set_members(self, v, load=False):
        """
        Setter method for members, mapped from YANG variable /lacp/interfaces/interface/members (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_members is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_members() directly.

        YANG Description: Enclosing container for the list of members interfaces of
        the aggregate. This list is considered operational state
        only so is labeled config false and has no config container
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=yc_members_openconfig_lacp__lacp_interfaces_interface_members, is_container='container', yang_name="members", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """members must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_members_openconfig_lacp__lacp_interfaces_interface_members, is_container='container', yang_name="members", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)""",
            })

        self.__members = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_members(self):
        # Restore the container to a fresh default instance.
        self.__members = YANGDynClass(base=yc_members_openconfig_lacp__lacp_interfaces_interface_members, is_container='container', yang_name="members", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)

    # Read/write properties (config true in the model).
    name = __builtin__.property(_get_name, _set_name)
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)
    members = __builtin__.property(_get_members, _set_members)

    _pyangbind_elements = OrderedDict([('name', name), ('config', config), ('state', state), ('members', members), ])
class yc_interfaces_openconfig_lacp__lacp_interfaces(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-lacp - based on the path /lacp/interfaces. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Enclosing container for the list of LACP-enabled
    interfaces
    """
    # NOTE(review): auto-generated pyangbind binding class. Do not hand-edit;
    # regenerate from the openconfig-lacp YANG module instead.
    __slots__ = ('_path_helper', '_extmethods', '__interface',)

    _yang_name = 'interfaces'
    _yang_namespace = 'http://openconfig.net/yang/lacp'

    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        # Instantiate the child list with its generated default; an optional
        # single positional argument copies changed elements from a compatible
        # object (duck-typed: it must expose every _pyangbind_elements name).
        self._path_helper = False

        self._extmethods = False

        self.__interface = YANGDynClass(base=YANGListType("name",yc_interface_openconfig_lacp__lacp_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='list', is_config=True)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Only copy elements the source object has actually changed.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Instance path when attached to a parent; otherwise the static
        # schema path of this node.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['lacp', 'interfaces']

    def _get_interface(self):
        """
        Getter method for interface, mapped from YANG variable /lacp/interfaces/interface (list)

        YANG Description: List of aggregate interfaces managed by LACP
        """
        return self.__interface

    def _set_interface(self, v, load=False):
        """
        Setter method for interface, mapped from YANG variable /lacp/interfaces/interface (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_interface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_interface() directly.

        YANG Description: List of aggregate interfaces managed by LACP
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=YANGListType("name",yc_interface_openconfig_lacp__lacp_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='list', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """interface must be of a type compatible with list""",
                'defined-type': "list",
                'generated-type': """YANGDynClass(base=YANGListType("name",yc_interface_openconfig_lacp__lacp_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='list', is_config=True)""",
            })

        self.__interface = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_interface(self):
        # Restore the list to a fresh, empty default instance.
        self.__interface = YANGDynClass(base=YANGListType("name",yc_interface_openconfig_lacp__lacp_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='list', is_config=True)

    # Read/write property (config true in the model).
    interface = __builtin__.property(_get_interface, _set_interface)

    _pyangbind_elements = OrderedDict([('interface', interface), ])
class yc_lacp_openconfig_lacp__lacp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-lacp - based on the path /lacp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state data for LACP protocol
operation on the aggregate interface
"""
__slots__ = ('_path_helper', '_extmethods', '__config','__state','__interfaces',)
_yang_name = 'lacp'
_yang_namespace = 'http://openconfig.net/yang/lacp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(base=yc_config_openconfig_lacp__lacp_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
self.__state = YANGDynClass(base=yc_state_openconfig_lacp__lacp_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
self.__interfaces = YANGDynClass(base=yc_interfaces_openconfig_lacp__lacp_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['lacp']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /lacp/config (container)
YANG Description: Configuration data for LACP
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /lacp/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration data for LACP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_config_openconfig_lacp__lacp_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_config_openconfig_lacp__lacp_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=yc_config_openconfig_lacp__lacp_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
def _get_state(self):
    """
    Getter method for state, mapped from YANG variable /lacp/state (container)

    YANG Description: Operational state data for LACP
    """
    return self.__state

def _set_state(self, v, load=False):
    """
    Setter method for state, mapped from YANG variable /lacp/state (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_state() directly.

    YANG Description: Operational state data for LACP
    """
    # Unwrap a YANGDynClass-wrapped value back to its base type before
    # re-wrapping it with this node's YANG metadata.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=yc_state_openconfig_lacp__lacp_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """state must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=yc_state_openconfig_lacp__lacp_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)""",
        })
    self.__state = t
    # Notify the parent object (when registered) that a child value changed.
    if hasattr(self, '_set'):
        self._set()

def _unset_state(self):
    # Restore the container to a fresh, default instance of its YANG type.
    self.__state = YANGDynClass(base=yc_state_openconfig_lacp__lacp_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
def _get_interfaces(self):
    """
    Getter method for interfaces, mapped from YANG variable /lacp/interfaces (container)

    YANG Description: Enclosing container for the list of LACP-enabled
    interfaces
    """
    return self.__interfaces

def _set_interfaces(self, v, load=False):
    """
    Setter method for interfaces, mapped from YANG variable /lacp/interfaces (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_interfaces is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interfaces() directly.

    YANG Description: Enclosing container for the list of LACP-enabled
    interfaces
    """
    # Unwrap a YANGDynClass-wrapped value back to its base type before
    # re-wrapping it with this node's YANG metadata.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=yc_interfaces_openconfig_lacp__lacp_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interfaces must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=yc_interfaces_openconfig_lacp__lacp_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)""",
        })
    self.__interfaces = t
    # Notify the parent object (when registered) that a child value changed.
    if hasattr(self, '_set'):
        self._set()

def _unset_interfaces(self):
    # Restore the container to a fresh, default instance of its YANG type.
    self.__interfaces = YANGDynClass(base=yc_interfaces_openconfig_lacp__lacp_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
# Expose the generated accessor pairs as ordinary Python properties.
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
interfaces = __builtin__.property(_get_interfaces, _set_interfaces)

# Ordered registry of this container's YANG children, used by pyangbind's
# serialisation and copy-construction helpers.
_pyangbind_elements = OrderedDict([('config', config), ('state', state), ('interfaces', interfaces), ])
class openconfig_lacp(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-lacp - based on the path /openconfig-lacp. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This module describes configuration and operational state
    data for Link Aggregation Control Protocol (LACP) for
    managing aggregate interfaces. It works in conjunction with
    the OpenConfig interfaces and aggregate interfaces models.
    """
    # __slots__ keeps instances lightweight; '__lacp' is name-mangled so it
    # matches the private attribute assigned in __init__.
    __slots__ = ('_path_helper', '_extmethods', '__lacp',)

    _yang_name = 'openconfig-lacp'
    _yang_namespace = 'http://openconfig.net/yang/lacp'

    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        self._path_helper = False

        self._extmethods = False
        # Child container /lacp, eagerly instantiated with its defaults.
        self.__lacp = YANGDynClass(base=yc_lacp_openconfig_lacp__lacp, is_container='container', yang_name="lacp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-constructor path: the single argument must expose every
            # registered child element of this container.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Only copy children that were actually changed on the source.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this node within the YANG tree; the module root has an
        # empty path.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return []

    def _get_lacp(self):
        """
        Getter method for lacp, mapped from YANG variable /lacp (container)

        YANG Description: Configuration and operational state data for LACP protocol
        operation on the aggregate interface
        """
        return self.__lacp

    def _set_lacp(self, v, load=False):
        """
        Setter method for lacp, mapped from YANG variable /lacp (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_lacp is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_lacp() directly.

        YANG Description: Configuration and operational state data for LACP protocol
        operation on the aggregate interface
        """
        # Unwrap a YANGDynClass-wrapped value back to its base type before
        # re-wrapping it with this node's YANG metadata.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=yc_lacp_openconfig_lacp__lacp, is_container='container', yang_name="lacp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """lacp must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_lacp_openconfig_lacp__lacp, is_container='container', yang_name="lacp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)""",
            })
        self.__lacp = t
        # Notify the parent object (when registered) that a child changed.
        if hasattr(self, '_set'):
            self._set()

    def _unset_lacp(self):
        # Restore the child container to a fresh default instance.
        self.__lacp = YANGDynClass(base=yc_lacp_openconfig_lacp__lacp, is_container='container', yang_name="lacp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='container', is_config=True)

    # Expose the accessor pair as an ordinary Python property.
    lacp = __builtin__.property(_get_lacp, _set_lacp)

    # Ordered registry of this container's YANG children.
    _pyangbind_elements = OrderedDict([('lacp', lacp), ])
```
#### File: oc_config_validate/testcases/static_route.py
```python
import json
from pyangbind.lib import pybindJSON
from retry import retry
from oc_config_validate import schema, testbase
from oc_config_validate.models.local_routing import openconfig_local_routing
class AddStaticRoute(testbase.TestCase):
    """Tests configuring a static route.

    1. A gNMI Set message is sent to configure the route.
    1. A gNMI Get message on the /config and /state containers validates it.

    All arguments are read from the Test YAML description.

    Args:
        description: Optional text description of the route.
        prefix: Destination prefix of the static route.
        next_hop: IP of the next hop of the route.
        index: Index of the next hop for the prefix. Defaults to 0.
        metric: Optional numeric metric of the next hop for the prefix.
    """
    # Test arguments, overridden from the YAML test description.
    description = None
    prefix = ""
    next_hop = ""
    index = 0
    metric = None
    # pyangbind model of the route under test, built in setUp().
    route = None

    def setUp(self):
        # Build the expected static-route entry with the pyangbind model.
        self.route = openconfig_local_routing(
        ).local_routes.static_routes.static.add(self.prefix)
        self.route.config.prefix = self.prefix
        if self.description is not None:
            self.route.config.description = self.description
        self.nh = self.route.next_hops.next_hop.add(self.index)
        self.nh.config.index = self.index
        self.nh.config.next_hop = self.next_hop
        if self.metric is not None:
            self.nh.config.metric = self.metric

    def test0001(self):
        # Fail early when mandatory YAML arguments are missing.
        self.assertArgs(["prefix", "next_hop"])

    def test0100(self):
        # Push the whole static-route entry via a gNMI Set Update.
        xpath = "/local-routes/static-routes/static[prefix=%s]" % self.prefix
        _json_value = json.loads(pybindJSON.dumps(self.route, mode='ietf'))
        self.assertTrue(self.gNMISetUpdate(xpath, _json_value),
                        "gNMI Set did not succeed.")

    @retry(exceptions=AssertionError, tries=3, delay=10)
    def test0200(self):
        # The /config container must echo what was configured.
        xpath = ("/local-routes/static-routes/static[prefix=%s]/"
                 "next-hops/next-hop[index=%d]/config") % (
                     self.prefix, self.index)
        resp_val = self.gNMIGetJson(xpath)
        self.assertJsonModel(
            resp_val, self.nh.config,
            'gNMI Get on the /config container does not match model')
        want = json.loads(
            schema.removeOpenConfigPrefix(
                pybindJSON.dumps(self.nh.config, mode='ietf')))
        self.assertJsonCmp(resp_val, want)

    @retry(exceptions=AssertionError, tries=3, delay=10)
    def test0300(self):
        # The /state container must eventually reflect the configuration.
        xpath = ("/local-routes/static-routes/static[prefix=%s]/"
                 "next-hops/next-hop[index=%d]/state") % (
                     self.prefix, self.index)
        resp_val = self.gNMIGetJson(xpath)
        self.nh.state._set_index(self.index)
        self.nh.state._set_next_hop(self.next_hop)
        if self.metric is not None:
            self.nh.state._set_metric(self.metric)
        self.assertJsonModel(
            resp_val, self.nh.state,
            'gNMI Get on the /state container does not match model')
        want = json.loads(
            schema.removeOpenConfigPrefix(
                pybindJSON.dumps(self.nh.state, mode='ietf')))
        self.assertJsonCmp(resp_val, want)
class RemoveStaticRoute(testbase.TestCase):
    """Tests removing a static route.

    1. gNMI Get message on the /config container, to check it is configured.
    1. gNMI Set message to delete the route.
    1. gNMI Get message on the /config container to check it is not there.

    All arguments are read from the Test YAML description.

    Args:
        prefix: Destination prefix of the static route.
        index: Index of the next hop for the prefix. Defaults to 0.
    """
    prefix = ""
    index = 0

    def _next_hop_xpath(self, suffix=""):
        # Shared xpath of this route's next-hop entry, plus optional suffix.
        base = ("/local-routes/static-routes/static[prefix=%s]"
                "/next-hops/next-hop[index=%d]") % (self.prefix, self.index)
        return base + suffix

    def test0000(self):
        # The prefix argument is mandatory.
        self.assertArgs(["prefix"])

    def test0100(self):
        # Delete the route, but only if it is currently configured.
        xpath = self._next_hop_xpath()
        if not self.gNMIGet(xpath + "/config"):
            self.log("Route to %s via next-hop %d not configured",
                     self.prefix, self.index)
            return
        self.assertTrue(self.gNMISetDelete(xpath),
                        "gNMI Delete did not succeed.")

    @retry(exceptions=AssertionError, tries=3, delay=10)
    def test0200(self):
        # After deletion, the /config container must be gone.
        response = self.gNMIGet(self._next_hop_xpath("/config"))
        self.assertIsNone(
            response,
            "Route to %s via next-hop %d still configured" % (self.prefix,
                                                              self.index))

    @retry(exceptions=AssertionError, tries=3, delay=10)
    def test0300(self):
        # After deletion, the /state container must be gone too.
        response = self.gNMIGet(self._next_hop_xpath("/state"))
        self.assertIsNone(
            response,
            "Route to %s via next-hop %d still present" % (self.prefix,
                                                           self.index))
class CheckRouteState(testbase.TestCase):
    """Checks the state on a static route.

    1. A gNMI Get message on the /state container.

    All arguments are read from the Test YAML description.

    Args:
        description: Optional text description of the route.
        prefix: Destination prefix of the static route.
        next_hop: IP of the next hop of the route.
        index: Index of the next hop for the prefix. Defaults to 0.
        metric: Optional numeric metric of the next hop for the prefix.
    """
    description = None
    prefix = ""
    next_hop = ""
    index = 0
    metric = None

    @retry(exceptions=AssertionError, tries=3, delay=10)
    def test0100(self):
        """Fetch /state via gNMI and compare against the local model."""
        xpath = ("/local-routes/static-routes/static[prefix=%s]"
                 "/next-hops/next-hop[index=%d]/state") % (
                     self.prefix, self.index)
        # Build the expected next-hop /state container step by step.
        model = openconfig_local_routing()
        static_entry = model.local_routes.static_routes.static.add(
            prefix=self.prefix)
        next_hop_entry = static_entry.next_hops.next_hop.add(index=self.index)
        ste = next_hop_entry.state
        ste._set_index(self.index)
        ste._set_next_hop(self.next_hop)
        if self.metric is not None:
            ste._set_metric(self.metric)
        resp_val = self.gNMIGetJson(xpath)
        self.assertJsonModel(
            resp_val, ste,
            'gNMI Get on the /state container does not match model')
        want = json.loads(
            schema.removeOpenConfigPrefix(
                pybindJSON.dumps(ste, mode='ietf')))
        self.assertJsonCmp(resp_val, want)
class CheckRouteConfig(testbase.TestCase):
    """Checks the configuration on a static route.

    1. A gNMI Get message on the /config container.

    All arguments are read from the Test YAML description.

    Args:
        description: Optional text description of the route.
        prefix: Destination prefix of the static route.
        next_hop: IP of the next hop of the route.
        index: Index of the next hop for the prefix. Defaults to 0.
        metric: Optional numeric metric of the next hop for the prefix.
    """
    description = None
    prefix = ""
    next_hop = ""
    index = 0
    metric = None

    @retry(exceptions=AssertionError, tries=3, delay=10)
    def test0100(self):
        """Fetch /config via gNMI and compare against the local model."""
        xpath = ("/local-routes/static-routes/static[prefix=%s]"
                 "/next-hops/next-hop[index=%d]/config") % (
                     self.prefix, self.index)
        # BUGFIX: the original chain ended in `.config.config()`, which looks
        # up (and calls) a non-existent `config` child of the /config
        # container; mirror CheckRouteState and use the container attribute.
        cfg = openconfig_local_routing().local_routes.static_routes.static.\
            add(prefix=self.prefix).next_hops.next_hop.add(index=self.index).\
            config
        cfg._set_index(self.index)
        cfg._set_next_hop(self.next_hop)
        if self.metric is not None:
            cfg._set_metric(self.metric)
        resp_val = self.gNMIGetJson(xpath)
        self.assertJsonModel(
            resp_val, cfg,
            'gNMI Get on the /config container does not match model')
        want = json.loads(
            schema.removeOpenConfigPrefix(
                pybindJSON.dumps(cfg, mode='ietf')))
        self.assertJsonCmp(resp_val, want)
``` |
{
"source": "JoseIgnacioTamayo/SSHRemoteControl",
"score": 3
} |
#### File: sshRemoteControl/lib/Devices.py
```python
import paramiko
import time
import sys
import paramiko.ssh_exception as ParamikoExcept
from socket import error as SocketError
# Development log
# paramiko.common.logging.basicConfig(level=paramiko.common.INFO)

# Module-wide switch: when True, caught exceptions are echoed to stderr.
_debug = False
_bufferSize = 1024 # Buffer for SSH connection
# Device-type identifiers understood by createDevice() (case-insensitive).
listOfDeviceTypes = ("ciscoios", "ciscowlc", "linux")
def createDevice(hostname, typeClass=""):
    """Return a Device object, created for the hostname.

    Parameters:
        * hostname: Ip or hostname to connect to via SSH
        * typeClass: A class of device. If "" or not in 'Devices.listOfDevices',
          the default Device is created. This is a case-insensitive string.
    """
    # Dispatch table replaces the if/elif chain; unknown types fall back to
    # the generic Device, exactly as before.
    device_classes = {
        "ciscoios": DeviceCiscoIOS,
        "ciscowlc": DeviceCiscoWLC,
        "linux": DeviceLinux,
    }
    device_class = device_classes.get(typeClass.lower(), Device)
    return device_class(hostname)
class Device(object):
    """Represents a remote controlled device connected with SSH.

    The Device class is a generic that interacts with the Activity module.
    Any specific device must inherit from this Device class and
    implement the methods login() and superuser().

    Source:
    http://jessenoller.com/blog/2009/02/05/ssh-programming-with-paramiko-completely-different,
    http://stackoverflow.com/questions/25101619/reading-output-of-top-command-using-paramiko

    Example:
        aDevice = createDevice('localhost', typeClass='ciscoios')
        aDevice.connect(username, password)
        aDevice.superuser(password)  # if needed
        aDevice.run(["", "", ""], outfile)
        aDevice.logout()
    """

    def __init__(self, hostname):
        """Build the device around an SSH client; no connection yet.

        Parameters:
            * hostname: Ip or hostname to connect to via SSH
        """
        self.hostname = hostname
        self.SSHClient = paramiko.SSHClient()
        self.RemoteShell = None
        self.username = ""
        self.password = ""

    def connect(self, username, password):
        """Brings up the SSH connection, if possible, using the credentials.

        No command is typed once the SSH session is open.
        Returns True if connection ok, False if not.
        """
        self.username = username
        self.password = password
        try:
            # Accept unknown host keys so first-time connections do not fail.
            self.SSHClient.set_missing_host_key_policy(
                paramiko.AutoAddPolicy())
            self.SSHClient.connect(
                self.hostname,
                username=username,
                password=password,
                allow_agent=False,
                look_for_keys=False)
            self.RemoteShell = self.SSHClient.invoke_shell()
            return True
        except (ParamikoExcept.SSHException, SocketError) as e:
            if _debug:
                sys.stderr.write(
                    "Device.connect to '%s' caused Exception %s"
                    % (self.hostname, e))
            return False

    def run(self, commands, outfile):
        r"""Run a list of commands and writes the output to a file.

        File must be already opened when passed.
        Returns True if all commands were sent ok, False if not.

        Parameters:
            * commands: a list of strings with the commands.
              No need to have "\n" at the end.
            * outfile: a file pointer to write data to.
              This file must have been opened and accepting input.
        """
        cmd = ""
        try:
            # Drain any banner/output pending from the connection.
            while self.RemoteShell.recv_ready():
                data = self.RemoteShell.recv(_bufferSize).decode("utf-8")
                outfile.write(data)
            for cmd in commands:
                # send() returning 0 means the channel is closed. IOError is
                # OSError, which SocketError aliases in Python 3, so the
                # except clause below catches it and returns False.
                if self.RemoteShell.send(cmd + "\n") == 0:
                    raise IOError
                time.sleep(2)
                while self.RemoteShell.recv_ready():
                    data = self.RemoteShell.recv(_bufferSize).decode("utf-8")
                    outfile.write(data)
            return True
        except (ParamikoExcept.SSHException, SocketError, UnicodeDecodeError) as e:
            if _debug:
                sys.stderr.write(
                    "Device.run: Command '%s' caused exception %s"
                    % (cmd, e))
            return False

    def superuser(self, password):
        """Do nothing.

        The Default device does not have an ENABLE mechanism.
        This is to be implemented in the Child classes.
        Return True always.
        """
        return True

    def login(self):
        """Do nothing.

        The Default device does not have a Login mechanism,
        the SSH connection should be enough.
        This is to be implemented in the Child classes.
        Returns True always.
        """
        return True

    def logout(self):
        """Close the SSH session without sending any command.

        The session might not be available for logout if any 'close' or
        'logout' was among the executed commands.
        Returns True on success, False on error.
        """
        try:
            self.RemoteShell.close()
            self.SSHClient.close()
            return True
        except (ParamikoExcept.SSHException, SocketError) as e:
            if _debug:
                sys.stderr.write("Device.logout: Exception %s" % e)
            # BUGFIX: the original fell through and returned None here;
            # return False for consistency with the subclasses' logout().
            return False
class DeviceCiscoIOS(Device):
    """Device specific for Cisco IOS switches and routers."""

    def superuser(self, password):
        """Enable mechanism.

        Returns True if at the end there is the '#' in the prompt.

        Parameters:
            * password: <PASSWORD>
        """
        result = False
        try:
            # Drain any pending output before typing the command.
            while self.RemoteShell.recv_ready():
                self.RemoteShell.recv(_bufferSize).decode("utf-8")
            self.RemoteShell.send("enable \n")
            data = ""
            time.sleep(1)
            while self.RemoteShell.recv_ready():
                data = self.RemoteShell.recv(_bufferSize).decode("utf-8")
            # Only type the password if the device actually asked for it.
            if data.find("Password:") != -1:
                self.RemoteShell.send(password + "\n")
                data = ""
                time.sleep(1)
                while self.RemoteShell.recv_ready():
                    data = self.RemoteShell.recv(_bufferSize).decode("utf-8")
                # A '#' in the prompt means privileged (enable) mode.
                if data.find("#") != -1:
                    result = True
        except ParamikoExcept.SSHException as e:
            if _debug:
                sys.stderr.write("DeviceCiscoIOS.superuser: Exception %s" % e)
        return result

    def logout(self):
        """Send the 'end' and 'exit' commands before closing the SSH session.

        Returns False if there were any errors while loging out.
        """
        try:
            # Discard pending output, then leave config mode and the shell.
            while self.RemoteShell.recv_ready():
                self.RemoteShell.recv(_bufferSize).decode("utf-8")
            self.RemoteShell.send("end \n")
            time.sleep(1)
            self.RemoteShell.send("exit \n")
            time.sleep(1)
            super(DeviceCiscoIOS, self).logout()
            return True
        except (ParamikoExcept.SSHException, SocketError) as e:
            if _debug:
                sys.stderr.write("DeviceCiscoIOS.logout Exception %s" % e)
            return False

    def login(self):
        """Send commands just after login.

        The command 'terminal length 0' is sent just after connecting,
        to have all the output placed on Screen.
        Returns True if connection is ok, False if not.
        """
        try:
            while self.RemoteShell.recv_ready():
                self.RemoteShell.recv(_bufferSize).decode("utf-8")
            # Disable paging so command output is never truncated by --More--.
            self.RemoteShell.send("terminal length 0 \n")
            time.sleep(1)
            return True
        except (ParamikoExcept.SSHException, SocketError) as e:
            if _debug:
                sys.stderr.write("DeviceCiscoIOS.login Exception %s" % e)
            return False
class DeviceCiscoWLC(Device):
    """Device specific for Cisco WLC.

    Has an extra login prompt after the SSH is opened.

    By default, when loging out, the changes are NOT saved.
    If the changes are to be saved, put these commands in the list:
        <commands>
        end
        save config
        y
    """

    def logout(self):
        """Send the 'end' and 'exit' commands before closing the SSH session.

        To the 'Save config?' question, the answer given is NO.

        Returns True if the logout sequence completed, False if there
        were any errors.
        """
        try:
            self.RemoteShell.send("end \n")
            time.sleep(1)
            self.RemoteShell.send("exit \n")
            time.sleep(1)
            data = ""
            while self.RemoteShell.recv_ready():
                data = self.RemoteShell.recv(_bufferSize).decode("utf-8")
            # Decline the 'save?' prompt so the running config is untouched.
            if data.find("save?") != -1:
                self.RemoteShell.send("No \n")
                time.sleep(1)
            super(DeviceCiscoWLC, self).logout()
            return True
        except (ParamikoExcept.SSHException, SocketError) as e:
            if _debug:
                sys.stderr.write("DeviceCiscoWLC.logout Exception %s" % e)
            return False

    def login(self):
        """Send the credentials again, because there is this crazy login.

        Returns True if at the end there is the correct prompt '>',
        False if not.
        """
        try:
            self.RemoteShell.send(self.username + "\n")
            time.sleep(1)
            while self.RemoteShell.recv_ready():
                self.RemoteShell.recv(_bufferSize).decode("utf-8")
            self.RemoteShell.send(self.password + "\n")
            time.sleep(1)
            # NOTE(review): 'data' is unbound if nothing is received here —
            # this relies on the WLC always echoing a prompt. Confirm.
            while self.RemoteShell.recv_ready():
                data = self.RemoteShell.recv(_bufferSize).decode("utf-8")
            # The WLC user prompt ends with '>'; anything else means failure.
            if data.find(">") == -1:
                return False
            # Disable paging so long command output is not truncated.
            self.RemoteShell.send("config paging disable \n")
            time.sleep(1)
            return True
        except (ParamikoExcept.SSHException, SocketError) as e:
            if _debug:
                sys.stderr.write("DeviceCiscoWLC.login Exception %s" % e)
            return False
class DeviceLinux(Device):
    """Device specific for Linux using 'su'."""

    def superuser(self, password):
        """Send 'su' command and expects 'Password:' as reply.

        Then inputs the Root password and checks that the prompt
        contains 'root' string.

        Returns True if after the 'su', the prompt says 'root'. False if not.

        Parameters:
            * password: <PASSWORD>
        """
        try:
            # Drain any pending output before typing the command.
            while self.RemoteShell.recv_ready():
                self.RemoteShell.recv(_bufferSize).decode("utf-8")
            self.RemoteShell.send("su \n")
            data = ""
            time.sleep(1)
            while self.RemoteShell.recv_ready():
                data = self.RemoteShell.recv(_bufferSize).decode("utf-8")
            # Only type the password if the shell actually prompted for it.
            if data.find("Password:") != -1:
                self.RemoteShell.send(password + "\n")
                data = ""
                time.sleep(1)
                while self.RemoteShell.recv_ready():
                    data = self.RemoteShell.recv(_bufferSize).decode("utf-8")
                # A root shell prompt typically contains 'root'.
                if data.find("root") != -1:
                    return True
            return False
        except (ParamikoExcept.SSHException, UnicodeDecodeError) as e:
            if _debug:
                sys.stderr.write("DeviceLinux.superuser Exception %s" % e)
            return False

    def logout(self):
        """Type 'logout' command before exiting the SSH Session.

        Returns False if there were any errors.
        """
        try:
            self.RemoteShell.send("logout \n")
            time.sleep(1)
            super(DeviceLinux, self).logout()
            return True
        except (ParamikoExcept.SSHException, SocketError) as e:
            if _debug:
                sys.stderr.write("DeviceLinux.logout Exception %s" % e)
            return False
``` |
{
"source": "joseildofilho/MobileNetV2",
"score": 3
} |
#### File: joseildofilho/MobileNetV2/train.py
```python
import os
import sys
import argparse
import pandas as pd
from mobilenet_v2 import MobileNetv2
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.layers import Conv2D, Reshape, Activation
from keras.models import Model
import tensorflow as tf
from sklearn.metrics import classification_report
import numpy as np
def main(argv):
    """Parse the command line and launch training.

    Note: *argv* is accepted for symmetry with sys.argv, but argparse
    reads sys.argv itself.
    """
    parser = argparse.ArgumentParser()
    # Required arguments.
    parser.add_argument(
        "--classes",
        required=True,  # BUGFIX: documented as required; int(None) crashed later
        help="The number of classes of dataset.")
    # Optional arguments.
    parser.add_argument(
        "--size",
        default=224,
        help="The image size of train sample.")
    parser.add_argument(
        "--batch",
        default=32,
        help="The number of train samples per batch.")
    parser.add_argument(
        "--epochs",
        default=300,
        help="The number of train iterations.")
    parser.add_argument(
        "--weights",
        default=False,
        help="Fine tune with other weights.")
    parser.add_argument(
        "--tclasses",
        default=0,
        help="The number of classes of pre-trained model.")
    parser.add_argument(
        "--tflite",
        "-tl",
        action="store_true",
        # BUGFIX: help said "name of file" but this is a boolean flag.
        help="Also convert the final model to TFLite format.")
    parser.add_argument(
        "--checkpoint",
        default="",
        help='Defines the path to save the checkpoints'
    )
    args = parser.parse_args()

    train(int(args.batch),
          int(args.epochs),
          int(args.classes),
          int(args.size),
          args.weights,
          int(args.tclasses),
          args.tflite,
          args.checkpoint)
def save_map_labels(data):
    """Persist the class-label mapping to ``labels.json`` in the CWD.

    # Arguments
        data: dict mapping class name -> integer index
              (e.g. a Keras generator's ``class_indices``).

    BUGFIX: the original wrote ``str(data)`` — a Python dict repr with
    single quotes — which is not valid JSON despite the .json filename.
    """
    import json  # local import: the module does not import json at top level
    with open('labels.json', 'w') as f:
        json.dump(data, f)
def generate(batch, size):
    """Data generation and augmentation.

    # Arguments
        batch: Integer, batch size.
        size: Integer, image size.
    # Returns
        train_generator: train set generator
        validation_generator: validation set generator
        count1: Integer, number of train set.
        count2: Integer, number of test set.
    """
    # Using data augmentation on the training data only.
    ptrain = 'data/train'
    pval = 'data/validation'

    datagen1 = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=90,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True)

    # Validation data is only rescaled, never augmented.
    datagen2 = ImageDataGenerator(rescale=1. / 255)

    train_generator = datagen1.flow_from_directory(
        ptrain,
        target_size=(size, size),
        batch_size=batch,
        class_mode='categorical')

    # Persist the class-name -> index mapping next to the model artifacts.
    save_map_labels(train_generator.class_indices)

    validation_generator = datagen2.flow_from_directory(
        pval,
        target_size=(size, size),
        batch_size=batch,
        class_mode='categorical')

    # Count files recursively; replaces two hand-rolled nested counting
    # loops with an equivalent sum() over os.walk().
    count1 = sum(len(files) for _, _, files in os.walk(ptrain))
    count2 = sum(len(files) for _, _, files in os.walk(pval))

    return train_generator, validation_generator, count1, count2
def fine_tune(num_classes, weights, model):
    """Re-build model with current num_classes.

    # Arguments
        num_classes, Integer, The number of classes of dataset.
        weights, String, The pre_trained model weights.
        model, Model, The model structure.
    # Returns
        The model with a fresh softmax head of *num_classes* outputs.
    """
    model.load_weights(weights)

    # Replace the classification head: keep everything up to the layer
    # named 'Dropout', then attach a new 1x1 conv + softmax + reshape.
    x = model.get_layer('Dropout').output
    x = Conv2D(num_classes, (1, 1), padding='same')(x)
    x = Activation('softmax', name='softmax')(x)
    output = Reshape((num_classes,))(x)
    model = Model(inputs=model.input, outputs=output)

    return model
def keep_training(weights, model):
    """Load *weights* into *model* to resume a previous training run.

    # Arguments
        weights: String, path to a weights file matching the architecture.
        model: Model, the model structure.
    # Returns
        The model with the weights loaded.
    """
    model.load_weights(weights)
    return model
def create_callbacks(model_checkpoint=""):
    """Build the list of Keras callbacks used during training.

    # Arguments
        model_checkpoint: String, directory/prefix for checkpoint files;
            when empty, no ModelCheckpoint callback is added.
    # Returns
        List of callbacks: EarlyStopping, ReduceLROnPlateau and,
        optionally, ModelCheckpoint.
    """
    callbacks = [
        # Stop when validation accuracy plateaus; keep the best weights.
        EarlyStopping(monitor='val_acc',
                      patience=30,
                      verbose=1,
                      mode='auto',
                      restore_best_weights=True),
        # Halve the learning rate when validation loss stalls.
        ReduceLROnPlateau(monitor="val_loss",
                          factor=0.5,
                          patience=10,
                          verbose=1,
                          mode='auto',
                          min_delta=0.00001,
                          cooldown=0,
                          min_lr=0)
    ]
    if model_checkpoint:
        callbacks.append(
            ModelCheckpoint(
                model_checkpoint + 'weights-{epoch:02d}-{val_loss:.2f}-{val_acc:.3f}.hdf5',
                verbose=1,
                save_best_only=True
            )
        )
    return callbacks
def generate_report(model, generator, batch, count):
    """Print a sklearn classification report for *model* over *generator*.

    # Arguments
        model: trained Keras model exposing predict_generator().
        generator: directory iterator yielding (inputs, targets) batches.
        batch: Integer, batch size used by the generator.
        count: Integer, total number of samples.
    """
    y_pred = model.predict_generator(generator, steps= count//batch)
    list_ = []
    b = count//batch
    for i in range(b):
        aux = generator[i]
        aux2 = 0
        # generator[i] is assumed to be an (inputs, targets) pair; counting
        # its elements and keeping only even positions selects the targets.
        # NOTE(review): assumes the generator yields exactly 2-tuples and
        # that indexing does not reshuffle between predict and here — confirm.
        for j in aux:
            aux2 += 1
            if aux2 % 2 == 0:
                for k in j:
                    list_.append(k.tolist())
    # Class names ordered by their integer index.
    labels = [ i[0] for i in sorted(generator.class_indices.items(), key=lambda x: x[1])]
    print(classification_report(
        np.argmax(list_, axis=1),
        np.argmax(y_pred, axis=1),
        target_names = labels
    ))
def model_feed(size, num_classes):
    '''
    Wrapper around the model creation.

    #Arguments
        num_classes: Integer, The number of classes to create a model.
        size: tuple, The shape of the data.
    #Return:
        The model
    '''
    return MobileNetv2(size, num_classes)
def train(batch, epochs, num_classes, size, weights, tclasses, tflite, checkpoint):
    """Train the model.

    # Arguments
        batch: Integer, The number of train samples per batch.
        epochs: Integer, The number of train iterations.
        num_classes, Integer, The number of classes of dataset.
        size: Integer, image size.
        weights, String, The pre_trained model weights.
        tclasses, Integer, The number of classes of pre-trained model.
        tflite, Boolean, Convert the final model to a tflite model.
        checkpoint, String, The path to store the checkpoints.
    """
    train_generator, validation_generator, count1, count2 = generate(batch, size)

    if weights:
        if tclasses:
            # The pre-trained weights come from a model with a different
            # head: load into a tclasses-sized model, then swap the head.
            print("fine tunning")
            model = model_feed((size, size, 3), tclasses)
            model = fine_tune(num_classes, weights, model)
        else:
            # Same architecture: resume training from the given weights.
            print("Loading Weights")
            model = model_feed((size, size, 3), num_classes)
            model = keep_training(weights, model)
    else:
        model = model_feed((size, size, 3), num_classes)

    opt = Adam()
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    calls = create_callbacks(model_checkpoint = checkpoint)
    hist = model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        steps_per_epoch=count1 // batch,
        validation_steps=count2 // batch,
        epochs=epochs,
        callbacks=calls)

    if not os.path.exists('model'):
        os.makedirs('model')

    # Persist the training history and the final weights.
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/hist.csv', encoding='utf-8', index=False)
    print("Saving weights")
    model.save_weights('model/weights.h5')
    model_name = "mobile_model.h5"
    if tflite:
        print("Saving model")
        model.save(model_name)
        print("Converting model")
        convert_to_lite(model_name)
def convert_to_lite(model, tflite_name="converted_model"):
    """
    Convert a saved model to tf lite format.

    # Arguments
        model: String, path to .h5 file model
        tflite_name: String, output basename; ".tflite" is appended.
    """
    tflite_name += ".tflite"
    converter = tf.contrib.lite.TFLiteConverter.from_keras_model_file(model)
    tflite_model = converter.convert()
    # Context manager closes (and flushes) the file deterministically;
    # the original left the handle dangling.
    with open(tflite_name, "wb") as f:
        f.write(tflite_model)
# Script entry point: delegate CLI parsing and training to main().
if __name__ == '__main__':
    main(sys.argv)
``` |
{
"source": "joseims/asperathos-monitor",
"score": 2
} |
#### File: tests/mocks/mock_influx.py
```python
class MockInfluxConnector():
    """In-memory stand-in for an InfluxConnector object, used in tests.

    Instead of writing to an InfluxDB instance, metric batches handed to
    send_metrics() are kept in ``self.metrics``, grouped by the ``name``
    of the first point in each batch.
    """

    def __init__(self):
        # Maps metric name -> list of batches sent under that name.
        self.metrics = {}

    def send_metrics(self, metrics):
        """Simulate the behavior of the send_metrics function of the
        Influx Connector api.

        Args:
            metrics (list[dict]): batch of metric points; the first
                point's 'name' keys the stored batch.
        """
        # setdefault replaces the original try/except-Exception dance, and
        # no longer swallows unrelated errors while creating the bucket.
        self.metrics.setdefault(metrics[0]['name'], []).append(metrics)
{
"source": "joseins94/toucan",
"score": 2
} |
#### File: toucan/default/util.py
```python
import json
from django.http import JsonResponse
from django.shortcuts import render
from django.utils.translation import gettext_lazy as _
from ocdskit.util import is_package, is_record_package, is_release, is_release_package
from default.data_file import DataFile
from ocdstoucan.settings import OCDS_TOUCAN_MAXFILESIZE, OCDS_TOUCAN_MAXNUMFILES
def ocds_command(request, command):
    """Render the generic page for the given OCDS Toucan *command*."""
    template = 'default/{}.html'.format(command)
    context = {
        'maxNumOfFiles': OCDS_TOUCAN_MAXNUMFILES,
        'maxFileSize': OCDS_TOUCAN_MAXFILESIZE,
        'performAction': '/{}/go/'.format(command),
    }
    return render(request, template, context)
def get_files_from_session(request):
    """Yield a DataFile for each file recorded in the request's session."""
    # Session stores plain dicts of DataFile constructor arguments.
    for fileinfo in request.session['files']:
        yield DataFile(**fileinfo)
def json_response(files, warnings=None):
    """Zip *files* into a result DataFile and return its URL/size as JSON."""
    result = DataFile('result', '.zip')
    result.write_json_to_zip(files)
    payload = {
        'url': result.url,
        'size': result.size,
    }
    if warnings:
        payload['warnings'] = warnings
    return JsonResponse(payload)
def make_package(request, published_date, method, warnings):
    """Build a package from the session's files and return it as JSON.

    Files whose JSON content is already a list contribute each element;
    any other content is appended as a single item.
    """
    items = []
    for data_file in get_files_from_session(request):
        content = data_file.json()
        if isinstance(content, list):
            items.extend(content)
        else:
            items.append(content)
    result = {'result.json': method(items, published_date=published_date)}
    return json_response(result, warnings=warnings)
def invalid_request_file_message(f, file_type):
    """Return a translated error message if *f* is not valid for *file_type*.

    Returns None when the file is acceptable.  ``file_type`` is either a
    single kind (e.g. 'record-package') or a space-separated set of
    accepted kinds (e.g. 'package release').
    """
    try:
        # Only validate JSON files.
        if file_type == 'csv xlsx zip':
            return
        data = json.load(f)
        if file_type == 'record-package':
            if not is_record_package(data):
                return _('Not a record package')
        elif file_type == 'release-package':
            if not is_release_package(data):
                return _('Not a release package')
        elif file_type == 'package release':
            if not is_release(data) and not is_package(data):
                return _('Not a release or package')
        elif file_type == 'package package-array':
            # Accept a single package or a list whose elements are all packages.
            if (isinstance(data, list) and any(not is_package(item) for item in data) or
                    not isinstance(data, list) and not is_package(data)):
                return _('Not a package or list of packages')
        elif file_type == 'release release-array':
            # Accept a single release or a list whose elements are all releases.
            if (isinstance(data, list) and any(not is_release(item) for item in data) or
                    not isinstance(data, list) and not is_release(data)):
                return _('Not a release or list of releases')
        else:
            # Unknown file_type values are reported rather than silently passed.
            return _('"%(type)s" not recognized') % {'type': file_type}
    except json.JSONDecodeError:
        return _('Error decoding JSON')
```
#### File: toucan/tests/test_data_file.py
```python
import unittest
from datetime import date
from default.data_file import DataFile
class DataFileTestCase(unittest.TestCase):
    """Unit tests for DataFile's repr (the file's storage path)."""

    def test_repr_with_folder(self):
        # An explicit folder is used verbatim as the path prefix.
        data_file = DataFile('result', '.zip', id='identifier', folder='directory')
        assert repr(data_file) == 'directory/result-identifier.zip'

    def test_repr_without_folder(self):
        # Without a folder, today's date (YYYY-MM-DD) becomes the prefix.
        data_file = DataFile('result', '.zip', id='identifier')
        assert repr(data_file) == '{:%Y-%m-%d}/result-identifier.zip'.format(date.today())
```
#### File: toucan/tests/test_upgrade.py
```python
from tests import ViewTestCase, ViewTests
class UpgradeTestCase(ViewTestCase, ViewTests):
    """Tests for the /upgrade/ view using OCDS 1.0 input fixtures."""

    url = '/upgrade/'
    # One record package, one release package, one bare release.
    files = [
        '1.0/record-packages/ocds-213czf-000-00001.json',
        '1.0/release-packages/0001-tender.json',
        '1.0/releases/0001-planning.json',
    ]

    def test_go_with_files(self):
        # Each produced file name (matched by regex, including a UUID) maps
        # to the expected upgraded result fixture.
        self.assertResults({'type': 'package release'}, {}, {
            r'^ocds-213czf-000-00001-[0-9a-f-]{36}-upgraded.json$': 'results/upgrade_record-package.json',
            r'^0001-tender-[0-9a-f-]{36}-upgraded.json$': 'results/upgrade_release-package.json',
            r'^0001-planning-[0-9a-f-]{36}-upgraded.json$': 'results/upgrade_release.json',
        }, load_json=True)
``` |
{
"source": "josejachuf/rest",
"score": 2
} |
#### File: rest/emmett_rest/ext.py
```python
from typing import Any, Dict, List, Optional, Type, Union
from emmett.extensions import Extension, Signals, listen_signal
from emmett.orm.models import MetaModel
from .openapi.mod import OpenAPIModule
from .rest import AppModule, RESTModule
from .parsers import Parser
from .serializers import Serializer
from .wrappers import (
wrap_method_on_obj,
wrap_module_from_app,
wrap_module_from_module
)
class REST(Extension):
    """Emmett extension that wires REST modules into an application.

    On load it attaches ``rest_module`` factory methods to the app and to
    app modules, and it exposes a factory for an OpenAPI docs module.
    """

    # Defaults merged with user configuration (pagination, query params,
    # envelopes, default/disabled REST methods, etc.).
    default_config = dict(
        default_module_class=RESTModule,
        default_serializer=Serializer,
        default_parser=Parser,
        page_param='page',
        pagesize_param='page_size',
        sort_param='sort_by',
        query_param='where',
        min_pagesize=1,
        max_pagesize=50,
        default_pagesize=20,
        default_sort=None,
        base_path='/',
        id_path='/<int:rid>',
        list_envelope='data',
        single_envelope=False,
        groups_envelope='data',
        use_envelope_on_parse=False,
        serialize_meta=True,
        meta_envelope='meta',
        default_enabled_methods=[
            'index', 'create', 'read', 'update', 'delete'
        ],
        default_disabled_methods=[],
        use_save=True,
        use_destroy=True
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): imported here rather than at module level —
        # presumably to avoid an import cycle; confirm before moving.
        from .serializers import serialize
        from .parsers import parse_params
        self._serialize = serialize
        self._parse_params = parse_params

    @listen_signal(Signals.before_database)
    def _configure_models_attr(self):
        # Give every model an inheritable 'rest_rw' map; the two booleans
        # for 'id' are presumably (readable, writable) — verify in RESTModule.
        MetaModel._inheritable_dict_attrs_.append(
            ('rest_rw', {'id': (True, False)})
        )

    def on_load(self):
        # Attach rest_module factories to AppModule instances and to the app.
        setattr(AppModule, 'rest_module', wrap_module_from_module(self))
        self.app.rest_module = wrap_method_on_obj(
            wrap_module_from_app(self),
            self.app
        )

    @property
    def module(self):
        # Module class used when callers don't pass one explicitly.
        return self.config.default_module_class

    @property
    def serialize(self):
        return self._serialize

    @property
    def parse_params(self):
        return self._parse_params

    def docs_module(
        self,
        import_name: str,
        name: str,
        title: str,
        version: str,
        modules_tree_prefix: str,
        description: Optional[str] = None,
        tags: Optional[List[Dict[str, Any]]] = None,
        servers: Optional[List[Dict[str, Union[str, Any]]]] = None,
        terms_of_service: Optional[str] = None,
        contact: Optional[Dict[str, Union[str, Any]]] = None,
        license_info: Optional[Dict[str, Union[str, Any]]] = None,
        security_schemes: Optional[Dict[str, Any]] = None,
        produce_schemas: bool = False,
        expose_ui: Optional[bool] = None,
        ui_path: str = "/docs",
        url_prefix: Optional[str] = None,
        hostname: Optional[str] = None,
        module_class: Optional[Type[OpenAPIModule]] = None,
        **kwargs: Any
    ):
        """Create and register an OpenAPI documentation module on the app.

        All OpenAPI metadata (title, version, tags, servers, contact,
        license, security schemes, UI options) is forwarded via ``opts``.
        """
        module_class = module_class or OpenAPIModule
        return module_class.from_app(
            self.app,
            import_name=import_name,
            name=name,
            template_folder=None,
            template_path=None,
            static_folder=None,
            static_path=None,
            url_prefix=url_prefix,
            hostname=hostname,
            cache=None,
            root_path=None,
            pipeline=[],
            injectors=[],
            opts={
                'title': title,
                'version': version,
                'modules_tree_prefix': modules_tree_prefix,
                'description': description,
                'tags': tags,
                'servers': servers,
                'terms_of_service': terms_of_service,
                'contact': contact,
                'license_info': license_info,
                'security_schemes': security_schemes,
                'produce_schemas': produce_schemas,
                'expose_ui': expose_ui,
                'ui_path': ui_path
            },
            **kwargs
        )
```
#### File: emmett_rest/queries/validation.py
```python
from __future__ import annotations
from datetime import datetime
from typing import Any, Callable, Dict, List
from emmett.orm import geo
# Maps the geometry "type" value (upper-cased) to the Emmett ORM geometry
# constructor used to build the geometry object.
_geo_helpers = {
    'POINT': geo.Point,
    'LINE': geo.Line,
    'LINESTRING': geo.Line,
    'POLYGON': geo.Polygon
}


def validate_default(v: Any) -> Any:
    """Identity validator: accept any operand unchanged.

    (Was a lambda assignment, which PEP 8 E731 discourages.)
    """
    return v
def _tuplify_list(v: List[Any]):
rv = []
for el in v:
if isinstance(el, list):
el = _tuplify_list(el)
rv.append(el)
return tuple(rv)
def op_validation_generator(*types) -> Callable[[Any], Any]:
    """Build a validator that asserts its value is an instance of *types*."""
    def _validator(value: Any) -> Any:
        assert isinstance(value, types)
        return value
    return _validator
def validate_glue(v: Any) -> List[Dict[str, Any]]:
    """Validate the operand of $and/$or: a list of condition dicts."""
    assert isinstance(v, list)
    assert all(isinstance(item, dict) for item in v)
    return v
def validate_geo(v: Any) -> Any:
    """Validate a GeoJSON-style dict and build the ORM geometry object.

    The dict must have exactly the keys 'type' and 'coordinates'; any
    failure (including construction errors) surfaces as AssertionError.
    """
    assert isinstance(v, dict) and set(v.keys()) == {'type', 'coordinates'}
    helper = _geo_helpers.get(v['type'].upper())
    coordinates = v['coordinates']
    assert helper and isinstance(coordinates, list)
    try:
        return helper(*_tuplify_list(coordinates))
    except Exception:
        raise AssertionError
def validate_geo_dwithin(v: Any) -> Any:
    """Validate a {'geometry', 'distance'} operand for $geo.dwithin."""
    assert isinstance(v, dict)
    assert set(v) == {'geometry', 'distance'}
    assert v['distance']
    return (validate_geo(v['geometry']), v['distance'])
# Maps query operators to their operand validators.  Each validator either
# returns the (possibly converted) operand or raises AssertionError.
op_validators = {
    # Logical glue: operand is a list of condition dicts.
    '$and': validate_glue,
    '$or': validate_glue,
    # Comparison operators; $eq/$ne accept any value.
    '$eq': validate_default,
    '$not': op_validation_generator(dict),
    '$ne': validate_default,
    '$in': op_validation_generator(list),
    '$nin': op_validation_generator(list),
    '$lt': op_validation_generator(int, float, datetime),
    '$gt': op_validation_generator(int, float, datetime),
    '$le': op_validation_generator(int, float, datetime),
    '$ge': op_validation_generator(int, float, datetime),
    '$lte': op_validation_generator(int, float, datetime),
    '$gte': op_validation_generator(int, float, datetime),
    '$exists': op_validation_generator(bool),
    # Pattern matching: operands are passed through unvalidated.
    '$like': validate_default,
    '$ilike': validate_default,
    '$regex': validate_default,
    '$iregex': validate_default,
    # Geo operators: operand is a GeoJSON-style geometry dict.
    '$geo.contains': validate_geo,
    '$geo.equals': validate_geo,
    '$geo.intersects': validate_geo,
    '$geo.overlaps': validate_geo,
    '$geo.touches': validate_geo,
    '$geo.within': validate_geo,
    '$geo.dwithin': validate_geo_dwithin
}
```
#### File: rest/tests/test_endpoints_additional.py
```python
import pytest
from emmett.orm import Field, Model
class Sample(Model):
    # Column names shadow the str/int/float builtins inside the class body
    # only; they are referenced later as field names in the tests.
    str = Field()
    int = Field.int(default=0)
    float = Field.float(default=0.0)
@pytest.fixture(scope='function')
def db(migration_db):
    # Fresh migrated database containing only the Sample table.
    return migration_db(Sample)


@pytest.fixture(scope='function')
def rest_app(app, db):
    """App exposing a Sample REST module with only the extra endpoints."""
    app.pipeline = [db.pipe]
    mod = app.rest_module(
        __name__, 'sample', Sample, url_prefix='sample',
        enabled_methods=['group', 'stats', 'sample']
    )
    # Restrict which fields group/stats endpoints may operate on.
    mod.grouping_allowed_fields = ['str']
    mod.stats_allowed_fields = ['int', 'float']
    return app


@pytest.fixture(scope='function', autouse=True)
def db_sample(db):
    # Seed three rows: two 'foo' (int 0 and 5) and one 'bar' (int 10).
    with db.connection():
        Sample.create(str='foo')
        Sample.create(str='foo', int=5, float=5.0)
        Sample.create(str='bar', int=10, float=10.0)


@pytest.fixture(scope='function')
def client(rest_app):
    return rest_app.test_client()
def test_grouping(client, json_load):
    """/group/str counts rows per distinct value, sorted by count desc."""
    req = client.get('/sample/group/str', query_string={'sort_by': '-count'})
    assert req.status == 200
    data = json_load(req.data)
    # Two distinct values in the fixture data: 'foo' (2 rows), 'bar' (1 row).
    assert data['meta']['total_objects'] == 2
    assert data['data'][0]['value'] == 'foo'
    assert data['data'][0]['count'] == 2
    assert data['data'][1]['value'] == 'bar'
    assert data['data'][1]['count'] == 1


def test_stats(client, json_load):
    """/stats returns min/max/avg per requested field."""
    req = client.get('/sample/stats', query_string={'fields': 'int,float'})
    assert req.status == 200
    data = json_load(req.data)
    # Fixture values are 0, 5, 10 for both columns.
    assert data['int']['min'] == 0
    assert data['int']['max'] == 10
    assert data['int']['avg'] == 5
    assert data['float']['min'] == 0.0
    assert data['float']['max'] == 10.0
    assert data['float']['avg'] == 5.0


def test_sample(client, json_load):
    """/sample (the endpoint) returns all rows with pagination metadata."""
    req = client.get('/sample/sample')
    assert req.status == 200
    data = json_load(req.data)
    assert data['meta']['total_objects'] == 3
    assert not data['meta']['has_more']
```
#### File: rest/tests/test_envelopes.py
```python
import pytest
from emmett.orm import Field, Model
class Sample(Model):
    # Single text column; the name shadows the builtin `str` only inside
    # the class body.
    str = Field()
@pytest.fixture(scope='function')
def db(migration_db):
    # Fresh migrated database containing only the Sample table.
    return migration_db(Sample)


@pytest.fixture(scope='function')
def rest_app(app, db):
    # The REST module itself is registered by the client fixtures below,
    # so each can choose its own envelope configuration.
    app.pipeline = [db.pipe]
    return app


@pytest.fixture(scope='function', autouse=True)
def db_sample(db):
    # Seed one row so index/get endpoints have something to return.
    with db.connection():
        Sample.create(str='foo')


@pytest.fixture(scope='function')
def client_default(rest_app):
    # Module with default envelopes ('data' for lists, none for singles).
    rest_app.rest_module(
        __name__, 'sample', Sample, url_prefix='sample'
    )
    return rest_app.test_client()


@pytest.fixture(scope='function')
def client_envelopes(rest_app):
    # Module with custom envelopes, also applied when parsing input bodies.
    rest_app.rest_module(
        __name__, 'sample', Sample, url_prefix='sample',
        single_envelope='sample', list_envelope='samples',
        use_envelope_on_parse=True
    )
    return rest_app.test_client()
def test_default_index(client_default, json_load):
    """Default list responses are wrapped in 'data' plus 'meta'."""
    req = client_default.get('/sample')
    assert req.status == 200
    data = json_load(req.data)
    assert {'data', 'meta'} == set(data.keys())


def test_default_get(client_default, json_load, db):
    """Default single-record responses are the bare record (no envelope)."""
    with db.connection():
        row = Sample.first()
    req = client_default.get(f'/sample/{row.id}')
    assert req.status == 200
    data = json_load(req.data)
    assert {'id', 'str'} == set(data.keys())


def test_envelopes_index(client_envelopes, json_load):
    """Custom list envelope 'samples' replaces the default 'data'."""
    req = client_envelopes.get('/sample')
    assert req.status == 200
    data = json_load(req.data)
    assert {'samples', 'meta'} == set(data.keys())


def test_envelopes_get(client_envelopes, json_load, db):
    """Custom single envelope 'sample' wraps the record."""
    with db.connection():
        row = Sample.first()
    req = client_envelopes.get(f'/sample/{row.id}')
    assert req.status == 200
    data = json_load(req.data)
    assert {'sample'} == set(data.keys())


def test_envelopes_create(client_envelopes, json_load, json_dump):
    """With use_envelope_on_parse, POST bodies are read from the envelope."""
    req = client_envelopes.post(
        '/sample',
        data=json_dump({'sample': {'str': 'foo'}}),
        headers=[('content-type', 'application/json')]
    )
    assert req.status == 201
    data = json_load(req.data)
    assert {'sample'} == set(data.keys())
    assert data['sample']['id']


def test_envelopes_update(client_envelopes, json_load, json_dump):
    """PUT bodies are also read from the envelope and echoed back in it."""
    req = client_envelopes.post(
        '/sample',
        data=json_dump({'sample': {'str': 'foo'}}),
        headers=[('content-type', 'application/json')]
    )
    data = json_load(req.data)
    rid = data['sample']['id']
    req = client_envelopes.put(
        f'/sample/{rid}',
        data=json_dump({'sample': {'str': 'baz'}}),
        headers=[('content-type', 'application/json')]
    )
    assert req.status == 200
    data = json_load(req.data)
    assert data['sample']['str'] == 'baz'
``` |
{
"source": "josejnra/publishing-python-package",
"score": 3
} |
#### File: src/my_first_package/list_dir.py
```python
import os
from collections.abc import Iterator
def list_directories() -> Iterator[str]:
    """Yield the name of each entry in the current working directory.

    Note: os.listdir returns files as well as directories, so every entry
    name is yielded, not only directories — TODO confirm intent vs. name.

    The original annotation ``-> str`` was wrong: this is a generator
    function, so its return type is an iterator of strings.
    """
    yield from os.listdir()
``` |
{
"source": "josejo911/Mini4-Mancala",
"score": 3
} |
#### File: josejo911/Mini4-Mancala/Montecarlo.py
```python
from Rules import Rules
import random
import copy
class Montecarlo:
    """Interactive Mancala game where the AI (player 1) picks moves by
    Monte Carlo simulation: random playouts from each legal first move,
    choosing the move with the most simulated wins."""

    def __init__(self, n=1000):
        # Number of random playouts simulated before each AI decision loop.
        self.iterations = n
        self.game = Rules()

    def gameOver(self):
        """Print the final result and terminate the process."""
        winner = self.game.get_winner(end=True)
        if winner == 1:
            print('AI Winner')
        elif winner == 0:
            print('The game ends in Tie')
        else:
            print('Player One Wins')
        exit(0)

    def start(self):
        """Run the game loop: simulate, play the AI move, then read the
        human move from stdin.  Exits only via gameOver()."""
        finish = False
        # Simulated win counts per AI pit (indices 0-5).
        # NOTE(review): never reset between turns, so earlier simulations
        # keep influencing later move choices — confirm this is intended.
        results = [0,0,0,0,0,0]
        while not finish:
            # Monte Carlo phase: random playouts on deep copies of the game.
            for i in range(self.iterations):
                copyActualGame = copy.deepcopy(self.game)
                posibleMove = copyActualGame.posibleMove()
                if posibleMove == []:
                    self.gameOver()
                frstMove = random.choice(posibleMove)
                copyActualGame.play(frstMove)
                # Play the rest of the simulated game with random moves.
                while True:
                    posibleMove = copyActualGame.posibleMove()
                    if (posibleMove == []):
                        break
                    copyActualGame.play(random.choice(posibleMove))
                # Credit the first move if the playout ends in an AI win.
                if copyActualGame.get_winner() == 1:
                    results[frstMove] += 1
            # AI phase: keep playing while it is still the AI's turn
            # (landing in its own store grants an extra turn).
            while self.game.plyrTurn == 1:
                moves = results.index(max(results))
                self.game.play(moves)
                print('AI Moves {}'.format(moves))
            # Human phase: read moves from stdin until the turn passes back.
            while self.game.plyrTurn == 2:
                print(self.game)
                posibleMove = self.game.posibleMove()
                if posibleMove == []:
                    self.gameOver()
                    print('Game Over')
                # Human pits are board indices 7-12; show them as 1-6.
                for number in range(len(posibleMove)):
                    posibleMove[number] -= 6
                print('Posible moves are: {}'.format(posibleMove))
                player2moves = input()
                # Re-prompt until a legal move is entered.
                if int(player2moves) not in posibleMove:
                    while True:
                        print('Bro... thats not a move ')
                        player2moves = input()
                        if int(player2moves) in posibleMove:
                            break
                # Convert the displayed number back to a board index.
                self.game.play(int(player2moves)+6)
                print(self.game)
# Script entry point: start an interactive game (AI vs. human on stdin).
juego = Montecarlo()
juego.start()
```
#### File: josejo911/Mini4-Mancala/Rules.py
```python
class Rules:
    """Mancala board state and move rules.

    Board layout (``self.table``): indices 0-5 are player 1's pits,
    index 6 is player 1's store, indices 7-12 are player 2's pits and
    index 13 is player 2's store.  Every pit starts with 4 stones.
    """

    def __init__(self):
        self.table = [4,4,4,4,4,4,0,4,4,4,4,4,4,0]
        # Whose turn it is: 1 or 2.
        self.plyrTurn = 1
        self.frstMove = None

    def play(self,move_position):
        """Pick up the stones at *move_position*, sow them, apply special
        rules, and advance the turn."""
        stack = self.table[move_position]
        self.table[move_position] = 0
        last_turn = self.dist(stack,move_position)
        # specialMove may call nextTurn itself; combined with the
        # unconditional nextTurn below, that keeps the same player's turn.
        self.specialMove(last_turn)
        self.nextTurn()

    def dist(self, stack, takenPos):
        """Sow *stack* stones counter-clockwise starting after *takenPos*,
        skipping the opponent's store; return the last stone's index."""
        actualPos = 0
        while stack > 0:
            takenPos += 1
            # Wrap around the end of the board via a negative offset.
            if (actualPos+takenPos) >= len(self.table):
                actualPos -= 14
            # Player 1 skips player 2's store (13); player 2 skips 6.
            if self.plyrTurn == 1 and (actualPos+takenPos) != 13:
                self.table[actualPos+takenPos] += 1
                stack -= 1
            if self.plyrTurn == 2 and (actualPos+takenPos) != 6:
                self.table[actualPos+takenPos] += 1
                stack -= 1
            lastStone = (actualPos+takenPos)
        return lastStone

    def posibleMove(self):
        """Return the indices of the current player's non-empty pits."""
        posibleMoves = []
        if self.plyrTurn == 1:
            posibleMoves = [0,1,2,3,4,5]
        else:
            posibleMoves = [7,8,9,10,11,12]
        elements_to_delete = []
        for x in posibleMoves:
            if self.table[x] == 0:
                elements_to_delete.append(x)
        for x in elements_to_delete:
            posibleMoves.remove(x)
        return posibleMoves

    def specialMove(self, finalPos):
        """Apply the extra-turn and capture rules for the last stone.

        Landing in the player's own store calls nextTurn(), which cancels
        the nextTurn() in play() and grants an extra turn.  Landing in a
        pit that now holds exactly 1 stone captures that stone plus the
        opposite pit (12 - finalPos) into the player's store.
        NOTE(review): no check that the capture pit is on the player's own
        side — confirm against the intended mancala variant.
        """
        # Jugador 1
        if self.plyrTurn == 1:
            if finalPos == 6:
                self.nextTurn()
            elif self.table[finalPos] == 1:
                self.table[finalPos] = 0
                self.table[6] += 1
                self.table[6] += self.table[12-finalPos]
                self.table[12-finalPos] = 0
        # Jugador 2
        else:
            if finalPos == 13:
                self.nextTurn()
            elif self.table[finalPos] == 1:
                self.table[finalPos] = 0
                self.table[13] += 1
                self.table[13] += self.table[12-finalPos]
                self.table[12-finalPos] = 0

    def nextTurn(self):
        # Toggle between players 1 and 2.
        next_player = self.plyrTurn % 2
        self.plyrTurn = next_player + 1

    def get_winner(self, end=False):
        """Compare each side's stone totals (pits plus store).

        Returns 1 if side 0-6 (the AI in Montecarlo) leads, 2 if side
        7-13 leads, 0 on a tie.  With end=True, also print both totals.
        """
        plyr1 = sum(self.table[0:7])
        plyr2 = sum(self.table[7:14])
        if end:
            print('Player 1: {}'.format(plyr2))
            print('AI: {}'.format(plyr1))
        if (plyr1 > plyr2):
            return 1
        elif(plyr1 < plyr2):
            return 2
        else:
            return 0

    def getFrstMove(self):
        return self.frstMove

    def setFrstMove(self, move_1):
        self.frstMove = move_1

    def print_table(self):
        # Returns (does not print) the raw board list.
        return self.table

    def __str__(self):
        # Render the board with player 2's pits reversed on the top row
        # and both stores on the middle row.
        tabl_reverse = self.table[::-1]
        table_en_string = "  " + str(tabl_reverse[8:]) + "\n"
        table_en_string += str(self.table[6])+ "                      "+ str(self.table[13]) + "\n"
        table_en_string += "  " + str(self.table[7:13])
        return table_en_string
``` |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.