# Beheer/tests.py (from RamonvdW/nhb-apps)
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 <NAME>.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from TestHelpers.e2ehelpers import E2EHelpers
# update with this command:
# for x in `./manage.py show_urls --settings=nhbapps.settings_dev | rev | cut -d'/' -f2- | rev | grep '/beheer/'`; do echo "'$x/',"; done | grep -vE ':object_id>/|/add/|/autocomplete/'
BEHEER_PAGINAS = (
'/beheer/Account/account/',
'/beheer/Account/accountemail/',
'/beheer/BasisTypen/boogtype/',
'/beheer/BasisTypen/indivwedstrijdklasse/',
'/beheer/BasisTypen/kalenderwedstrijdklasse/',
'/beheer/BasisTypen/leeftijdsklasse/',
'/beheer/BasisTypen/teamtype/',
'/beheer/BasisTypen/teamwedstrijdklasse/',
'/beheer/Competitie/competitie/',
'/beheer/Competitie/competitieklasse/',
'/beheer/Competitie/competitiemutatie/',
'/beheer/Competitie/deelcompetitie/',
'/beheer/Competitie/deelcompetitieklasselimiet/',
'/beheer/Competitie/deelcompetitieronde/',
'/beheer/Competitie/kampioenschapschutterboog/',
'/beheer/Competitie/regiocompetitierondeteam/',
'/beheer/Competitie/regiocompetitieschutterboog/',
'/beheer/Competitie/regiocompetitieteam/',
'/beheer/Competitie/regiocompetitieteampoule/',
'/beheer/Functie/functie/',
'/beheer/Functie/verklaringhanterenpersoonsgegevens/',
'/beheer/HistComp/histcompetitie/',
'/beheer/HistComp/histcompetitieindividueel/',
'/beheer/HistComp/histcompetitieteam/',
'/beheer/Kalender/kalenderwedstrijd/',
'/beheer/Kalender/kalenderwedstrijddeeluitslag/',
'/beheer/Kalender/kalenderwedstrijdsessie/',
'/beheer/Logboek/logboekregel/',
'/beheer/Mailer/mailqueue/',
'/beheer/NhbStructuur/nhbcluster/',
'/beheer/NhbStructuur/nhbrayon/',
'/beheer/NhbStructuur/nhbregio/',
'/beheer/NhbStructuur/nhbvereniging/',
'/beheer/NhbStructuur/speelsterkte/',
'/beheer/Overig/sitefeedback/',
'/beheer/Overig/sitetijdelijkeurl/',
'/beheer/Records/besteindivrecords/',
'/beheer/Records/indivrecord/',
'/beheer/Score/score/',
'/beheer/Score/scorehist/',
'/beheer/Sporter/sporter/',
'/beheer/Sporter/sporterboog/',
'/beheer/Sporter/sportervoorkeuren/',
'/beheer/Taken/taak/',
'/beheer/Wedstrijden/competitiewedstrijd/',
'/beheer/Wedstrijden/competitiewedstrijdenplan/',
'/beheer/Wedstrijden/competitiewedstrijduitslag/',
'/beheer/Wedstrijden/wedstrijdlocatie/',
'/beheer/auth/group/',
'/beheer/jsi18n/',
'/beheer/login/',
'/beheer/logout/',
'/beheer/password_change/',
)
class TestBeheer(E2EHelpers, TestCase):
""" unit tests voor de Beheer applicatie """
def setUp(self):
""" initialisatie van de test case """
self.account_admin = self.e2e_create_account_admin()
def test_login(self):
        # verify that the admin login has been replaced by a redirect to our own login
        url = reverse('admin:login')     # internal url
self.assertEqual(url, '/beheer/login/')
self.e2e_logout()
with self.assert_max_queries(20):
resp = self.client.get('/beheer/login/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/account/login/', 302))
with self.assert_max_queries(20):
resp = self.client.get('/beheer/login/?next=/records/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/account/login/?next=/records/', 302))
self.e2e_assert_other_http_commands_not_supported('/beheer/login/')
def test_index(self):
        # before 2FA verification has been done
self.e2e_login(self.account_admin)
        # redirect to the switch-role page
with self.assert_max_queries(20):
resp = self.client.get('/beheer/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/functie/otp-controle/?next=/beheer/', 302))
self.e2e_assert_other_http_commands_not_supported('/beheer/')
        # after 2FA verification
self.e2e_login_and_pass_otp(self.account_admin)
with self.assert_max_queries(20):
resp = self.client.get('/beheer/', follow=True)
self.assertTrue(len(resp.redirect_chain) == 0)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assertContains(resp, '<title>Websitebeheer | Django-websitebeheer</title>')
        # needlessly via the admin login to a post-authentication page
with self.assert_max_queries(20):
resp = self.client.get('/beheer/login/?next=/records/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/records/', 302))
        # needlessly via the admin login without a post-authentication page
with self.assert_max_queries(20):
resp = self.client.get('/beheer/login/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/plein/', 302))
def test_logout(self):
        # verify that the admin login has been replaced by a redirect to our own login
        url = reverse('admin:logout')    # internal url
self.assertEqual(url, '/beheer/logout/')
self.e2e_login_and_pass_otp(self.account_admin)
with self.assert_max_queries(20):
resp = self.client.get('/beheer/logout/', follow=True)
self.assertEqual(resp.redirect_chain[-1], ('/account/logout/', 302))
def test_pw_change(self):
url = reverse('admin:password_change')
self.assertEqual(url, '/beheer/password_change/')
self.e2e_login_and_pass_otp(self.account_admin)
with self.assert_max_queries(20):
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assertContains(resp, 'Nieuw wachtwoord')
self.assertEqual(resp.redirect_chain[-1], ('/account/nieuw-wachtwoord/', 302))
def test_queries(self):
        # verify that all admin pages render correctly
settings.DEBUG = True
self.e2e_login_and_pass_otp(self.account_admin)
for url in BEHEER_PAGINAS:
with self.assert_max_queries(20):
self.client.get(url)
with self.assert_max_queries(20):
self.client.get(url + 'add/')
with self.assert_max_queries(20):
self.client.get(url + '1/change/')
# for
settings.DEBUG = False
# end of file
# L1Trigger/TrackFindingTracklet/python/ProducerKF_cff.py (from Jingyan95/cmssw)
import FWCore.ParameterSet.Config as cms
from L1Trigger.TrackTrigger.ProducerSetup_cff import TrackTriggerSetup
from L1Trigger.TrackerTFP.Producer_cfi import TrackerTFPProducer_params
from L1Trigger.TrackerTFP.ProducerES_cff import TrackTriggerDataFormats
from L1Trigger.TrackerTFP.ProducerLayerEncoding_cff import TrackTriggerLayerEncoding
from L1Trigger.TrackerTFP.KalmanFilterFormats_cff import TrackTriggerKalmanFilterFormats
from L1Trigger.TrackFindingTracklet.ChannelAssignment_cff import ChannelAssignment
from L1Trigger.TrackFindingTracklet.ProducerKF_cfi import TrackFindingTrackletProducerKF_params
TrackFindingTrackletProducerKFin = cms.EDProducer( 'trklet::ProducerKFin', TrackFindingTrackletProducerKF_params )
TrackFindingTrackletProducerKF = cms.EDProducer( 'trackerTFP::ProducerKF', TrackFindingTrackletProducerKF_params )
TrackFindingTrackletProducerTT = cms.EDProducer( 'trklet::ProducerTT', TrackFindingTrackletProducerKF_params )
TrackFindingTrackletProducerAS = cms.EDProducer( 'trklet::ProducerAS', TrackFindingTrackletProducerKF_params )
TrackFindingTrackletProducerKFout = cms.EDProducer( 'trklet::ProducerKFout', TrackFindingTrackletProducerKF_params )
# py_cui/__init__.py (from ne-msft/py_cui)
"""A python library for intuitively creating CUI/TUI interfaces with pre-built widgets.
"""
#
# Author: <NAME>
# Created: 12-Aug-2019
# Docs: https://jwlodek.github.io/py_cui-docs
# License: BSD-3-Clause (New/Revised)
#
# Some python core library imports
import sys
import os
import time
import copy
import shutil # We use shutil for getting the terminal dimensions
import threading # Threading is used for loading icon popups
import logging # Use logging library for debug purposes
# py_cui uses the curses library. On windows this does not exist, but
# there is an open-source windows-curses module that adds curses support
# for python on windows
import curses
# py_cui imports
import py_cui
import py_cui.keys
import py_cui.statusbar
import py_cui.widgets
import py_cui.controls
import py_cui.dialogs
import py_cui.widget_set
import py_cui.popups
import py_cui.renderer
import py_cui.debug
import py_cui.errors
from py_cui.colors import *
# Version number
__version__ = '0.1.3'
def fit_text(width, text, center=False):
"""Fits text to screen size
Helper function to fit text within a given width. Used to fix issue with status/title bar text
being too long
Parameters
----------
width : int
width of window in characters
text : str
input text
center : Boolean
flag to center text
Returns
-------
fitted_text : str
        text truncated or padded to the given width
"""
if width < 5:
return '.' * width
if len(text) >= width:
return text[:width - 5] + '...'
else:
total_num_spaces = (width - len(text) - 1)
if center:
left_spaces = int(total_num_spaces / 2)
right_spaces = int(total_num_spaces / 2)
if(total_num_spaces % 2 == 1):
right_spaces = right_spaces + 1
return ' ' * left_spaces + text + ' ' * right_spaces
else:
return text + ' ' * total_num_spaces
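# Illustrative examples of fit_text behavior (a sketch, not part of the library API):
#
#     fit_text(10, 'hello world')        # -> 'hello...' (5 chars kept + '...')
#     fit_text(12, 'hi')                 # -> 'hi' followed by 9 spaces (left-aligned)
#     fit_text(12, 'hi', center=True)    # -> 4 spaces + 'hi' + 5 spaces (centered)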
class PyCUI:
"""Base CUI class
Main user interface class for py_cui. To create a user interface, you must
first create an instance of this class, and then add cells + widgets to it.
Attributes
----------
cursor_x, cursor_y : int
absolute position of the cursor in the CUI
grid : py_cui.grid.Grid
The main layout manager for the CUI
widgets : dict of str - py_cui.widgets.Widget
dict of widget in the grid
title_bar : py_cui.statusbar.StatusBar
a status bar object that gets drawn at the top of the CUI
status_bar : py_cui.statusbar.StatusBar
a status bar object that gets drawn at the bottom of the CUI
    keybindings : dict of py_cui.keys.KEY_* -> Function
        dict of keybindings checked against in the main CUI loop
height, width : int
height of the terminal in characters, width of terminal in characters
exit_key : key_code
a key code for a key that exits the CUI
simulated_terminal : List[int]
Dimensions for an alternative simulated terminal (used for testing)
"""
def __init__(self, num_rows, num_cols, auto_focus_buttons=True,
exit_key=py_cui.keys.KEY_Q_LOWER, simulated_terminal=None):
"""Constructor for PyCUI class
"""
self._title = 'PyCUI Window'
# When this is not set, the escape character delay
# is too long for exiting focus mode
os.environ.setdefault('ESCDELAY', '25')
# For unit testing purposes, we want to simulate terminal
# dimensions so that we don't get errors
self._simulated_terminal = simulated_terminal
if self._simulated_terminal is None:
term_size = shutil.get_terminal_size()
height = term_size.lines
width = term_size.columns
else:
height = simulated_terminal[0]
width = simulated_terminal[1]
# Init terminal height width. Subtract 4 from height
# for title/status bar and padding
self._height = height
self._width = width
self._height = self._height - 4
# Add status and title bar
self.title_bar = py_cui.statusbar.StatusBar(self._title, BLACK_ON_WHITE)
exit_key_char = py_cui.keys.get_char_from_ascii(exit_key)
self._init_status_bar_text = 'Press - {} - to exit. Arrow Keys to move ' \
'between widgets. Enter to enter focus ' \
'mode.'.format(exit_key_char)
self.status_bar = py_cui.statusbar.StatusBar(self._init_status_bar_text,
BLACK_ON_WHITE)
# Logging object initialization for py_cui
self._logger = py_cui.debug._initialize_logger(self,
name='py_cui')
# Initialize grid, renderer, and widget dict
self._grid = py_cui.grid.Grid(num_rows, num_cols, self._height, self._width, self._logger)
self._renderer = None
self._border_characters = None
self._stdscr = None
self._widgets = {}
self._refresh_timeout = -1
# Variables for determining selected widget/focus mode
self._selected_widget = None
self._in_focused_mode = False
self._popup = None
self._auto_focus_buttons = auto_focus_buttons
# CUI blocks when loading popup is open
self._loading = False
self._stopped = False
self._post_loading_callback = None
self._on_draw_update_func = None
# Top level keybindings. Exit key is 'q' by default
self._keybindings = {}
self._exit_key = exit_key
self._forward_cycle_key = py_cui.keys.KEY_CTRL_LEFT
self._reverse_cycle_key = py_cui.keys.KEY_CTRL_RIGHT
# Callback to fire when CUI is stopped.
self._on_stop = None
def set_refresh_timeout(self, timeout):
"""Sets the CUI auto-refresh timeout to a number of seconds.
Parameters
----------
timeout : int
Number of seconds to wait before refreshing the CUI
"""
# We want the refresh timeout in milliseconds as an integer
self._refresh_timeout = int(timeout * 1000)
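    # For instance (illustrative sketch): a half-second refresh becomes a 500 ms
    # curses input timeout.
    #
    #     root.set_refresh_timeout(0.5)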
def set_on_draw_update_func(self, update_function):
"""Adds a function that is fired during each draw call of the CUI
Parameters
----------
update_function : function
A no-argument or lambda function that is fired at the start of each draw call
"""
self._on_draw_update_func = update_function
def set_widget_cycle_key(self, forward_cycle_key=None, reverse_cycle_key=None):
"""Assigns a key for automatically cycling through widgets in both focus and overview modes
Parameters
----------
        forward_cycle_key : py_cui.keys.KEY
            Key code for the key used to cycle forward through widgets
        reverse_cycle_key : py_cui.keys.KEY
            Key code for the key used to cycle backwards through widgets
        """
if forward_cycle_key is not None:
self._forward_cycle_key = forward_cycle_key
if reverse_cycle_key is not None:
self._reverse_cycle_key = reverse_cycle_key
def enable_logging(self, log_file_path='py_cui_log.txt', logging_level = logging.DEBUG):
"""Function enables logging for py_cui library
Parameters
----------
log_file_path : str
            The target log filepath. Default: 'py_cui_log.txt'
logging_level : int
Default logging level = logging.DEBUG
"""
try:
py_cui.debug._enable_logging(self._logger, filename=log_file_path, logging_level=logging_level)
self._logger.info('Initialized logger')
except PermissionError as e:
print('Failed to initialize logger: {}'.format(str(e)))
def apply_widget_set(self, new_widget_set):
"""Function that replaces all widgets in a py_cui with those of a different widget set
Parameters
----------
new_widget_set : WidgetSet
The new widget set to switch to
Raises
------
TypeError
If input is not of type WidgetSet
"""
if isinstance(new_widget_set, py_cui.widget_set.WidgetSet):
self.lose_focus()
self._widgets = new_widget_set._widgets
self._grid = new_widget_set._grid
self._keybindings = new_widget_set._keybindings
if self._simulated_terminal is None:
if self._stdscr is None:
term_size = shutil.get_terminal_size()
height = term_size.lines
width = term_size.columns
else:
# Use curses termsize when possible to fix resize bug on windows.
height, width = self._stdscr.getmaxyx()
else:
height = self._simulated_terminal[0]
width = self._simulated_terminal[1]
height = height - 4
self._refresh_height_width(height, width)
if self._stdscr is not None:
self._initialize_widget_renderer()
self._selected_widget = new_widget_set._selected_widget
else:
raise TypeError('Argument must be of type py_cui.widget_set.WidgetSet')
def create_new_widget_set(self, num_rows, num_cols):
"""Function that is used to create additional widget sets
Use this function instead of directly creating widget set object instances, to allow
for logging support.
Parameters
----------
num_rows : int
row count for new widget set
num_cols : int
column count for new widget set
Returns
-------
new_widget_set : py_cui.widget_set.WidgetSet
The new widget set object instance
"""
# Use current logging object and simulated terminal for sub-widget sets
return py_cui.widget_set.WidgetSet(num_rows, num_cols, self._logger,
simulated_terminal=self._simulated_terminal)
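    # Illustrative sketch of widget-set switching ('root' and 'second_screen' are
    # hypothetical user-side names, not part of the library):
    #
    #     root = py_cui.PyCUI(3, 3)
    #     second_screen = root.create_new_widget_set(2, 2)
    #     second_screen.add_label('Second screen', 0, 0)
    #     root.add_key_command(py_cui.keys.KEY_S_LOWER,
    #                          lambda: root.apply_widget_set(second_screen))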
# ----------------------------------------------#
# Initialization functions #
    # Used to initialize CUI and its features #
# ----------------------------------------------#
def start(self):
"""Function that starts the CUI
"""
self._logger.info('Starting {} CUI'.format(self._title))
curses.wrapper(self._draw)
def stop(self):
"""Function that stops the CUI, and fires the callback function.
Callback must be a no arg method
"""
self._logger.info('Stopping CUI')
self._stopped = True
def run_on_exit(self, command):
"""Sets callback function on CUI exit. Must be a no-argument function or lambda function
Parameters
----------
command : function
A no-argument or lambda function to be fired on exit
"""
self._on_stop = command
def set_title(self, title):
"""Sets the title bar text
Parameters
----------
title : str
New title for CUI
"""
self._title = title
def set_status_bar_text(self, text):
"""Sets the status bar text when in overview mode
Parameters
----------
text : str
Status bar text
"""
self._init_status_bar_text = text
self.status_bar.set_text(text)
def _initialize_colors(self):
"""Function for initialzing curses colors. Called when CUI is first created.
"""
# Start colors in curses.
# For each color pair in color map, initialize color combination.
curses.start_color()
curses.init_color(curses.COLOR_BLUE, 0, 0, 500)
for color_pair in py_cui.colors._COLOR_MAP.keys():
fg_color, bg_color = py_cui.colors._COLOR_MAP[color_pair]
curses.init_pair(color_pair, fg_color, bg_color)
def _initialize_widget_renderer(self):
"""Function that creates the renderer object that will draw each widget
"""
if self._renderer is None:
self._renderer = py_cui.renderer.Renderer(self, self._stdscr, self._logger)
for widget_id in self._widgets.keys():
self._widgets[widget_id]._assign_renderer(self._renderer)
if self._popup is not None:
self._popup._assign_renderer(self._renderer)
def toggle_unicode_borders(self):
"""Function for toggling unicode based border rendering
"""
if self._border_characters is None or self._border_characters['UP_LEFT'] == '+':
self.set_widget_border_characters('\u256d', '\u256e', '\u2570', '\u256f', '\u2500', '\u2502')
else:
self.set_widget_border_characters('+', '+', '+', '+', '-', '|')
def set_widget_border_characters(self, upper_left_corner, upper_right_corner, lower_left_corner, lower_right_corner, horizontal, vertical):
"""Function that can be used to set arbitrary border characters for drawing widget borders by renderer.
Parameters
----------
upper_left_corner : char
Upper left corner character
upper_right_corner : char
Upper right corner character
lower_left_corner : char
Upper left corner character
lower_right_corner : char
Lower right corner character
horizontal : char
Horizontal border character
vertical : char
Vertical border character
"""
self._border_characters = {
'UP_LEFT': upper_left_corner,
'UP_RIGHT': upper_right_corner,
'DOWN_LEFT': lower_left_corner,
'DOWN_RIGHT': lower_right_corner,
'HORIZONTAL': horizontal,
'VERTICAL': vertical
}
self._logger.info('Set border_characters to {}'.format(self._border_characters))
def get_widgets(self):
"""Function that gets current set of widgets
Returns
-------
widgets : dict of str -> widget
dictionary mapping widget IDs to object instances
"""
return self._widgets
# Widget add functions. Each of these adds a particular type of widget
# to the grid in a specified location.
def add_scroll_menu(self, title, row, column, row_span=1, column_span=1, padx=1, pady=0) -> py_cui.widgets.ScrollMenu:
"""Function that adds a new scroll menu to the CUI grid
Parameters
----------
title : str
The title of the scroll menu
row : int
The row value, from the top down
column : int
The column value from the top down
        row_span=1 : int
            The number of rows to span across
        column_span=1 : int
            The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
Returns
-------
new_scroll_menu : ScrollMenu
A reference to the created scroll menu object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_scroll_menu = py_cui.widgets.ScrollMenu(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger)
self._widgets[id] = new_scroll_menu
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_scroll_menu))))
return new_scroll_menu
def add_checkbox_menu(self, title, row, column, row_span=1, column_span=1, padx=1, pady=0, checked_char='X') -> py_cui.widgets.CheckBoxMenu:
"""Function that adds a new checkbox menu to the CUI grid
Parameters
----------
title : str
The title of the checkbox
row : int
The row value, from the top down
column : int
The column value from the top down
        row_span=1 : int
            The number of rows to span across
        column_span=1 : int
            The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
checked_char='X' : char
The character used to mark 'Checked' items
Returns
-------
new_checkbox_menu : CheckBoxMenu
A reference to the created checkbox object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_checkbox_menu = py_cui.widgets.CheckBoxMenu(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
checked_char)
self._widgets[id] = new_checkbox_menu
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_checkbox_menu))))
return new_checkbox_menu
def add_text_box(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, initial_text = '', password = False) -> py_cui.widgets.TextBox:
"""Function that adds a new text box to the CUI grid
Parameters
----------
title : str
The title of the textbox
row : int
The row value, from the top down
column : int
The column value from the top down
        row_span=1 : int
            The number of rows to span across
        column_span=1 : int
            The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
initial_text='' : str
Initial text for the textbox
password=False : bool
Toggle to show '*' instead of characters.
Returns
-------
new_text_box : TextBox
A reference to the created textbox object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_text_box = py_cui.widgets.TextBox(id,
title,
self._grid,
row, column,
row_span,
column_span,
padx, pady,
self._logger,
initial_text,
password)
self._widgets[id] = new_text_box
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_text_box))))
return new_text_box
def add_text_block(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, initial_text = '') -> py_cui.widgets.ScrollTextBlock:
"""Function that adds a new text block to the CUI grid
Parameters
----------
title : str
The title of the text block
row : int
The row value, from the top down
column : int
The column value from the top down
        row_span=1 : int
            The number of rows to span across
        column_span=1 : int
            The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
initial_text='' : str
Initial text for the text block
Returns
-------
new_text_block : ScrollTextBlock
A reference to the created textblock object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_text_block = py_cui.widgets.ScrollTextBlock(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
initial_text)
self._widgets[id] = new_text_block
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_text_block))))
return new_text_block
def add_label(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0) -> py_cui.widgets.Label:
"""Function that adds a new label to the CUI grid
Parameters
----------
title : str
The title of the label
row : int
The row value, from the top down
column : int
The column value from the top down
        row_span=1 : int
            The number of rows to span across
        column_span=1 : int
            The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
Returns
-------
new_label : Label
A reference to the created label object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_label = py_cui.widgets.Label(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger)
self._widgets[id] = new_label
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_label))))
return new_label
def add_block_label(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, center=True) -> py_cui.widgets.BlockLabel:
"""Function that adds a new block label to the CUI grid
Parameters
----------
title : str
The title of the block label
row : int
The row value, from the top down
column : int
The column value from the top down
        row_span=1 : int
            The number of rows to span across
        column_span=1 : int
            The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
center : bool
flag to tell label to be centered or left-aligned.
Returns
-------
new_label : BlockLabel
A reference to the created block label object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_label = py_cui.widgets.BlockLabel(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
center,
self._logger)
self._widgets[id] = new_label
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_label))))
return new_label
def add_button(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, command=None) -> py_cui.widgets.Button:
"""Function that adds a new button to the CUI grid
Parameters
----------
title : str
The title of the button
row : int
The row value, from the top down
column : int
The column value from the top down
        row_span=1 : int
            The number of rows to span across
        column_span=1 : int
            The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
command=None : Function
A no-argument or lambda function to fire on button press.
Returns
-------
new_button : Button
A reference to the created button object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_button = py_cui.widgets.Button(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
command)
self._widgets[id] = new_button
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_button))))
return new_button
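    # Illustrative sketch of adding a button ('root' and 'save_pressed' are
    # hypothetical user-side names):
    #
    #     def save_pressed():
    #         root.show_message_popup('Saved', 'Your changes were saved')
    #
    #     root.add_button('Save', 2, 0, command=save_pressed)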
def add_slider(self, title, row, column, row_span=1,
column_span=1, padx=1, pady=0,
min_val=0, max_val=100, step=1, init_val=0) -> py_cui.controls.slider.SliderWidget:
"""Function that adds a new label to the CUI grid
Parameters
----------
title : str
The title of the label
row : int
The row value, from the top down
column : int
The column value from the top down
        row_span=1 : int
            The number of rows to span across
        column_span=1 : int
            The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
        min_val=0 : int
            minimum value of the slider
        max_val=100 : int
            maximum value of the slider
        step=1 : int
            amount by which to increment or decrement
        init_val=0 : int
            initial value of the slider
Returns
-------
new_slider : Slider
A reference to the created slider object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_slider = py_cui.controls.slider.SliderWidget(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
min_val,
max_val,
step,
init_val)
self._widgets[id] = new_slider
self._logger.info('Adding widget {} w/ ID {} of type {}'
.format(title, id, str(type(new_slider))))
return new_slider
def get_element_at_position(self, x, y):
"""Returns containing widget for character position
Parameters
----------
x : int
Horizontal character position
y : int
Vertical character position, top down
Returns
-------
in_widget : UIElement
            Widget or popup that contains the position; None if nothing
"""
if self._popup is not None and self._popup._contains_position(x, y):
return self._popup
elif self._popup is None:
for widget_id in self.get_widgets().keys():
if self.get_widgets()[widget_id]._contains_position(x, y):
return self.get_widgets()[widget_id]
return None
def _get_horizontal_neighbors(self, widget, direction):
"""Gets all horizontal (left, right) neighbor widgets
Parameters
----------
widget : py_cui.widgets.Widget
The currently selected widget
direction : py_cui.keys.KEY*
must be an arrow key value
Returns
-------
id_list : list[]
A list of the neighbor widget ids
"""
        if direction not in py_cui.keys.ARROW_KEYS:
return None
_, num_cols = self._grid.get_dimensions()
row_start, col_start = widget.get_grid_cell()
row_span, col_span = widget.get_grid_cell_spans()
id_list = []
if direction == py_cui.keys.KEY_LEFT_ARROW:
col_range_start = 0
col_range_stop = col_start
else:
col_range_start = col_start + col_span
col_range_stop = num_cols
for col in range(col_range_start, col_range_stop):
for row in range(row_start, row_start + row_span):
for widget_id in self.get_widgets().keys():
if self.get_widgets()[widget_id]._is_row_col_inside(row, col) and widget_id not in id_list:
id_list.append(widget_id)
if direction == py_cui.keys.KEY_LEFT_ARROW:
id_list.reverse()
self._logger.info('Neighbors with ids {} for cell {},{} span {},{}'.format(id_list,
row_start,
col_start,
row_span,
col_span))
return id_list
def _get_vertical_neighbors(self, widget, direction):
"""Gets all vertical (up, down) neighbor widgets
Parameters
----------
widget : py_cui.widgets.Widget
The currently selected widget
direction : py_cui.keys.KEY*
must be an arrow key value
Returns
-------
id_list : list[]
A list of the neighbor widget ids
"""
        if direction not in py_cui.keys.ARROW_KEYS:
return None
num_rows, _ = self._grid.get_dimensions()
row_start, col_start = widget.get_grid_cell()
row_span, col_span = widget.get_grid_cell_spans()
id_list = []
if direction == py_cui.keys.KEY_UP_ARROW:
row_range_start = 0
row_range_stop = row_start
else:
row_range_start = row_start + row_span
row_range_stop = num_rows
for row in range(row_range_start, row_range_stop):
for col in range(col_start, col_start + col_span):
for widget_id in self.get_widgets().keys():
if self.get_widgets()[widget_id]._is_row_col_inside(row, col) and widget_id not in id_list:
id_list.append(widget_id)
if direction == py_cui.keys.KEY_UP_ARROW:
id_list.reverse()
self._logger.info('Neighbors with ids {} for cell {},{} span {},{}'.format(id_list,
row_start,
col_start,
row_span,
col_span))
return id_list
# CUI status functions. Used to switch between widgets, set the mode, and
# identify neighbors for overview mode
def _check_if_neighbor_exists(self, direction):
"""Function that checks if widget has neighbor in specified cell.
Used for navigating CUI, as arrow keys find the immediate neighbor
Parameters
----------
direction : py_cui.keys.KEY_*
The direction in which to search
Returns
-------
widget_id : str
The widget neighbor ID if found, None otherwise
"""
start_widget = self.get_widgets()[self._selected_widget]
# Find all the widgets in the given row or column
neighbors = []
if direction in [py_cui.keys.KEY_DOWN_ARROW, py_cui.keys.KEY_UP_ARROW]:
neighbors = self._get_vertical_neighbors(start_widget, direction)
elif direction in [py_cui.keys.KEY_RIGHT_ARROW, py_cui.keys.KEY_LEFT_ARROW]:
neighbors = self._get_horizontal_neighbors(start_widget, direction)
if len(neighbors) == 0:
return None
# We select the best match to jump to (first neighbor)
return neighbors[0]
def get_selected_widget(self):
"""Function that gets currently selected widget
Returns
-------
selected_widget : py_cui.widgets.Widget
Reference to currently selected widget object
"""
if self._selected_widget is not None and self._selected_widget in self.get_widgets().keys():
return self.get_widgets()[self._selected_widget]
else:
self._logger.warn('Selected widget ID is None or invalid')
return None
def set_selected_widget(self, widget_id):
"""Function that sets the selected widget for the CUI
Parameters
----------
widget_id : str
the id of the widget to select
"""
if widget_id in self.get_widgets().keys():
self._logger.info('Setting selected widget to ID {}'.format(widget_id))
self._selected_widget = widget_id
else:
self._logger.warn('Widget w/ ID {} does not exist among current widgets.'.format(widget_id))
def lose_focus(self):
"""Function that forces py_cui out of focus mode.
After popup is called, focus is lost
"""
if self._in_focused_mode:
self._in_focused_mode = False
self.status_bar.set_text(self._init_status_bar_text)
self.get_widgets()[self._selected_widget].set_selected(False)
else:
self._logger.info('lose_focus: Not currently in focus mode')
def move_focus(self, widget, auto_press_buttons=True):
"""Moves focus mode to different widget
Parameters
----------
widget : Widget
The widget object we want to move focus to.
"""
self.lose_focus()
self.set_selected_widget(widget.get_id())
# If autofocus buttons is selected, we automatically process the button command and reset to overview mode
if self._auto_focus_buttons and auto_press_buttons and isinstance(widget, py_cui.widgets.Button):
widget.command()
self._logger.info('Moved focus to button {} - ran autofocus command'.format(widget.get_title()))
elif self._auto_focus_buttons and isinstance(widget, py_cui.widgets.Button):
self.status_bar.set_text(self._init_status_bar_text)
else:
widget.set_selected(True)
self._in_focused_mode = True
self.status_bar.set_text(widget.get_help_text())
self._logger.info('Moved focus to widget {}'.format(widget.get_title()))
def _cycle_widgets(self, reverse=False):
"""Function that is fired if cycle key is pressed to move to next widget
Parameters
----------
reverse : bool
Default false. If true, cycle widgets in reverse order.
"""
num_widgets = len(self.get_widgets().keys())
current_widget_num = int(self._selected_widget.split('Widget')[1])
if not reverse:
next_widget_num = current_widget_num + 1
if next_widget_num == num_widgets:
next_widget_num = 0
cycle_key = self._forward_cycle_key
else:
next_widget_num = current_widget_num - 1
if next_widget_num < 0:
next_widget_num = num_widgets - 1
cycle_key = self._reverse_cycle_key
current_widget_id = 'Widget{}'.format(current_widget_num)
next_widget_id = 'Widget{}'.format(next_widget_num)
if self._in_focused_mode and cycle_key in self.get_widgets()[current_widget_id]._key_commands.keys():
# In the event that we are focusing on a widget with that key defined, we do not cycle.
pass
else:
self.move_focus(self.get_widgets()[next_widget_id], auto_press_buttons=False)
def add_key_command(self, key, command):
"""Function that adds a keybinding to the CUI when in overview mode
Parameters
----------
key : py_cui.keys.KEY_*
The key bound to the command
command : Function
A no-arg or lambda function to fire on keypress
"""
self._keybindings[key] = command
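    # For example (sketch, assuming a user-defined refresh_function):
    #
    #     root.add_key_command(py_cui.keys.KEY_R_LOWER, refresh_function)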
# Popup functions. Used to display messages, warnings, and errors to the user.
def show_message_popup(self, title, text):
"""Shows a message popup
Parameters
----------
title : str
Message title
text : str
Message text
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.MessagePopup(self, title, text, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_warning_popup(self, title, text):
"""Shows a warning popup
Parameters
----------
title : str
Warning title
text : str
Warning text
"""
color = YELLOW_ON_BLACK
self._popup = py_cui.popups.MessagePopup(self, 'WARNING - ' + title, text, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_error_popup(self, title, text):
"""Shows an error popup
Parameters
----------
title : str
Error title
text : str
Error text
"""
color = RED_ON_BLACK
self._popup = py_cui.popups.MessagePopup(self, 'ERROR - ' + title, text, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_yes_no_popup(self, title, command):
"""Shows a yes/no popup.
The 'command' parameter must be a function with a single boolean parameter
Parameters
----------
title : str
Message title
command : function
A function taking in a single boolean parameter. Will be fired with True if yes selected, false otherwise
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.YesNoPopup(self, title + '- (y/n)', 'Yes - (y), No - (n)', color, command, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
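    # Illustrative sketch of a yes/no callback taking a single boolean
    # ('root' is a hypothetical PyCUI instance):
    #
    #     def quit_if_confirmed(confirmed):
    #         if confirmed:
    #             root.stop()
    #
    #     root.show_yes_no_popup('Quit the application?', quit_if_confirmed)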
def show_text_box_popup(self, title, command, password=False):
"""Shows a textbox popup.
The 'command' parameter must be a function with a single string parameter
Parameters
----------
title : str
Message title
command : Function
A function with a single string parameter, fired with contents of textbox when enter key pressed
password=False : bool
If true, write characters as '*'
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.TextBoxPopup(self, title, color, command, self._renderer, password, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_menu_popup(self, title, menu_items, command, run_command_if_none=False):
"""Shows a menu popup.
The 'command' parameter must be a function with a single string parameter
Parameters
----------
title : str
menu title
menu_items : list of str
A list of menu items
command : Function
A function taking in a single string argument. Fired with selected menu item when ENTER pressed.
run_command_if_none=False : bool
If True, will run command passing in None if no menu item selected.
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.MenuPopup(self, menu_items, title, color, command, self._renderer, self._logger, run_command_if_none)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_loading_icon_popup(self, title, message, callback=None):
"""Shows a loading icon popup
Parameters
----------
title : str
Message title
message : str
Message text. Will show as '$message...'
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
if callback is not None:
self._post_loading_callback = callback
color = WHITE_ON_BLACK
self._loading = True
self._popup = py_cui.popups.LoadingIconPopup(self, title, message, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_loading_bar_popup(self, title, num_items, callback=None):
"""Shows loading bar popup.
Use 'increment_loading_bar' to show progress
Parameters
----------
title : str
Message title
num_items : int
Number of items to iterate through for loading
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
if callback is not None:
self._post_loading_callback = callback
color = WHITE_ON_BLACK
self._loading = True
self._popup = py_cui.popups.LoadingBarPopup(self, title, num_items, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
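    # Illustrative sketch of the loading-popup pattern ('work_items' and 'process'
    # are hypothetical). The work runs in a background thread and closes the popup
    # when finished:
    #
    #     def long_operation():
    #         for item in work_items:
    #             process(item)
    #             root.increment_loading_bar()
    #         root.stop_loading_popup()
    #
    #     root.show_loading_bar_popup('Processing', len(work_items))
    #     threading.Thread(target=long_operation).start()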
def show_form_popup(self, title, fields, passwd_fields=[], required=[], callback=None):
"""Shows form popup.
Used for inputting several fields worth of values
Parameters
        ----------
title : str
Message title
fields : List[str]
Names of each individual field
passwd_fields : List[str]
Field names that should have characters hidden
required : List[str]
Fields that are required before submission
        callback=None : Function
            If not none, fired when the form is submitted
"""
self._popup = py_cui.dialogs.form.FormPopup(self, fields, passwd_fields, required, {}, title, py_cui.WHITE_ON_BLACK, self._renderer, self._logger)
if callback is not None:
self._popup.set_on_submit_action(callback)
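    # Illustrative sketch ('handle_login' is a hypothetical submit callback):
    #
    #     root.show_form_popup('Login',
    #                          ['Username', 'Password'],
    #                          passwd_fields=['Password'],
    #                          required=['Username'],
    #                          callback=handle_login)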
def show_filedialog_popup(self, popup_type='openfile', initial_dir='.', callback=None, ascii_icons=True, limit_extensions=[]):
"""Shows form popup.
Used for inputting several fields worth of values
Paramters
---------
title : str
Message title
fields : List[str]
Names of each individual field
passwd_fields : List[str]
Field names that should have characters hidden
required : List[str]
Fields that are required before submission
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
self._popup = py_cui.dialogs.filedialog.FileDialogPopup(self, callback, initial_dir, popup_type, ascii_icons, limit_extensions, py_cui.WHITE_ON_BLACK, self._renderer, self._logger)
def increment_loading_bar(self):
"""Increments progress bar if loading bar popup is open
"""
if self._popup is not None:
self._popup._increment_counter()
else:
self._logger.warn('No popup is currently opened.')
def stop_loading_popup(self):
"""Leaves loading state, and closes popup.
Must be called by user to escape loading.
"""
self._loading = False
self.close_popup()
self._logger.info('Stopping open loading popup')
def close_popup(self):
"""Closes the popup, and resets focus
"""
self.lose_focus()
self._popup = None
def _refresh_height_width(self, height, width):
"""Function that updates the height and width of the CUI based on terminal window size
Parameters
----------
height : int
Window height in terminal characters
width : int
Window width in terminal characters
"""
self._height = height
self._width = width
self._grid.update_grid_height_width(self._height, self._width)
for widget_id in self._widgets.keys():
self._widgets[widget_id].update_height_width()
if self._popup is not None:
self._popup.update_height_width()
def get_absolute_size(self):
"""Returns dimensions of CUI
Returns
-------
height, width : int
The dimensions of drawable CUI space in characters
"""
return self._height, self._width
# Draw Functions. Function for drawing widgets, status bars, and popups
def _draw_widgets(self):
"""Function that draws all of the widgets to the screen
"""
for widget_key in self.get_widgets().keys():
if widget_key != self._selected_widget:
self.get_widgets()[widget_key]._draw()
# We draw the selected widget last to support cursor location.
if self._selected_widget is not None:
self.get_widgets()[self._selected_widget]._draw()
self._logger.info('Drew widgets')
def _draw_status_bars(self, stdscr, height, width):
"""Draws status bar and title bar
Parameters
----------
        stdscr : curses standard screen
            The screen used to draw the status bar
height : int
Window height in terminal characters
width : int
Window width in terminal characters
"""
if self.status_bar is not None:
stdscr.attron(curses.color_pair(self.status_bar.get_color()))
stdscr.addstr(height + 3, 0, fit_text(width, self.status_bar.get_text()))
stdscr.attroff(curses.color_pair(self.status_bar.get_color()))
if self.title_bar is not None:
stdscr.attron(curses.color_pair(self.title_bar.get_color()))
stdscr.addstr(0, 0, fit_text(width, self._title, center=True))
stdscr.attroff(curses.color_pair(self.title_bar.get_color()))
def _display_window_warning(self, stdscr, error_info):
"""Function that prints some basic error info if there is an error with the CUI
Parameters
----------
        stdscr : curses standard screen
            The screen used to draw the warning
error_info : str
The information regarding the error.
"""
stdscr.clear()
stdscr.attron(curses.color_pair(RED_ON_BLACK))
stdscr.addstr(0, 0, 'Error displaying CUI!!!')
stdscr.addstr(1, 0, 'Error Type: {}'.format(error_info))
stdscr.addstr(2, 0, 'Most likely terminal dimensions are too small.')
stdscr.attroff(curses.color_pair(RED_ON_BLACK))
stdscr.refresh()
self._logger.info('Encountered error -> {}'.format(error_info))
def _handle_key_presses(self, key_pressed):
"""Function that handles all main loop key presses.
Parameters
----------
key_pressed : py_cui.keys.KEY_*
The key being pressed
"""
# Selected widget represents which widget is being hovered over, though not necessarily in focus mode
if self._selected_widget is None:
return
selected_widget = self.get_widgets()[self._selected_widget]
# If we are in focus mode, the widget has all of the control of the keyboard except
# for the escape key, which exits focus mode.
if self._in_focused_mode and self._popup is None:
if key_pressed == py_cui.keys.KEY_ESCAPE:
self.status_bar.set_text(self._init_status_bar_text)
self._in_focused_mode = False
selected_widget.set_selected(False)
self._logger.info('Exiting focus mode on widget {}'.format(selected_widget.get_title()))
else:
                # widget handles remaining keys
self._logger.info('Widget {} handling {} key'.format(selected_widget.get_title(), key_pressed))
selected_widget._handle_key_press(key_pressed)
        # Otherwise, barring a popup, we are in overview mode, meaning that arrow keys move between widgets, and Enter starts focus mode
elif self._popup is None:
if key_pressed == py_cui.keys.KEY_ENTER and self._selected_widget is not None and selected_widget.is_selectable():
self.move_focus(selected_widget)
for key in self._keybindings.keys():
if key_pressed == key:
command = self._keybindings[key]
self._logger.info('Detected binding for key {}, running command {}'.format(key_pressed, command.__name__))
command()
            # If not in focus mode, use the arrow keys to move around the selectable widgets.
neighbor = None
if key_pressed in py_cui.keys.ARROW_KEYS:
neighbor = self._check_if_neighbor_exists(key_pressed)
if neighbor is not None:
self.set_selected_widget(neighbor)
self._logger.info('Navigated to neighbor widget {}'.format(self.get_widgets()[self._selected_widget].get_title()))
# if we have a popup, that takes key control from both overview and focus mode
elif self._popup is not None:
self._logger.info('Popup {} handling key {}'.format(self._popup.get_title(), key_pressed))
self._popup._handle_key_press(key_pressed)
def _draw(self, stdscr):
"""Main CUI draw loop called by start()
Parameters
----------
stdscr : curses Standard screen
The screen buffer used for drawing CUI elements
"""
self._stdscr = stdscr
key_pressed = 0
# Clear and refresh the screen for a blank canvas
stdscr.clear()
stdscr.refresh()
curses.mousemask(curses.ALL_MOUSE_EVENTS)
# stdscr.nodelay(False)
#stdscr.keypad(True)
# Initialization functions. Generates colors and renderer
self._initialize_colors()
self._initialize_widget_renderer()
# If user specified a refresh timeout, apply it here
if self._refresh_timeout > 0:
self._stdscr.timeout(self._refresh_timeout)
# If user sets non-default border characters, update them here
if self._border_characters is not None:
self._renderer._set_border_renderer_chars(self._border_characters)
# Loop where key_pressed is the last character pressed. Wait for exit key while no popup or focus mode
while key_pressed != self._exit_key or self._in_focused_mode or self._popup is not None:
try:
# If we call stop, we want to break out of the main draw loop
if self._stopped:
break
# Initialization and size adjustment
stdscr.erase()
# find height width, adjust if status/title bar added. We decrement the height by 4 to account for status/title bar and padding
if self._simulated_terminal is None:
height, width = stdscr.getmaxyx()
else:
height = self._simulated_terminal[0]
width = self._simulated_terminal[1]
height = height - 4
# If the user defined an update function to fire on each draw call,
# Run it here. This can of course be also handled user-side
# through a separate thread.
if self._on_draw_update_func is not None:
self._on_draw_update_func()
# This is what allows the CUI to be responsive. Adjust grid size based on current terminal size
# Resize the grid and the widgets if there was a resize operation
if key_pressed == curses.KEY_RESIZE:
self._logger.info('Resizing CUI to new dimensions {} by {}'.format(height, width))
try:
self._refresh_height_width(height, width)
except py_cui.errors.PyCUIOutOfBoundsError as e:
self._logger.info('Resized terminal too small')
self._display_window_warning(stdscr, str(e))
# Here we handle mouse click events globally, or pass them to the UI element to handle
elif key_pressed == curses.KEY_MOUSE:
self._logger.info('Detected mouse click')
_, x, y, _, _ = curses.getmouse()
in_element = self.get_element_at_position(x, y)
# In first case, we click inside already selected widget, pass click for processing
if in_element is not None and in_element.is_selected():
in_element._handle_mouse_press(x, y)
# Otherwise, if not a popup, select the clicked on widget
elif in_element is not None and not isinstance(in_element, py_cui.popups.Popup):
self.move_focus(in_element)
in_element._handle_mouse_press(x, y)
# If we have a post_loading_callback, fire it here
if self._post_loading_callback is not None and not self._loading:
self._logger.info('Firing post-loading callback function {}'.format(self._post_loading_callback.__name__))
self._post_loading_callback()
self._post_loading_callback = None
# Handle widget cycling
if key_pressed == self._forward_cycle_key:
self._cycle_widgets()
elif key_pressed == self._reverse_cycle_key:
self._cycle_widgets(reverse=True)
# Handle keypresses
self._handle_key_presses(key_pressed)
try:
# Draw status/title bar, and all widgets. Selected widget will be bolded.
self._draw_status_bars(stdscr, height, width)
self._draw_widgets()
# draw the popup if required
if self._popup is not None:
self._popup._draw()
except curses.error as e:
self._logger.error('Curses error while drawing TUI')
self._display_window_warning(stdscr, str(e))
except py_cui.errors.PyCUIOutOfBoundsError as e:
self._logger.error('Resized terminal too small')
self._display_window_warning(stdscr, str(e))
# Refresh the screen
stdscr.refresh()
# Wait for next input
if self._loading or self._post_loading_callback is not None:
# When loading, refresh screen every quarter second
time.sleep(0.25)
# Need to reset key_pressed, because otherwise the previously pressed key will be used.
key_pressed = 0
elif self._stopped:
key_pressed = self._exit_key
else:
self._logger.info('Waiting for next keypress')
key_pressed = stdscr.getch()
except KeyboardInterrupt:
self._logger.info('Detected keyboard interrupt, exiting...')
self._stopped = True
stdscr.erase()
stdscr.refresh()
curses.endwin()
if self._on_stop is not None:
self._logger.info('Firing onstop function {}'.format(self._on_stop.__name__))
self._on_stop()
def __format__(self, fmt):
"""Override of base format function. Prints list of current widgets.
Parameters
----------
fmt : Format
The format to override
"""
out = ''
for widget in self.get_widgets().keys():
out += '{}\n'.format(self.get_widgets()[widget].get_title())
return out
| """A python library for intuitively creating CUI/TUI interfaces with pre-built widgets.
"""
#
# Author: <NAME>
# Created: 12-Aug-2019
# Docs: https://jwlodek.github.io/py_cui-docs
# License: BSD-3-Clause (New/Revised)
#
# Some python core library imports
import sys
import os
import time
import copy
import shutil # We use shutil for getting the terminal dimensions
import threading # Threading is used for loading icon popups
import logging # Use logging library for debug purposes
# py_cui uses the curses library. On Windows this does not exist, but
# there is an open-source windows-curses module that adds curses support
# for Python on Windows
import curses
# py_cui imports
import py_cui
import py_cui.keys
import py_cui.statusbar
import py_cui.grid # Grid layout manager, referenced directly in this module
import py_cui.widgets
import py_cui.controls
import py_cui.dialogs
import py_cui.widget_set
import py_cui.popups
import py_cui.renderer
import py_cui.debug
import py_cui.errors
from py_cui.colors import *
# Version number
__version__ = '0.1.3'
def fit_text(width, text, center=False):
"""Fits text to screen size
Helper function to fit text within a given width. Used to fix the issue of
status/title bar text being too long
Parameters
----------
width : int
width of window in characters
text : str
input text
center : bool
flag to center text
Returns
-------
fitted_text : str
text truncated or padded to fit the given width
"""
if width < 5:
return '.' * width
if len(text) >= width:
return text[:width - 5] + '...'
else:
total_num_spaces = (width - len(text) - 1)
if center:
left_spaces = int(total_num_spaces / 2)
right_spaces = int(total_num_spaces / 2)
if total_num_spaces % 2 == 1:
right_spaces = right_spaces + 1
return ' ' * left_spaces + text + ' ' * right_spaces
else:
return text + ' ' * total_num_spaces
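# Behavior sketch for fit_text, derived from the logic above (widths are illustrative):
#   fit_text(10, 'hello')              -> 'hello    '  (text plus trailing spaces, width - 1 chars)
#   fit_text(10, 'hello', center=True) -> '  hello  '  (padding split across both sides)
#   fit_text(10, 'a very long title')  -> 'a ver...'   (truncated to width - 5, plus '...')
#   fit_text(3, 'hello')               -> '...'        (widths under 5 collapse to dots)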
class PyCUI:
"""Base CUI class
Main user interface class for py_cui. To create a user interface, you must
first create an instance of this class, and then add cells + widgets to it.
Attributes
----------
cursor_x, cursor_y : int
absolute position of the cursor in the CUI
grid : py_cui.grid.Grid
The main layout manager for the CUI
widgets : dict of str -> py_cui.widgets.Widget
dict of widgets in the grid
title_bar : py_cui.statusbar.StatusBar
a status bar object that gets drawn at the top of the CUI
status_bar : py_cui.statusbar.StatusBar
a status bar object that gets drawn at the bottom of the CUI
keybindings : dict of py_cui.keys.KEY_* -> function
keybindings to check against in the main CUI loop
height, width : int
height of the terminal in characters, width of terminal in characters
exit_key : key_code
a key code for a key that exits the CUI
simulated_terminal : List[int]
Dimensions for an alternative simulated terminal (used for testing)
"""
def __init__(self, num_rows, num_cols, auto_focus_buttons=True,
exit_key=py_cui.keys.KEY_Q_LOWER, simulated_terminal=None):
"""Constructor for PyCUI class
"""
self._title = 'PyCUI Window'
# When this is not set, the escape character delay
# is too long for exiting focus mode
os.environ.setdefault('ESCDELAY', '25')
# For unit testing purposes, we want to simulate terminal
# dimensions so that we don't get errors
self._simulated_terminal = simulated_terminal
if self._simulated_terminal is None:
term_size = shutil.get_terminal_size()
height = term_size.lines
width = term_size.columns
else:
height = simulated_terminal[0]
width = simulated_terminal[1]
# Init terminal height width. Subtract 4 from height
# for title/status bar and padding
self._height = height
self._width = width
self._height = self._height - 4
# Add status and title bar
self.title_bar = py_cui.statusbar.StatusBar(self._title, BLACK_ON_WHITE)
exit_key_char = py_cui.keys.get_char_from_ascii(exit_key)
self._init_status_bar_text = 'Press - {} - to exit. Arrow Keys to move ' \
'between widgets. Enter to enter focus ' \
'mode.'.format(exit_key_char)
self.status_bar = py_cui.statusbar.StatusBar(self._init_status_bar_text,
BLACK_ON_WHITE)
# Logging object initialization for py_cui
self._logger = py_cui.debug._initialize_logger(self,
name='py_cui')
# Initialize grid, renderer, and widget dict
self._grid = py_cui.grid.Grid(num_rows, num_cols, self._height, self._width, self._logger)
self._renderer = None
self._border_characters = None
self._stdscr = None
self._widgets = {}
self._refresh_timeout = -1
# Variables for determining selected widget/focus mode
self._selected_widget = None
self._in_focused_mode = False
self._popup = None
self._auto_focus_buttons = auto_focus_buttons
# CUI blocks when loading popup is open
self._loading = False
self._stopped = False
self._post_loading_callback = None
self._on_draw_update_func = None
# Top level keybindings. Exit key is 'q' by default
self._keybindings = {}
self._exit_key = exit_key
self._forward_cycle_key = py_cui.keys.KEY_CTRL_LEFT
self._reverse_cycle_key = py_cui.keys.KEY_CTRL_RIGHT
# Callback to fire when CUI is stopped.
self._on_stop = None
def set_refresh_timeout(self, timeout):
"""Sets the CUI auto-refresh timeout to a number of seconds.
Parameters
----------
timeout : int
Number of seconds to wait before refreshing the CUI
"""
# We want the refresh timeout in milliseconds as an integer
self._refresh_timeout = int(timeout * 1000)
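# e.g. root.set_refresh_timeout(0.5) stores a 500 ms curses timeout, so the draw
# loop wakes at least twice per second even without input ('root' is an assumed
# PyCUI instance).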
def set_on_draw_update_func(self, update_function):
"""Adds a function that is fired during each draw call of the CUI
Parameters
----------
update_function : function
A no-argument or lambda function that is fired at the start of each draw call
"""
self._on_draw_update_func = update_function
def set_widget_cycle_key(self, forward_cycle_key=None, reverse_cycle_key=None):
"""Assigns a key for automatically cycling through widgets in both focus and overview modes
Parameters
----------
forward_cycle_key=None : py_cui.keys.KEY_*
Key code for the key that cycles widgets in the forward direction
reverse_cycle_key=None : py_cui.keys.KEY_*
Key code for the key that cycles widgets in the reverse direction
"""
if forward_cycle_key is not None:
self._forward_cycle_key = forward_cycle_key
if reverse_cycle_key is not None:
self._reverse_cycle_key = reverse_cycle_key
def enable_logging(self, log_file_path='py_cui_log.txt', logging_level = logging.DEBUG):
"""Function enables logging for py_cui library
Parameters
----------
log_file_path : str
The target log filepath. Default: 'py_cui_log.txt'
logging_level : int
Default logging level = logging.DEBUG
"""
try:
py_cui.debug._enable_logging(self._logger, filename=log_file_path, logging_level=logging_level)
self._logger.info('Initialized logger')
except PermissionError as e:
print('Failed to initialize logger: {}'.format(str(e)))
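# Usage sketch ('root' is an assumed PyCUI instance; call before root.start()):
#   root.enable_logging(log_file_path='py_cui_debug.txt', logging_level=logging.INFO)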
def apply_widget_set(self, new_widget_set):
"""Function that replaces all widgets in a py_cui with those of a different widget set
Parameters
----------
new_widget_set : WidgetSet
The new widget set to switch to
Raises
------
TypeError
If input is not of type WidgetSet
"""
if isinstance(new_widget_set, py_cui.widget_set.WidgetSet):
self.lose_focus()
self._widgets = new_widget_set._widgets
self._grid = new_widget_set._grid
self._keybindings = new_widget_set._keybindings
if self._simulated_terminal is None:
if self._stdscr is None:
term_size = shutil.get_terminal_size()
height = term_size.lines
width = term_size.columns
else:
# Use curses termsize when possible to fix resize bug on windows.
height, width = self._stdscr.getmaxyx()
else:
height = self._simulated_terminal[0]
width = self._simulated_terminal[1]
height = height - 4
self._refresh_height_width(height, width)
if self._stdscr is not None:
self._initialize_widget_renderer()
self._selected_widget = new_widget_set._selected_widget
else:
raise TypeError('Argument must be of type py_cui.widget_set.WidgetSet')
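# Widget-set switching sketch, e.g. fired from a button command (names are illustrative):
#   screen_b = root.create_new_widget_set(2, 2)
#   screen_b.add_label('Screen B', 0, 0)
#   root.apply_widget_set(screen_b)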
def create_new_widget_set(self, num_rows, num_cols):
"""Function that is used to create additional widget sets
Use this function instead of directly creating widget set object instances, to allow
for logging support.
Parameters
----------
num_rows : int
row count for new widget set
num_cols : int
column count for new widget set
Returns
-------
new_widget_set : py_cui.widget_set.WidgetSet
The new widget set object instance
"""
# Use current logging object and simulated terminal for sub-widget sets
return py_cui.widget_set.WidgetSet(num_rows, num_cols, self._logger,
simulated_terminal=self._simulated_terminal)
# ----------------------------------------------#
# Initialization functions #
# Used to initialize CUI and its features #
# ----------------------------------------------#
def start(self):
"""Function that starts the CUI
"""
self._logger.info('Starting {} CUI'.format(self._title))
curses.wrapper(self._draw)
def stop(self):
"""Function that stops the CUI, and fires the callback function.
Callback must be a no-arg method
"""
self._logger.info('Stopping CUI')
self._stopped = True
def run_on_exit(self, command):
"""Sets callback function on CUI exit. Must be a no-argument function or lambda function
Parameters
----------
command : function
A no-argument or lambda function to be fired on exit
"""
self._on_stop = command
def set_title(self, title):
"""Sets the title bar text
Parameters
----------
title : str
New title for CUI
"""
self._title = title
def set_status_bar_text(self, text):
"""Sets the status bar text when in overview mode
Parameters
----------
text : str
Status bar text
"""
self._init_status_bar_text = text
self.status_bar.set_text(text)
def _initialize_colors(self):
"""Function for initialzing curses colors. Called when CUI is first created.
"""
# Start colors in curses.
# For each color pair in color map, initialize color combination.
curses.start_color()
curses.init_color(curses.COLOR_BLUE, 0, 0, 500)
for color_pair in py_cui.colors._COLOR_MAP.keys():
fg_color, bg_color = py_cui.colors._COLOR_MAP[color_pair]
curses.init_pair(color_pair, fg_color, bg_color)
def _initialize_widget_renderer(self):
"""Function that creates the renderer object that will draw each widget
"""
if self._renderer is None:
self._renderer = py_cui.renderer.Renderer(self, self._stdscr, self._logger)
for widget_id in self._widgets.keys():
self._widgets[widget_id]._assign_renderer(self._renderer)
if self._popup is not None:
self._popup._assign_renderer(self._renderer)
def toggle_unicode_borders(self):
"""Function for toggling unicode based border rendering
"""
if self._border_characters is None or self._border_characters['UP_LEFT'] == '+':
self.set_widget_border_characters('\u256d', '\u256e', '\u2570', '\u256f', '\u2500', '\u2502')
else:
self.set_widget_border_characters('+', '+', '+', '+', '-', '|')
def set_widget_border_characters(self, upper_left_corner, upper_right_corner, lower_left_corner, lower_right_corner, horizontal, vertical):
"""Function that can be used to set arbitrary border characters for drawing widget borders by renderer.
Parameters
----------
upper_left_corner : char
Upper left corner character
upper_right_corner : char
Upper right corner character
lower_left_corner : char
Lower left corner character
lower_right_corner : char
Lower right corner character
horizontal : char
Horizontal border character
vertical : char
Vertical border character
"""
self._border_characters = {
'UP_LEFT': upper_left_corner,
'UP_RIGHT': upper_right_corner,
'DOWN_LEFT': lower_left_corner,
'DOWN_RIGHT': lower_right_corner,
'HORIZONTAL': horizontal,
'VERTICAL': vertical
}
self._logger.info('Set border_characters to {}'.format(self._border_characters))
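# e.g. a double-line border set (characters chosen purely for illustration):
#   root.set_widget_border_characters('\u2554', '\u2557', '\u255a', '\u255d', '\u2550', '\u2551')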
def get_widgets(self):
"""Function that gets current set of widgets
Returns
-------
widgets : dict of str -> widget
dictionary mapping widget IDs to object instances
"""
return self._widgets
# Widget add functions. Each of these adds a particular type of widget
# to the grid in a specified location.
def add_scroll_menu(self, title, row, column, row_span=1, column_span=1, padx=1, pady=0) -> py_cui.widgets.ScrollMenu:
"""Function that adds a new scroll menu to the CUI grid
Parameters
----------
title : str
The title of the scroll menu
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span across
column_span=1 : int
The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
Returns
-------
new_scroll_menu : ScrollMenu
A reference to the created scroll menu object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_scroll_menu = py_cui.widgets.ScrollMenu(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger)
self._widgets[id] = new_scroll_menu
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_scroll_menu))))
return new_scroll_menu
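# Sketch: populate the menu after creation ('root' is an assumed PyCUI instance;
# add_item_list is assumed from the scroll menu implementation):
#   file_menu = root.add_scroll_menu('Files', 0, 0, row_span=2)
#   file_menu.add_item_list(['a.txt', 'b.txt', 'c.txt'])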
def add_checkbox_menu(self, title, row, column, row_span=1, column_span=1, padx=1, pady=0, checked_char='X') -> py_cui.widgets.CheckBoxMenu:
"""Function that adds a new checkbox menu to the CUI grid
Parameters
----------
title : str
The title of the checkbox
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span across
column_span=1 : int
The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
checked_char='X' : char
The character used to mark 'Checked' items
Returns
-------
new_checkbox_menu : CheckBoxMenu
A reference to the created checkbox object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_checkbox_menu = py_cui.widgets.CheckBoxMenu(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
checked_char)
self._widgets[id] = new_checkbox_menu
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_checkbox_menu))))
return new_checkbox_menu
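# Sketch ('root' is an assumed PyCUI instance; add_item_list is assumed from the
# menu implementation):
#   todo = root.add_checkbox_menu('TODO', 0, 1, checked_char='*')
#   todo.add_item_list(['write docs', 'fix bugs'])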
def add_text_box(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, initial_text = '', password = False) -> py_cui.widgets.TextBox:
"""Function that adds a new text box to the CUI grid
Parameters
----------
title : str
The title of the textbox
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span across
column_span=1 : int
The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
initial_text='' : str
Initial text for the textbox
password=False : bool
Toggle to show '*' instead of characters.
Returns
-------
new_text_box : TextBox
A reference to the created textbox object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_text_box = py_cui.widgets.TextBox(id,
title,
self._grid,
row, column,
row_span,
column_span,
padx, pady,
self._logger,
initial_text,
password)
self._widgets[id] = new_text_box
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_text_box))))
return new_text_box
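# Sketch ('root' is an assumed PyCUI instance):
#   name_box = root.add_text_box('Name', 1, 0, initial_text='anonymous')
#   pw_box   = root.add_text_box('Password', 1, 1, password=True)  # input rendered as '*'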
def add_text_block(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, initial_text = '') -> py_cui.widgets.ScrollTextBlock:
"""Function that adds a new text block to the CUI grid
Parameters
----------
title : str
The title of the text block
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span across
column_span=1 : int
The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
initial_text='' : str
Initial text for the text block
Returns
-------
new_text_block : ScrollTextBlock
A reference to the created textblock object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_text_block = py_cui.widgets.ScrollTextBlock(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
initial_text)
self._widgets[id] = new_text_block
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_text_block))))
return new_text_block
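# Sketch ('root' is an assumed PyCUI instance):
#   editor = root.add_text_block('Editor', 0, 0, row_span=3, column_span=2,
#                                initial_text='# notes\n')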
def add_label(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0) -> py_cui.widgets.Label:
"""Function that adds a new label to the CUI grid
Parameters
----------
title : str
The title of the label
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span across
column_span=1 : int
The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
Returns
-------
new_label : Label
A reference to the created label object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_label = py_cui.widgets.Label(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger)
self._widgets[id] = new_label
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_label))))
return new_label
def add_block_label(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, center=True) -> py_cui.widgets.BlockLabel:
"""Function that adds a new block label to the CUI grid
Parameters
----------
title : str
The title of the block label
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span across
column_span=1 : int
The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
center : bool
flag to tell label to be centered or left-aligned.
Returns
-------
new_label : BlockLabel
A reference to the created block label object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_label = py_cui.widgets.BlockLabel(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
center,
self._logger)
self._widgets[id] = new_label
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_label))))
return new_label
def add_button(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, command=None) -> py_cui.widgets.Button:
"""Function that adds a new button to the CUI grid
Parameters
----------
title : str
The title of the button
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span across
column_span=1 : int
The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
command=None : Function
A no-argument or lambda function to fire on button press.
Returns
-------
new_button : Button
A reference to the created button object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_button = py_cui.widgets.Button(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
command)
self._widgets[id] = new_button
if self._selected_widget is None:
self.set_selected_widget(id)
self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_button))))
return new_button
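# Sketch: root.stop is a no-arg method of this class, so it can be wired
# directly as a button command ('root' is an assumed PyCUI instance):
#   exit_button = root.add_button('Exit', 2, 2, command=root.stop)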
def add_slider(self, title, row, column, row_span=1,
column_span=1, padx=1, pady=0,
min_val=0, max_val=100, step=1, init_val=0) -> py_cui.controls.slider.SliderWidget:
"""Function that adds a new label to the CUI grid
Parameters
----------
title : str
The title of the slider
row : int
The row value, from the top down
column : int
The column value from the top down
row_span=1 : int
The number of rows to span across
column_span=1 : int
The number of columns to span across
padx=1 : int
number of padding characters in the x direction
pady=0 : int
number of padding characters in the y direction
min_val=0 : int
min value of the slider
max_val=100 : int
max value of the slider
step=1 : int
amount to increment or decrement by on each step
init_val=0 : int
initial value of the slider
Returns
-------
new_slider : SliderWidget
A reference to the created slider object.
"""
id = 'Widget{}'.format(len(self._widgets.keys()))
new_slider = py_cui.controls.slider.SliderWidget(id,
title,
self._grid,
row,
column,
row_span,
column_span,
padx,
pady,
self._logger,
min_val,
max_val,
step,
init_val)
self._widgets[id] = new_slider
self._logger.info('Adding widget {} w/ ID {} of type {}'
.format(title, id, str(type(new_slider))))
return new_slider
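# Sketch ('root' is an assumed PyCUI instance):
#   volume = root.add_slider('Volume', 2, 0, min_val=0, max_val=10, step=1, init_val=5)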
def get_element_at_position(self, x, y):
"""Returns containing widget for character position
Parameters
----------
x : int
Horizontal character position
y : int
Vertical character position, top down
Returns
-------
in_widget : UIElement
Widget or popup containing the given position, or None if no element contains it
"""
if self._popup is not None and self._popup._contains_position(x, y):
return self._popup
elif self._popup is None:
for widget_id in self.get_widgets().keys():
if self.get_widgets()[widget_id]._contains_position(x, y):
return self.get_widgets()[widget_id]
return None
def _get_horizontal_neighbors(self, widget, direction):
"""Gets all horizontal (left, right) neighbor widgets
Parameters
----------
widget : py_cui.widgets.Widget
The currently selected widget
direction : py_cui.keys.KEY*
must be an arrow key value
Returns
-------
id_list : list of str
A list of the neighbor widget ids
"""
if direction not in py_cui.keys.ARROW_KEYS:
return None
_, num_cols = self._grid.get_dimensions()
row_start, col_start = widget.get_grid_cell()
row_span, col_span = widget.get_grid_cell_spans()
id_list = []
if direction == py_cui.keys.KEY_LEFT_ARROW:
col_range_start = 0
col_range_stop = col_start
else:
col_range_start = col_start + col_span
col_range_stop = num_cols
for col in range(col_range_start, col_range_stop):
for row in range(row_start, row_start + row_span):
for widget_id in self.get_widgets().keys():
if self.get_widgets()[widget_id]._is_row_col_inside(row, col) and widget_id not in id_list:
id_list.append(widget_id)
if direction == py_cui.keys.KEY_LEFT_ARROW:
id_list.reverse()
self._logger.info('Neighbors with ids {} for cell {},{} span {},{}'.format(id_list,
row_start,
col_start,
row_span,
col_span))
return id_list
def _get_vertical_neighbors(self, widget, direction):
"""Gets all vertical (up, down) neighbor widgets
Parameters
----------
widget : py_cui.widgets.Widget
The currently selected widget
direction : py_cui.keys.KEY*
must be an arrow key value
Returns
-------
id_list : list of str
A list of the neighbor widget ids
"""
if direction not in py_cui.keys.ARROW_KEYS:
return None
num_rows, _ = self._grid.get_dimensions()
row_start, col_start = widget.get_grid_cell()
row_span, col_span = widget.get_grid_cell_spans()
id_list = []
if direction == py_cui.keys.KEY_UP_ARROW:
row_range_start = 0
row_range_stop = row_start
else:
row_range_start = row_start + row_span
row_range_stop = num_rows
for row in range(row_range_start, row_range_stop):
for col in range(col_start, col_start + col_span):
for widget_id in self.get_widgets().keys():
if self.get_widgets()[widget_id]._is_row_col_inside(row, col) and widget_id not in id_list:
id_list.append(widget_id)
if direction == py_cui.keys.KEY_UP_ARROW:
id_list.reverse()
self._logger.info('Neighbors with ids {} for cell {},{} span {},{}'.format(id_list,
row_start,
col_start,
row_span,
col_span))
return id_list
# CUI status functions. Used to switch between widgets, set the mode, and
# identify neighbors for overview mode
def _check_if_neighbor_exists(self, direction):
"""Function that checks if widget has neighbor in specified cell.
Used for navigating CUI, as arrow keys find the immediate neighbor
Parameters
----------
direction : py_cui.keys.KEY_*
The direction in which to search
Returns
-------
widget_id : str
The widget neighbor ID if found, None otherwise
"""
start_widget = self.get_widgets()[self._selected_widget]
# Find all the widgets in the given row or column
neighbors = []
if direction in [py_cui.keys.KEY_DOWN_ARROW, py_cui.keys.KEY_UP_ARROW]:
neighbors = self._get_vertical_neighbors(start_widget, direction)
elif direction in [py_cui.keys.KEY_RIGHT_ARROW, py_cui.keys.KEY_LEFT_ARROW]:
neighbors = self._get_horizontal_neighbors(start_widget, direction)
if len(neighbors) == 0:
return None
# We select the best match to jump to (first neighbor)
return neighbors[0]
def get_selected_widget(self):
"""Function that gets currently selected widget
Returns
-------
selected_widget : py_cui.widgets.Widget
Reference to currently selected widget object
"""
if self._selected_widget is not None and self._selected_widget in self.get_widgets().keys():
return self.get_widgets()[self._selected_widget]
else:
self._logger.warning('Selected widget ID is None or invalid')
return None
def set_selected_widget(self, widget_id):
"""Function that sets the selected widget for the CUI
Parameters
----------
widget_id : str
the id of the widget to select
"""
if widget_id in self.get_widgets().keys():
self._logger.info('Setting selected widget to ID {}'.format(widget_id))
self._selected_widget = widget_id
else:
self._logger.warning('Widget w/ ID {} does not exist among current widgets.'.format(widget_id))
def lose_focus(self):
"""Function that forces py_cui out of focus mode.
After popup is called, focus is lost
"""
if self._in_focused_mode:
self._in_focused_mode = False
self.status_bar.set_text(self._init_status_bar_text)
self.get_widgets()[self._selected_widget].set_selected(False)
else:
self._logger.info('lose_focus: Not currently in focus mode')
def move_focus(self, widget, auto_press_buttons=True):
"""Moves focus mode to different widget
Parameters
----------
widget : Widget
The widget object we want to move focus to.
"""
self.lose_focus()
self.set_selected_widget(widget.get_id())
# If auto-focus buttons is enabled, we automatically process the button command and reset to overview mode
if self._auto_focus_buttons and auto_press_buttons and isinstance(widget, py_cui.widgets.Button):
widget.command()
self._logger.info('Moved focus to button {} - ran autofocus command'.format(widget.get_title()))
elif self._auto_focus_buttons and isinstance(widget, py_cui.widgets.Button):
self.status_bar.set_text(self._init_status_bar_text)
else:
widget.set_selected(True)
self._in_focused_mode = True
self.status_bar.set_text(widget.get_help_text())
self._logger.info('Moved focus to widget {}'.format(widget.get_title()))
def _cycle_widgets(self, reverse=False):
"""Function that is fired if cycle key is pressed to move to next widget
Parameters
----------
reverse : bool
Default false. If true, cycle widgets in reverse order.
"""
num_widgets = len(self.get_widgets().keys())
current_widget_num = int(self._selected_widget.split('Widget')[1])
if not reverse:
next_widget_num = current_widget_num + 1
if next_widget_num == num_widgets:
next_widget_num = 0
cycle_key = self._forward_cycle_key
else:
next_widget_num = current_widget_num - 1
if next_widget_num < 0:
next_widget_num = num_widgets - 1
cycle_key = self._reverse_cycle_key
current_widget_id = 'Widget{}'.format(current_widget_num)
next_widget_id = 'Widget{}'.format(next_widget_num)
if self._in_focused_mode and cycle_key in self.get_widgets()[current_widget_id]._key_commands.keys():
# In the event that we are focusing on a widget with that key defined, we do not cycle.
pass
else:
self.move_focus(self.get_widgets()[next_widget_id], auto_press_buttons=False)
def add_key_command(self, key, command):
"""Function that adds a keybinding to the CUI when in overview mode
Parameters
----------
key : py_cui.keys.KEY_*
The key bound to the command
command : Function
A no-arg or lambda function to fire on keypress
"""
self._keybindings[key] = command
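# Sketch: bind a top-level (overview mode) key to a handler ('root' and
# 'refresh_view' are illustrative):
#   root.add_key_command(py_cui.keys.KEY_R_LOWER, refresh_view)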
# Popup functions. Used to display messages, warnings, and errors to the user.
def show_message_popup(self, title, text):
"""Shows a message popup
Parameters
----------
title : str
Message title
text : str
Message text
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.MessagePopup(self, title, text, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
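# Sketch ('root' is an assumed PyCUI instance):
#   root.show_message_popup('Saved', 'Your changes were written to disk.')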
def show_warning_popup(self, title, text):
"""Shows a warning popup
Parameters
----------
title : str
Warning title
text : str
Warning text
"""
color = YELLOW_ON_BLACK
self._popup = py_cui.popups.MessagePopup(self, 'WARNING - ' + title, text, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_error_popup(self, title, text):
"""Shows an error popup
Parameters
----------
title : str
Error title
text : str
Error text
"""
color = RED_ON_BLACK
self._popup = py_cui.popups.MessagePopup(self, 'ERROR - ' + title, text, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_yes_no_popup(self, title, command):
"""Shows a yes/no popup.
The 'command' parameter must be a function with a single boolean parameter
Parameters
----------
title : str
Message title
command : function
A function taking in a single boolean parameter. Fired with True if yes is selected, False otherwise
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.YesNoPopup(self, title + '- (y/n)', 'Yes - (y), No - (n)', color, command, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
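# Sketch ('root' is an assumed PyCUI instance):
#   def quit_if_confirmed(selection):  # receives True for yes, False for no
#       if selection:
#           root.stop()
#   root.show_yes_no_popup('Really quit?', quit_if_confirmed)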
def show_text_box_popup(self, title, command, password=False):
"""Shows a textbox popup.
The 'command' parameter must be a function with a single string parameter
Parameters
----------
title : str
Message title
command : Function
A function with a single string parameter, fired with contents of textbox when enter key pressed
password=False : bool
If true, write characters as '*'
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.TextBoxPopup(self, title, color, command, self._renderer, password, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
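# Sketch ('save_to_file' is an illustrative callback that receives the entered string):
#   root.show_text_box_popup('Enter a filename', save_to_file)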
def show_menu_popup(self, title, menu_items, command, run_command_if_none=False):
"""Shows a menu popup.
The 'command' parameter must be a function with a single string parameter
Parameters
----------
title : str
menu title
menu_items : list of str
A list of menu items
command : Function
A function taking in a single string argument. Fired with selected menu item when ENTER pressed.
run_command_if_none=False : bool
If True, will run command passing in None if no menu item selected.
"""
color = WHITE_ON_BLACK
self._popup = py_cui.popups.MenuPopup(self, menu_items, title, color, command, self._renderer, self._logger, run_command_if_none)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
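# Sketch ('choose_color' is an illustrative callback that receives the selected item):
#   root.show_menu_popup('Pick a color', ['red', 'green', 'blue'], choose_color)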
def show_loading_icon_popup(self, title, message, callback=None):
"""Shows a loading icon popup
Parameters
----------
title : str
Message title
message : str
Message text. Will show as '$message...'
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
if callback is not None:
self._post_loading_callback = callback
color = WHITE_ON_BLACK
self._loading = True
self._popup = py_cui.popups.LoadingIconPopup(self, title, message, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
def show_loading_bar_popup(self, title, num_items, callback=None):
"""Shows loading bar popup.
Use 'increment_loading_bar' to show progress
Parameters
----------
title : str
Message title
num_items : int
Number of items to iterate through for loading
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
if callback is not None:
self._post_loading_callback = callback
color = WHITE_ON_BLACK
self._loading = True
self._popup = py_cui.popups.LoadingBarPopup(self, title, num_items, color, self._renderer, self._logger)
self._logger.info('Opened {} popup with title {}'.format(str(type(self._popup)), self._popup.get_title()))
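# Sketch: the long-running work runs on a worker thread while the popup animates;
# names are illustrative ('threading' and 'time' are imported at the top of this module):
#   def do_work():
#       for _ in range(10):
#           time.sleep(0.1)               # stand-in for one unit of real work
#           root.increment_loading_bar()
#       root.stop_loading_popup()
#   root.show_loading_bar_popup('Working', 10)
#   threading.Thread(target=do_work).start()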
def show_form_popup(self, title, fields, passwd_fields=[], required=[], callback=None):
"""Shows form popup.
Used for inputting several fields worth of values
Parameters
----------
title : str
Message title
fields : List[str]
Names of each individual field
passwd_fields : List[str]
Field names that should have characters hidden
required : List[str]
Fields that are required before submission
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
self._popup = py_cui.dialogs.form.FormPopup(self, fields, passwd_fields, required, {}, title, py_cui.WHITE_ON_BLACK, self._renderer, self._logger)
if callback is not None:
self._popup.set_on_submit_action(callback)
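# Sketch ('handle_form' is illustrative; it is assumed to receive the submitted
# field values via the popup's on-submit action):
#   root.show_form_popup('Login', ['User', 'Password'],
#                        passwd_fields=['Password'], required=['User'],
#                        callback=handle_form)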
def show_filedialog_popup(self, popup_type='openfile', initial_dir='.', callback=None, ascii_icons=True, limit_extensions=[]):
"""Shows form popup.
Used for inputting several fields worth of values
Paramters
---------
title : str
Message title
fields : List[str]
Names of each individual field
passwd_fields : List[str]
Field names that should have characters hidden
required : List[str]
Fields that are required before submission
callback=None : Function
If not none, fired after loading is completed. Must be a no-arg function
"""
self._popup = py_cui.dialogs.filedialog.FileDialogPopup(self, callback, initial_dir, popup_type, ascii_icons, limit_extensions, py_cui.WHITE_ON_BLACK, self._renderer, self._logger)
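# Sketch ('open_file' is an illustrative single-argument callback that receives
# the chosen path):
#   root.show_filedialog_popup(popup_type='openfile', initial_dir='.',
#                              callback=open_file, limit_extensions=['.txt'])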
def increment_loading_bar(self):
"""Increments progress bar if loading bar popup is open
"""
if self._popup is not None:
self._popup._increment_counter()
else:
self._logger.warning('No popup is currently opened.')
def stop_loading_popup(self):
"""Leaves loading state, and closes popup.
Must be called by user to escape loading.
"""
self._loading = False
self.close_popup()
self._logger.info('Stopping open loading popup')
def close_popup(self):
"""Closes the popup, and resets focus
"""
self.lose_focus()
self._popup = None
def _refresh_height_width(self, height, width):
"""Function that updates the height and width of the CUI based on terminal window size
Parameters
----------
height : int
Window height in terminal characters
width : int
Window width in terminal characters
"""
self._height = height
self._width = width
self._grid.update_grid_height_width(self._height, self._width)
for widget_id in self._widgets.keys():
self._widgets[widget_id].update_height_width()
if self._popup is not None:
self._popup.update_height_width()
def get_absolute_size(self):
"""Returns dimensions of CUI
Returns
-------
height, width : int
The dimensions of drawable CUI space in characters
"""
return self._height, self._width
# Draw Functions. Function for drawing widgets, status bars, and popups
def _draw_widgets(self):
"""Function that draws all of the widgets to the screen
"""
for widget_key in self.get_widgets().keys():
if widget_key != self._selected_widget:
self.get_widgets()[widget_key]._draw()
# We draw the selected widget last to support cursor location.
if self._selected_widget is not None:
self.get_widgets()[self._selected_widget]._draw()
self._logger.info('Drew widgets')
def _draw_status_bars(self, stdscr, height, width):
"""Draws status bar and title bar
Parameters
----------
stdscr : curses standard screen
The screen buffer used to draw the status bar
height : int
Window height in terminal characters
width : int
Window width in terminal characters
"""
if self.status_bar is not None:
stdscr.attron(curses.color_pair(self.status_bar.get_color()))
stdscr.addstr(height + 3, 0, fit_text(width, self.status_bar.get_text()))
stdscr.attroff(curses.color_pair(self.status_bar.get_color()))
if self.title_bar is not None:
stdscr.attron(curses.color_pair(self.title_bar.get_color()))
stdscr.addstr(0, 0, fit_text(width, self._title, center=True))
stdscr.attroff(curses.color_pair(self.title_bar.get_color()))
def _display_window_warning(self, stdscr, error_info):
"""Function that prints some basic error info if there is an error with the CUI
Parameters
----------
stdscr : curses standard screen
The screen buffer used to draw the warning
error_info : str
The information regarding the error.
"""
stdscr.clear()
stdscr.attron(curses.color_pair(RED_ON_BLACK))
stdscr.addstr(0, 0, 'Error displaying CUI!!!')
stdscr.addstr(1, 0, 'Error Type: {}'.format(error_info))
stdscr.addstr(2, 0, 'Most likely terminal dimensions are too small.')
stdscr.attroff(curses.color_pair(RED_ON_BLACK))
stdscr.refresh()
self._logger.info('Encountered error -> {}'.format(error_info))
def _handle_key_presses(self, key_pressed):
"""Function that handles all main loop key presses.
Parameters
----------
key_pressed : py_cui.keys.KEY_*
The key being pressed
"""
# Selected widget represents which widget is being hovered over, though not necessarily in focus mode
if self._selected_widget is None:
return
selected_widget = self.get_widgets()[self._selected_widget]
# If we are in focus mode, the widget has all of the control of the keyboard except
# for the escape key, which exits focus mode.
if self._in_focused_mode and self._popup is None:
if key_pressed == py_cui.keys.KEY_ESCAPE:
self.status_bar.set_text(self._init_status_bar_text)
self._in_focused_mode = False
selected_widget.set_selected(False)
self._logger.info('Exiting focus mode on widget {}'.format(selected_widget.get_title()))
else:
# widget handles remaining keys
self._logger.info('Widget {} handling {} key'.format(selected_widget.get_title(), key_pressed))
selected_widget._handle_key_press(key_pressed)
# Otherwise, barring a popup, we are in overview mode, meaning that arrow keys move between widgets, and the Enter key starts focus mode
elif self._popup is None:
if key_pressed == py_cui.keys.KEY_ENTER and self._selected_widget is not None and selected_widget.is_selectable():
self.move_focus(selected_widget)
for key in self._keybindings.keys():
if key_pressed == key:
command = self._keybindings[key]
self._logger.info('Detected binding for key {}, running command {}'.format(key_pressed, command.__name__))
command()
# If not in focus mode, use the arrow keys to move around the selectable widgets.
neighbor = None
if key_pressed in py_cui.keys.ARROW_KEYS:
neighbor = self._check_if_neighbor_exists(key_pressed)
if neighbor is not None:
self.set_selected_widget(neighbor)
self._logger.info('Navigated to neighbor widget {}'.format(self.get_widgets()[self._selected_widget].get_title()))
# if we have a popup, that takes key control from both overview and focus mode
elif self._popup is not None:
self._logger.info('Popup {} handling key {}'.format(self._popup.get_title(), key_pressed))
self._popup._handle_key_press(key_pressed)
def _draw(self, stdscr):
"""Main CUI draw loop called by start()
Parameters
----------
stdscr : curses Standard screen
The screen buffer used for drawing CUI elements
"""
self._stdscr = stdscr
key_pressed = 0
# Clear and refresh the screen for a blank canvas
stdscr.clear()
stdscr.refresh()
curses.mousemask(curses.ALL_MOUSE_EVENTS)
# stdscr.nodelay(False)
#stdscr.keypad(True)
# Initialization functions. Generates colors and renderer
self._initialize_colors()
self._initialize_widget_renderer()
# If user specified a refresh timeout, apply it here
if self._refresh_timeout > 0:
self._stdscr.timeout(self._refresh_timeout)
# If user sets non-default border characters, update them here
if self._border_characters is not None:
self._renderer._set_border_renderer_chars(self._border_characters)
# Loop where key_pressed is the last character pressed. Wait for exit key while no popup or focus mode
while key_pressed != self._exit_key or self._in_focused_mode or self._popup is not None:
try:
# If we call stop, we want to break out of the main draw loop
if self._stopped:
break
# Initialization and size adjustment
stdscr.erase()
# find height width, adjust if status/title bar added. We decrement the height by 4 to account for status/title bar and padding
if self._simulated_terminal is None:
height, width = stdscr.getmaxyx()
else:
height = self._simulated_terminal[0]
width = self._simulated_terminal[1]
height = height - 4
# If the user defined an update function to fire on each draw call,
# Run it here. This can of course be also handled user-side
# through a separate thread.
if self._on_draw_update_func is not None:
self._on_draw_update_func()
# This is what allows the CUI to be responsive. Adjust grid size based on current terminal size
# Resize the grid and the widgets if there was a resize operation
if key_pressed == curses.KEY_RESIZE:
self._logger.info('Resizing CUI to new dimensions {} by {}'.format(height, width))
try:
self._refresh_height_width(height, width)
except py_cui.errors.PyCUIOutOfBoundsError as e:
self._logger.info('Resized terminal too small')
self._display_window_warning(stdscr, str(e))
# Here we handle mouse click events globally, or pass them to the UI element to handle
elif key_pressed == curses.KEY_MOUSE:
self._logger.info('Detected mouse click')
_, x, y, _, _ = curses.getmouse()
in_element = self.get_element_at_position(x, y)
# In first case, we click inside already selected widget, pass click for processing
if in_element is not None and in_element.is_selected():
in_element._handle_mouse_press(x, y)
# Otherwise, if not a popup, select the clicked on widget
elif in_element is not None and not isinstance(in_element, py_cui.popups.Popup):
self.move_focus(in_element)
in_element._handle_mouse_press(x, y)
# If we have a post_loading_callback, fire it here
if self._post_loading_callback is not None and not self._loading:
self._logger.info('Firing post-loading callback function {}'.format(self._post_loading_callback.__name__))
self._post_loading_callback()
self._post_loading_callback = None
# Handle widget cycling
if key_pressed == self._forward_cycle_key:
self._cycle_widgets()
elif key_pressed == self._reverse_cycle_key:
self._cycle_widgets(reverse=True)
# Handle keypresses
self._handle_key_presses(key_pressed)
try:
# Draw status/title bar, and all widgets. Selected widget will be bolded.
self._draw_status_bars(stdscr, height, width)
self._draw_widgets()
# draw the popup if required
if self._popup is not None:
self._popup._draw()
except curses.error as e:
self._logger.error('Curses error while drawing TUI')
self._display_window_warning(stdscr, str(e))
except py_cui.errors.PyCUIOutOfBoundsError as e:
self._logger.error('Resized terminal too small')
self._display_window_warning(stdscr, str(e))
# Refresh the screen
stdscr.refresh()
# Wait for next input
if self._loading or self._post_loading_callback is not None:
# When loading, refresh screen every quarter second
time.sleep(0.25)
# Need to reset key_pressed, because otherwise the previously pressed key will be used.
key_pressed = 0
elif self._stopped:
key_pressed = self._exit_key
else:
self._logger.info('Waiting for next keypress')
key_pressed = stdscr.getch()
except KeyboardInterrupt:
self._logger.info('Detected keyboard interrupt, exiting...')
self._stopped = True
stdscr.erase()
stdscr.refresh()
curses.endwin()
if self._on_stop is not None:
self._logger.info('Firing onstop function {}'.format(self._on_stop.__name__))
self._on_stop()
def __format__(self, fmt):
"""Override of base format function. Prints list of current widgets.
Parameters
----------
fmt : Format
The format to override
"""
out = ''
for widget in self.get_widgets().keys():
out += '{}\n'.format(self.get_widgets()[widget].get_title())
return out
| en | 0.641862 | A python library for intuitively creating CUI/TUI interfaces with pre-built widgets. # # Author: <NAME> # Created: 12-Aug-2019 # Docs: https://jwlodek.github.io/py_cui-docs # License: BSD-3-Clause (New/Revised) # # Some python core library imports # We use shutil for getting the terminal dimensions # Threading is used for loading icon popups # Use logging library for debug purposes # py_cui uses the curses library. On windows this does not exist, but # there is a open source windows-curses module that adds curses support # for python on windows # py_cui imports # Version number Fits text to screen size Helper function to fit text within a given width. Used to fix issue with status/title bar text being too long Parameters ---------- width : int width of window in characters text : str input text center : Boolean flag to center text Returns ------- fitted_text : str text fixed depending on width Base CUI class Main user interface class for py_cui. To create a user interface, you must first create an instance of this class, and then add cells + widgets to it. Attributes ---------- cursor_x, cursor_y : int absolute position of the cursor in the CUI grid : py_cui.grid.Grid The main layout manager for the CUI widgets : dict of str - py_cui.widgets.Widget dict of widget in the grid title_bar : py_cui.statusbar.StatusBar a status bar object that gets drawn at the top of the CUI status_bar : py_cui.statusbar.StatusBar a status bar object that gets drawn at the bottom of the CUI keybindings : list of py_cui.keybinding.KeyBinding list of keybindings to check against in the main CUI loop height, width : int height of the terminal in characters, width of terminal in characters exit_key : key_code a key code for a key that exits the CUI simulated_terminal : List[int] Dimensions for an alternative simulated terminal (used for testing) Constructor for PyCUI class # When this is not set, the escape character delay # is too long for exiting focus mode # For unit testing purposes, we want to simulate terminal # dimensions so that we don't get errors # Init terminal height width. Subtract 4 from height # for title/status bar and padding # Add status and title bar # Logging object initialization for py_cui # Initialize grid, renderer, and widget dict # Variables for determining selected widget/focus mode # CUI blocks when loading popup is open # Top level keybindings. Exit key is 'q' by default # Callback to fire when CUI is stopped. Sets the CUI auto-refresh timeout to a number of seconds. Parameters ---------- timeout : int Number of seconds to wait before refreshing the CUI # We want the refresh timeout in milliseconds as an integer Adds a function that is fired during each draw call of the CUI Parameters ---------- update_function : function A no-argument or lambda function that is fired at the start of each draw call Assigns a key for automatically cycling through widgets in both focus and overview modes Parameters ---------- widget_cycle_key : py_cui.keys.KEY Key code for key to cycle through widgets Function enables logging for py_cui library Parameters ---------- log_file_path : str The target log filepath. Default 'py_cui_log.txt logging_level : int Default logging level = logging.DEBUG Function that replaces all widgets in a py_cui with those of a different widget set Parameters ---------- new_widget_set : WidgetSet The new widget set to switch to Raises ------ TypeError If input is not of type WidgetSet # Use curses termsize when possible to fix resize bug on windows. 
Function that is used to create additional widget sets Use this function instead of directly creating widget set object instances, to allow for logging support. Parameters ---------- num_rows : int row count for new widget set num_cols : int column count for new widget set Returns ------- new_widget_set : py_cui.widget_set.WidgetSet The new widget set object instance # Use current logging object and simulated terminal for sub-widget sets # ----------------------------------------------# # Initialization functions # # Used to initialzie CUI and its features # # ----------------------------------------------# Function that starts the CUI Function that stops the CUI, and fires the callback function. Callback must be a no arg method Sets callback function on CUI exit. Must be a no-argument function or lambda function Parameters ---------- command : function A no-argument or lambda function to be fired on exit Sets the title bar text Parameters ---------- title : str New title for CUI Sets the status bar text when in overview mode Parameters ---------- text : str Status bar text Function for initialzing curses colors. Called when CUI is first created. # Start colors in curses. # For each color pair in color map, initialize color combination. Function that creates the renderer object that will draw each widget Function for toggling unicode based border rendering Function that can be used to set arbitrary border characters for drawing widget borders by renderer. Parameters ---------- upper_left_corner : char Upper left corner character upper_right_corner : char Upper right corner character lower_left_corner : char Upper left corner character lower_right_corner : char Lower right corner character horizontal : char Horizontal border character vertical : char Vertical border character Function that gets current set of widgets Returns ------- widgets : dict of str -> widget dictionary mapping widget IDs to object instances # Widget add functions. Each of these adds a particular type of widget # to the grid in a specified location. Function that adds a new scroll menu to the CUI grid Parameters ---------- title : str The title of the scroll menu row : int The row value, from the top down column : int The column value from the top down row_span=1 : int The number of rows to span accross column_span=1 : int the number of columns to span accross padx=1 : int number of padding characters in the x direction pady=0 : int number of padding characters in the y direction Returns ------- new_scroll_menu : ScrollMenu A reference to the created scroll menu object. Function that adds a new checkbox menu to the CUI grid Parameters ---------- title : str The title of the checkbox row : int The row value, from the top down column : int The column value from the top down row_span=1 : int The number of rows to span accross column_span=1 : int the number of columns to span accross padx=1 : int number of padding characters in the x direction pady=0 : int number of padding characters in the y direction checked_char='X' : char The character used to mark 'Checked' items Returns ------- new_checkbox_menu : CheckBoxMenu A reference to the created checkbox object. 
Function that adds a new text box to the CUI grid Parameters ---------- title : str The title of the textbox row : int The row value, from the top down column : int The column value from the top down row_span=1 : int The number of rows to span accross column_span=1 : int the number of columns to span accross padx=1 : int number of padding characters in the x direction pady=0 : int number of padding characters in the y direction initial_text='' : str Initial text for the textbox password=False : bool Toggle to show '*' instead of characters. Returns ------- new_text_box : TextBox A reference to the created textbox object. Function that adds a new text block to the CUI grid Parameters ---------- title : str The title of the text block row : int The row value, from the top down column : int The column value from the top down row_span=1 : int The number of rows to span accross column_span=1 : int the number of columns to span accross padx=1 : int number of padding characters in the x direction pady=0 : int number of padding characters in the y direction initial_text='' : str Initial text for the text block Returns ------- new_text_block : ScrollTextBlock A reference to the created textblock object. Function that adds a new label to the CUI grid Parameters ---------- title : str The title of the label row : int The row value, from the top down column : int The column value from the top down row_span=1 : int The number of rows to span accross column_span=1 : int the number of columns to span accross padx=1 : int number of padding characters in the x direction pady=0 : int number of padding characters in the y direction Returns ------- new_label : Label A reference to the created label object. Function that adds a new block label to the CUI grid Parameters ---------- title : str The title of the block label row : int The row value, from the top down column : int The column value from the top down row_span=1 : int The number of rows to span accross column_span=1 : int the number of columns to span accross padx=1 : int number of padding characters in the x direction pady=0 : int number of padding characters in the y direction center : bool flag to tell label to be centered or left-aligned. Returns ------- new_label : BlockLabel A reference to the created block label object. Function that adds a new button to the CUI grid Parameters ---------- title : str The title of the button row : int The row value, from the top down column : int The column value from the top down row_span=1 : int The number of rows to span accross column_span=1 : int the number of columns to span accross padx=1 : int number of padding characters in the x direction pady=0 : int number of padding characters in the y direction command=None : Function A no-argument or lambda function to fire on button press. Returns ------- new_button : Button A reference to the created button object. 
Function that adds a new label to the CUI grid Parameters ---------- title : str The title of the label row : int The row value, from the top down column : int The column value from the top down row_span=1 : int The number of rows to span accross column_span=1 : int the number of columns to span accross padx=1 : int number of padding characters in the x direction pady=0 : int number of padding characters in the y direction min_val = 0 int min value of the slider max_val = 0 int max value of the slider step = 0 int step to incremento or decrement init_val = 0 int initial value of the slider Returns ------- new_slider : Slider A reference to the created slider object. Returns containing widget for character position Parameters ---------- x : int Horizontal character position y : int Vertical character position, top down Returns ------- in_widget : UIElement Widget or popup that is within the position None if nothing Gets all horizontal (left, right) neighbor widgets Parameters ---------- widget : py_cui.widgets.Widget The currently selected widget direction : py_cui.keys.KEY* must be an arrow key value Returns ------- id_list : list[] A list of the neighbor widget ids Gets all vertical (up, down) neighbor widgets Parameters ---------- widget : py_cui.widgets.Widget The currently selected widget direction : py_cui.keys.KEY* must be an arrow key value Returns ------- id_list : list[] A list of the neighbor widget ids # CUI status functions. Used to switch between widgets, set the mode, and # identify neighbors for overview mode Function that checks if widget has neighbor in specified cell. Used for navigating CUI, as arrow keys find the immediate neighbor Parameters ---------- direction : py_cui.keys.KEY_* The direction in which to search Returns ------- widget_id : str The widget neighbor ID if found, None otherwise # Find all the widgets in the given row or column # We select the best match to jump to (first neighbor) Function that gets currently selected widget Returns ------- selected_widget : py_cui.widgets.Widget Reference to currently selected widget object Function that sets the selected widget for the CUI Parameters ---------- widget_id : str the id of the widget to select Function that forces py_cui out of focus mode. After popup is called, focus is lost Moves focus mode to different widget Parameters ---------- widget : Widget The widget object we want to move focus to. # If autofocus buttons is selected, we automatically process the button command and reset to overview mode Function that is fired if cycle key is pressed to move to next widget Parameters ---------- reverse : bool Default false. If true, cycle widgets in reverse order. # In the event that we are focusing on a widget with that key defined, we do not cycle. Function that adds a keybinding to the CUI when in overview mode Parameters ---------- key : py_cui.keys.KEY_* The key bound to the command command : Function A no-arg or lambda function to fire on keypress # Popup functions. Used to display messages, warnings, and errors to the user. Shows a message popup Parameters ---------- title : str Message title text : str Message text Shows a warning popup Parameters ---------- title : str Warning title text : str Warning text Shows an error popup Parameters ---------- title : str Error title text : str Error text Shows a yes/no popup. The 'command' parameter must be a function with a single boolean parameter Parameters ---------- title : str Message title command : function A function taking in a single boolean parameter. 
Will be fired with True if yes selected, False otherwise Shows a textbox popup. The 'command' parameter must be a function with a single string parameter Parameters ---------- title : str Message title command : Function A function with a single string parameter, fired with contents of textbox when enter key pressed password=False : bool If true, write characters as '*' Shows a menu popup. The 'command' parameter must be a function with a single string parameter Parameters ---------- title : str menu title menu_items : list of str A list of menu items command : Function A function taking in a single string argument. Fired with selected menu item when ENTER pressed. run_command_if_none=False : bool If True, will run command passing in None if no menu item selected. Shows a loading icon popup Parameters ---------- title : str Message title message : str Message text. Will show as '$message...' callback=None : Function If not none, fired after loading is completed. Must be a no-arg function Shows loading bar popup. Use 'increment_loading_bar' to show progress Parameters ---------- title : str Message title num_items : int Number of items to iterate through for loading callback=None : Function If not none, fired after loading is completed. Must be a no-arg function Shows form popup. Used for inputting several fields worth of values Parameters ---------- title : str Message title fields : List[str] Names of each individual field passwd_fields : List[str] Field names that should have characters hidden required : List[str] Fields that are required before submission callback=None : Function If not none, fired after loading is completed. Must be a no-arg function Shows form popup. Used for inputting several fields worth of values Parameters ---------- title : str Message title fields : List[str] Names of each individual field passwd_fields : List[str] Field names that should have characters hidden required : List[str] Fields that are required before submission callback=None : Function If not none, fired after loading is completed. Must be a no-arg function Increments progress bar if loading bar popup is open Leaves loading state, and closes popup. Must be called by user to escape loading. Closes the popup, and resets focus Function that updates the height and width of the CUI based on terminal window size Parameters ---------- height : int Window height in terminal characters width : int Window width in terminal characters Returns dimensions of CUI Returns ------- height, width : int The dimensions of drawable CUI space in characters # Draw Functions. Function for drawing widgets, status bars, and popups Function that draws all of the widgets to the screen # We draw the selected widget last to support cursor location. Draws status bar and title bar Parameters ---------- stdscr : curses Standard cursor The cursor used to draw the status bar height : int Window height in terminal characters width : int Window width in terminal characters Function that prints some basic error info if there is an error with the CUI Parameters ---------- stdscr : curses Standard cursor The cursor used to draw the warning error_info : str The information regarding the error. Function that handles all main loop key presses. 
Parameters ---------- key_pressed : py_cui.keys.KEY_* The key being pressed # Selected widget represents which widget is being hovered over, though not necessarily in focus mode # If we are in focus mode, the widget has all of the control of the keyboard except # for the escape key, which exits focus mode. # widget handles remaining py_cui.keys # Otherwise, barring a popup, we are in overview mode, meaning that arrow py_cui.keys move between widgets, and Enter key starts focus mode # If not in focus mode, use the arrow py_cui.keys to move around the selectable widgets. # if we have a popup, that takes key control from both overview and focus mode Main CUI draw loop called by start() Parameters ---------- stdscr : curses Standard screen The screen buffer used for drawing CUI elements # Clear and refresh the screen for a blank canvas # stdscr.nodelay(False) #stdscr.keypad(True) # Initialization functions. Generates colors and renderer # If user specified a refresh timeout, apply it here # If user sets non-default border characters, update them here # Loop where key_pressed is the last character pressed. Wait for exit key while no popup or focus mode # If we call stop, we want to break out of the main draw loop # Initialization and size adjustment # find height width, adjust if status/title bar added. We decrement the height by 4 to account for status/title bar and padding # If the user defined an update function to fire on each draw call, # Run it here. This can of course be also handled user-side # through a separate thread. # This is what allows the CUI to be responsive. Adjust grid size based on current terminal size # Resize the grid and the widgets if there was a resize operation # Here we handle mouse click events globally, or pass them to the UI element to handle # In first case, we click inside already selected widget, pass click for processing # Otherwise, if not a popup, select the clicked on widget # If we have a post_loading_callback, fire it here # Handle widget cycling # Handle keypresses # Draw status/title bar, and all widgets. Selected widget will be bolded. # draw the popup if required # Refresh the screen # Wait for next input # When loading, refresh screen every quarter second # Need to reset key_pressed, because otherwise the previously pressed key will be used. Override of base format function. Prints list of current widgets. Parameters ---------- fmt : Format The format to override | 2.478815 | 2 |
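A hedged sketch of the py_cui API that the extracted docstrings above describe; the grid size, slider values, key, and popup text are invented for illustration and only use calls named in those docstrings (add_slider, add_key_command, show_message_popup, start):

import py_cui

root = py_cui.PyCUI(3, 3)  # 3x3 widget grid
# add_slider follows the documented signature: title, row, column, spans, padding, min/max/step/init
volume = root.add_slider('Volume', 0, 0, column_span=2, min_val=0, max_val=100, step=5, init_val=50)
# overview-mode keybinding that fires a message popup, as described above
root.add_key_command(py_cui.keys.KEY_S_LOWER,
                     lambda: root.show_message_popup('Saved', 'Settings stored'))
root.start()  # enters the main draw loop documented above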
isi_sdk_8_0/isi_sdk_8_0/models/auth_access_access_item_file.py | mohitjain97/isilon_sdk_python | 24 | 7403 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AuthAccessAccessItemFile(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'group': 'str',
'mode': 'str',
'owner': 'str',
'relevant_mode': 'str'
}
attribute_map = {
'group': 'group',
'mode': 'mode',
'owner': 'owner',
'relevant_mode': 'relevant_mode'
}
def __init__(self, group=None, mode=None, owner=None, relevant_mode=None): # noqa: E501
"""AuthAccessAccessItemFile - a model defined in Swagger""" # noqa: E501
self._group = None
self._mode = None
self._owner = None
self._relevant_mode = None
self.discriminator = None
if group is not None:
self.group = group
if mode is not None:
self.mode = mode
if owner is not None:
self.owner = owner
if relevant_mode is not None:
self.relevant_mode = relevant_mode
@property
def group(self):
"""Gets the group of this AuthAccessAccessItemFile. # noqa: E501
Specifies the group name or ID for the file. # noqa: E501
:return: The group of this AuthAccessAccessItemFile. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this AuthAccessAccessItemFile.
Specifies the group name or ID for the file. # noqa: E501
:param group: The group of this AuthAccessAccessItemFile. # noqa: E501
:type: str
"""
self._group = group
@property
def mode(self):
"""Gets the mode of this AuthAccessAccessItemFile. # noqa: E501
Specifies the mode bits on the file. # noqa: E501
:return: The mode of this AuthAccessAccessItemFile. # noqa: E501
:rtype: str
"""
return self._mode
@mode.setter
def mode(self, mode):
"""Sets the mode of this AuthAccessAccessItemFile.
Specifies the mode bits on the file. # noqa: E501
:param mode: The mode of this AuthAccessAccessItemFile. # noqa: E501
:type: str
"""
self._mode = mode
@property
def owner(self):
"""Gets the owner of this AuthAccessAccessItemFile. # noqa: E501
Specifies the name or ID of the file owner. # noqa: E501
:return: The owner of this AuthAccessAccessItemFile. # noqa: E501
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this AuthAccessAccessItemFile.
Specifies the name or ID of the file owner. # noqa: E501
:param owner: The owner of this AuthAccessAccessItemFile. # noqa: E501
:type: str
"""
self._owner = owner
@property
def relevant_mode(self):
"""Gets the relevant_mode of this AuthAccessAccessItemFile. # noqa: E501
Specifies the mode bits that are related to the user. # noqa: E501
:return: The relevant_mode of this AuthAccessAccessItemFile. # noqa: E501
:rtype: str
"""
return self._relevant_mode
@relevant_mode.setter
def relevant_mode(self, relevant_mode):
"""Sets the relevant_mode of this AuthAccessAccessItemFile.
Specifies the mode bits that are related to the user. # noqa: E501
:param relevant_mode: The relevant_mode of this AuthAccessAccessItemFile. # noqa: E501
:type: str
"""
self._relevant_mode = relevant_mode
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuthAccessAccessItemFile):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
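A brief usage sketch for the generated model above; the field values are invented placeholders:

from isi_sdk_8_0.models.auth_access_access_item_file import AuthAccessAccessItemFile

item = AuthAccessAccessItemFile(group='wheel', mode='0755', owner='root', relevant_mode='0600')
print(item.to_dict())  # plain dict used for JSON serialization
print(item)            # __repr__ delegates to to_str(), i.e. pprint of to_dict()
assert item == AuthAccessAccessItemFile(group='wheel', mode='0755', owner='root', relevant_mode='0600')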
| en | 0.658575 | # coding: utf-8 Isilon SDK Isilon SDK - Language bindings for the OneFS API # noqa: E501 OpenAPI spec version: 3 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 AuthAccessAccessItemFile - a model defined in Swagger # noqa: E501 Gets the group of this AuthAccessAccessItemFile. # noqa: E501 Specifies the group name or ID for the file. # noqa: E501 :return: The group of this AuthAccessAccessItemFile. # noqa: E501 :rtype: str Sets the group of this AuthAccessAccessItemFile. Specifies the group name or ID for the file. # noqa: E501 :param group: The group of this AuthAccessAccessItemFile. # noqa: E501 :type: str Gets the mode of this AuthAccessAccessItemFile. # noqa: E501 Specifies the mode bits on the file. # noqa: E501 :return: The mode of this AuthAccessAccessItemFile. # noqa: E501 :rtype: str Sets the mode of this AuthAccessAccessItemFile. Specifies the mode bits on the file. # noqa: E501 :param mode: The mode of this AuthAccessAccessItemFile. # noqa: E501 :type: str Gets the owner of this AuthAccessAccessItemFile. # noqa: E501 Specifies the name or ID of the file owner. # noqa: E501 :return: The owner of this AuthAccessAccessItemFile. # noqa: E501 :rtype: str Sets the owner of this AuthAccessAccessItemFile. Specifies the name or ID of the file owner. # noqa: E501 :param owner: The owner of this AuthAccessAccessItemFile. # noqa: E501 :type: str Gets the relevant_mode of this AuthAccessAccessItemFile. # noqa: E501 Specifies the mode bits that are related to the user. # noqa: E501 :return: The relevant_mode of this AuthAccessAccessItemFile. # noqa: E501 :rtype: str Sets the relevant_mode of this AuthAccessAccessItemFile. Specifies the mode bits that are related to the user. # noqa: E501 :param relevant_mode: The relevant_mode of this AuthAccessAccessItemFile. # noqa: E501 :type: str Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.740562 | 2 |
dataconnector.py | iamthinkking/COMP4217_FinalProject | 0 | 7404 | <reponame>iamthinkking/COMP4217_FinalProject<gh_stars>0
#!/usr/bin/python3
import pymysql
class Connection:
SQL_HOST = 'localhost'
SQL_USR = ''
SQL_PWD = ''
SQL_DB = 'HOSPITAL'
# initialize database object
def __init__(self, usr, pwd):
self.USR = usr
        self.PWD = pwd
    # return a database connection
    def __enter__(self):
        # Open database connection; keyword arguments avoid relying on
        # positional parameters, which newer PyMySQL releases reject
        self.CON = pymysql.connect(host="localhost", user=self.USR, password=self.PWD,
                                   database="HOSPITAL", autocommit=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# make sure the database connection gets closed
self.CON.close()
def get_doctors(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_doctors();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def get_nurses(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_nurses();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetMedicineAllergyByMostPatients(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL GetMedicineAllergyByMostPatients();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetInternsByMostPatient(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL GetInternsByMostPatient();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetInternPerformanceData(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL GetInternPerformanceData();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def get_patients(self, q=""):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL get_patients('"+str(q)+"');")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
print(e)
return data
finally:
return data
def GetPatientByDiagnosisAndDate(self, start_date, end_date, diagnosis=""):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL GetPatientByDiagnosisAndDate('" + str(start_date) + "', '"
+ str(end_date) + "', '" + str(diagnosis) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_allergens_of_patient(self, patID):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL get_allergens_of_patient('"+str(patID)+"');")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
print(e)
return data
finally:
return data
def add_patient(self, fname, lname, dob, address, phone):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_add_patient('" + fname + "', '" + lname + "', '" + str(dob) + "', '" + address +
"', " + str(phone) + ");")
self.CON.commit()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def make_diagnosis(self, docID, patID, icdID, icdDesc, icdname, specifics):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL make_diagnosis(" + str(docID) + ", " + str(patID) + ", " + str(icdID) + ", '" +
icdDesc + "', '" + str(icdname) + "', '" + specifics + "');")
except pymysql.err.OperationalError as e:
return data
finally:
self.CON.commit()
return data
def check_vitals(self, nurseID, patID, temp, pulse_arg, bp, resp):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL check_vitals(" + str(nurseID) + ", " + str(patID) + ", " + str(temp) + ", '" +
str(pulse_arg) + "', '" + str(bp) + "', '" + str(resp) + "');")
except pymysql.err.OperationalError as e:
return data
finally:
self.CON.commit()
return data
def login(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_currentuser('" + self.USR + "');")
# gets only one tuple from the database's response
data = cursor.fetchone()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def get_role(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_currentuser('" + self.USR + "');")
# gets only one tuple from the database's response
data = cursor.fetchone()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetNursesByPatientAndDate(self, start_date, end_date, pat_ID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL GetNursesByPatientAndDate('" + str(start_date) + "', '"
+ str(end_date) + "', '" + str(pat_ID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
    def get_allergens_of_patient(self, patID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_allergens_of_patient('" + str(patID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_medicine_allergy_by_most_patients(self):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_medicine_allergy_by_most_patients();")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
    def GetResultsByPatient(self, patID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL GetResultsByPatient('" + str(patID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
    def get_nurses_by_patient_and_date(self, start_date, end_date, patID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_nurses_by_patient_and_date('" + str(start_date) + "', '" + str(end_date) + "', '"
+ str(patID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_interns_by_most_patients(self):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_interns_by_most_patients();")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
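A hypothetical driver for the Connection class above; the credentials and patient ID are placeholders, and the stored procedures are assumed to exist in the HOSPITAL schema:

from dataconnector import Connection

# __enter__/__exit__ let the class manage the connection as a context manager
with Connection('clerk', 'not-a-real-password') as con:
    current_user = con.login()               # single tuple from sp_get_currentuser
    for doctor in con.get_doctors():         # tuples from sp_get_doctors
        print(doctor)
    print(con.get_allergens_of_patient(42))  # hypothetical patient ID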
| en | 0.601329 | #!/usr/bin/python3 # initialize database object # return a database connection # Open database connection # make sure the database connection gets closed # prepare a cursor object using cursor() method # execute SQL query using execute() method. # Fetch all the tuples in a list of lists. # prepare a cursor object using cursor() method # execute SQL query using execute() method. # Fetch all the tuples in a list of lists. # prepare a cursor object using cursor() method # execute SQL query using execute() method. # Fetch all the tuples in a list of lists. # prepare a cursor object using cursor() method # execute SQL query using execute() method. # Fetch all the tuples in a list of lists. # prepare a cursor object using cursor() method # execute SQL query using execute() method. # Fetch all the tuples in a list of lists. # prepare a cursor object using cursor() method # execute SQL query using execute() method. # Fetch all the tuples in a list of lists. # prepare a cursor object using cursor() method # execute SQL query using execute method # fetch all the tuples in a list of lists # prepare a cursor object using cursor() method # execute SQL query using execute() method. # Fetch all the tuples in a list of lists. # prepare a cursor object using cursor() method # execute SQL query using execute() method. # prepare a cursor object using cursor() method # execute SQL query using execute() method. # prepare a cursor object using cursor() method # execute SQL query using execute() method. # prepare a cursor object using cursor() method # execute SQL query using execute() method. # gets only one tuple from the database's response # prepare a cursor object using cursor() method # execute SQL query using execute() method. # gets only one tuple from the database's response # prepare a cursor object using cursor() method # execute SQL query using execute method # fetch all the tuples in a list of lists # prepare a cursor object using cursor() method # execute SQL query using execute method # fetch all the tuples in a list of lists # prepare a cursor object using cursor() method # execute SQL query using execute method # fetch all the tuples in a list of lists # prepare a cursor object using cursor() method # execute SQL query using execute method # fetch all the tuples in a list of lists # prepare a cursor object using cursor() method # execute SQL query using execute method # fetch all the tuples in a list of lists # prepare a cursor object using cursor() method # execute SQL query using execute method # fetch all the tuples in a list of lists | 3.231074 | 3 |
flow/visualize/plot_custom_callables.py | AHammoudeh/Flow_AH | 0 | 7405 | <reponame>AHammoudeh/Flow_AH
"""Generate charts from .npy files containing custom callables through replay."""
import argparse
from datetime import datetime
import errno
import numpy as np
import matplotlib.pyplot as plt
import os
import pytz
import sys
def make_bar_plot(vals, title):
print(len(vals))
fig = plt.figure()
plt.hist(vals, 10, facecolor='blue', alpha=0.5)
plt.title(title)
plt.xlim(1000,3000)
return fig
def plot_trip_distribution(all_trip_energy_distribution):
non_av_vals = []
figures = []
figure_names = []
for key in all_trip_energy_distribution:
if key != 'av':
non_av_vals.extend(all_trip_energy_distribution[key])
figures.append(make_bar_plot(all_trip_energy_distribution[key], key))
figure_names.append(key)
figure_names.append('All Non-AV')
figures.append(make_bar_plot(non_av_vals, 'All Non-AV'))
figure_names.append('All')
figures.append(make_bar_plot(non_av_vals + all_trip_energy_distribution['av'], 'All'))
return figure_names, figures
def parse_flags(args):
"""Parse training options user can specify in command line.
Returns
-------
argparse.Namespace
the output parser object
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Parse argument used when running a Flow simulation.",
epilog="python train.py EXP_CONFIG")
parser.add_argument("target_folder", type=str,
help='Folder containing results')
parser.add_argument("--output_folder", type=str, required=False, default=None,
help='Folder to save charts to.')
parser.add_argument("--show_images", action='store_true',
help='Whether to display charts.')
parser.add_argument("--heatmap", type=str, required=False,
help='Make a heatmap of the supplied variable.')
return parser.parse_args(args)
if __name__ == "__main__":
flags = parse_flags(sys.argv[1:])
date = datetime.now(tz=pytz.utc)
date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y")
if flags.output_folder:
if not os.path.exists(flags.output_folder):
try:
os.makedirs(flags.output_folder)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
info_dicts = []
custom_callable_names = set()
exp_names = []
for (dirpath, dir_names, file_names) in os.walk(flags.target_folder):
for file_name in file_names:
if file_name[-8:] == "info.npy":
exp_name = os.path.basename(dirpath)
info_dict = np.load(os.path.join(dirpath, file_name), allow_pickle=True).item()
info_dicts.append(info_dict)
print(info_dict.keys())
exp_names.append(exp_name)
custom_callable_names.update(info_dict.keys())
idxs = np.argsort(exp_names)
exp_names = [exp_names[i] for i in idxs]
info_dicts = [info_dicts[i] for i in idxs]
if flags.heatmap is not None:
heatmap = np.zeros((4, 6))
pr_spacing = np.around(np.linspace(0, 0.3, 4), decimals=2)
apr_spacing = np.around(np.linspace(0, 0.5, 6), decimals=2)
for exp_name, info_dict in zip(exp_names, info_dicts):
apr_bucket = int(np.around(float(exp_name.split('_')[1][3:]) / 0.1))
pr_bucket = int(np.around(float(exp_name.split('_')[0][2:]) / 0.1))
if flags.heatmap not in info_dict:
print(exp_name)
continue
else:
val = np.mean(info_dict[flags.heatmap])
print(exp_name, pr_bucket, pr_spacing[pr_bucket], apr_bucket, apr_spacing[apr_bucket], val)
heatmap[pr_bucket, apr_bucket] = val
fig = plt.figure()
plt.imshow(heatmap, interpolation='nearest', cmap='seismic', aspect='equal', vmin=1500, vmax=3000)
plt.title(flags.heatmap)
plt.yticks(ticks=np.arange(len(pr_spacing)), labels=pr_spacing)
plt.ylabel("AV Penetration")
plt.xticks(ticks=np.arange(len(apr_spacing)), labels=apr_spacing)
plt.xlabel("Aggressive Driver Penetration")
plt.colorbar()
plt.show()
plt.close(fig)
else:
for name in custom_callable_names:
y_vals = [np.mean(info_dict[name]) for info_dict in info_dicts]
y_stds = [np.std(info_dict[name]) for info_dict in info_dicts]
x_pos = np.arange(len(exp_names))
plt.bar(x_pos, y_vals, align='center', alpha=0.5)
plt.xticks(x_pos, [exp_name for exp_name in exp_names], rotation=60)
plt.xlabel('Experiment')
plt.title('I210 Replay Result: {}'.format(name))
plt.tight_layout()
if flags.output_folder:
plt.savefig(os.path.join(flags.output_folder, '{}-plot.png'.format(name)))
plt.show()
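A hedged invocation sketch for the script above, run from within the same module; the folder names and histogram values are invented:

# Equivalent to: python plot_custom_callables.py ./results --output_folder ./charts
flags = parse_flags(['./results', '--output_folder', './charts'])
# make_bar_plot clips the x-axis to [1000, 3000]; these values are placeholders
fig = make_bar_plot([1500, 1800, 2250, 2600], 'example distribution')
fig.savefig('example-hist.png')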
| en | 0.483398 | Generate charts from .npy files containing custom callables through replay. Parse training options user can specify in command line. Returns ------- argparse.Namespace the output parser object | 2.777647 | 3 |
deployer/src/config_manager.py | yugabyte/docsearch-scraper | 0 | 7406 | <filename>deployer/src/config_manager.py
import algoliasearch
from os import environ
from . import algolia_helper
from . import snippeter
from . import emails
from . import helpers
from . import fetchers
from .helpdesk_helper import add_note, get_conversation, \
get_emails_from_conversation, get_conversation_url_from_cuid
from deployer.src.algolia_internal_api import remove_user_from_index
class ConfigManager:
instance = None
def __init__(self):
if not ConfigManager.instance:
ConfigManager.instance = ConfigManager.__ConfigManager()
@staticmethod
def encode_set(to_encode):
encoded = []
for config_name in to_encode:
try:
config_name = config_name.decode()
except AttributeError:
                print("Error decoding non-string var {}".format(config_name))
pass
encoded.append(config_name)
return encoded
class __ConfigManager:
def __init__(self):
self.public_dir = environ.get('PUBLIC_CONFIG_FOLDER')
self.private_dir = environ.get('PRIVATE_CONFIG_FOLDER')
if self.public_dir is None or self.private_dir is None:
print(
'PUBLIC_CONFIG_FOLDER and PRIVATE_CONFIG_FOLDER must be defined in the environment')
exit()
self.initial_public_nb_stash = None
self.final_nb_public_stash = None
self.initial_private_nb_stash = None
self.final_nb_private_stash = None
self.init()
self.ref_configs = fetchers.get_configs_from_repos()
def init(self):
output = helpers.check_output_decoded(['git', 'stash', 'list'],
cwd=self.public_dir)
self.initial_public_nb_stash = len(output.split('\n'))
helpers.check_output_decoded(
['git', 'stash', '--include-untracked'],
cwd=self.public_dir)
output2 = helpers.check_output_decoded(['git', 'stash', 'list'],
cwd=self.public_dir)
self.final_nb_public_stash = len(output2.split('\n'))
helpers.check_output_decoded(
['git', 'pull', '-r', 'origin', 'master'],
cwd=self.public_dir)
output = helpers.check_output_decoded(['git', 'stash', 'list'],
cwd=self.private_dir)
self.initial_private_nb_stash = len(output.split('\n'))
helpers.check_output_decoded(
['git', 'stash', '--include-untracked'],
cwd=self.private_dir)
output2 = helpers.check_output_decoded(['git', 'stash', 'list'],
cwd=self.private_dir)
self.final_nb_private_stash = len(output2.split('\n'))
helpers.check_output_decoded(
['git', 'pull', '-r', 'origin', 'master'],
cwd=self.private_dir)
def destroy(self):
if self.final_nb_public_stash != self.initial_public_nb_stash:
helpers.check_output_decoded(['git', 'stash', 'pop'],
cwd=self.public_dir)
if self.final_nb_private_stash != self.initial_private_nb_stash:
helpers.check_output_decoded(['git', 'stash', 'pop'],
cwd=self.private_dir)
def add_config(self, config_name):
key = algolia_helper.add_docsearch_key(config_name)
print(config_name + ' (' + key + ')')
config = self.ref_configs[config_name]
print('\n================================\n')
if "conversation_id" in config:
cuid = config["conversation_id"][0]
# Add email(s) to the private config & grant access
conversation = get_conversation(cuid)
emails_from_conv = get_emails_from_conversation(conversation)
analytics_statuses = emails.add(config_name, self.private_dir,
emails_to_add=emails_from_conv)
note_content = snippeter.get_email_for_config(config_name,
analytics_statuses)
add_note(cuid, note_content)
print(
'Email address fetched and stored, conversation updated and available at {}\n'.format(
get_conversation_url_from_cuid(cuid)))
else:
if helpers.confirm(
'\nDo you want to add emails for {}?'.format(
config_name)):
analytics_statuses = emails.add(config_name,
self.private_dir)
print(snippeter.get_email_for_config(config_name,
analytics_statuses))
else:
print(snippeter.get_email_for_config(config_name))
def update_config(self, config_name):
message = config_name
try:
key = algolia_helper.get_docsearch_key(config_name)
message = message + ' (' + key + ')'
except algoliasearch.helpers.AlgoliaException:
pass
print(message)
print('\n================================\n')
print(snippeter.get_email_for_config(config_name))
if helpers.confirm(
'\nDo you want to add emails for {}?'.format(config_name)):
emails.add(config_name, self.private_dir)
def remove_config(self, config_name):
algolia_helper.delete_docsearch_key(config_name)
algolia_helper.delete_docsearch_index(config_name)
algolia_helper.delete_docsearch_index(config_name + '_tmp')
analytics_keys = algolia_helper.list_index_analytics_key(
config_name)
for key in analytics_keys:
description = key['description'].split()
email = description[4]
print(email)
if email is not None:
remove_user_from_index(config_name, email)
emails.delete(config_name, self.private_dir)
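A hypothetical driver for ConfigManager; the folder paths and config name are placeholders, and both environment variables must point at git checkouts for init() to succeed:

import os
from deployer.src.config_manager import ConfigManager

os.environ['PUBLIC_CONFIG_FOLDER'] = '/tmp/docsearch-configs/public'    # placeholder path
os.environ['PRIVATE_CONFIG_FOLDER'] = '/tmp/docsearch-configs/private'  # placeholder path

manager = ConfigManager()                       # first instantiation builds the singleton
manager.instance.update_config('example_docs')  # hypothetical config name
manager.instance.destroy()                      # pop any stashes created during init()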
| en | 0.681699 | # Add email(s) to the private config & grant access | 1.967979 | 2 |
Source/budgie/__init__.py | pylover/budgie | 3 | 7407 |
import sys
from sqlalchemy.exc import DatabaseError
from . import cli
from .configuration import settings, init as init_config
from .observer import HelpdeskObserver, MaximumClientsReached
from .models import init as init_models, metadata, engine, check_db
from .smtp import SMTPConfigurationError
__version__ = '0.1.0-dev.0'
def start_server(cli_arguments):
init_models()
# Checking database
try:
check_db()
except DatabaseError:
print(
            'Cannot connect to database, or database objects are not created yet. Please run `budgie setup-db`.',
file=sys.stderr
)
sys.exit(-1)
try:
manager = HelpdeskObserver()
manager.start()
except (
MaximumClientsReached,
SMTPConfigurationError) as ex:
print(ex, file=sys.stderr)
sys.exit(-1)
def main():
arguments = cli.init()
if arguments.version:
print(__version__)
sys.exit(0)
init_config(arguments.config_file if arguments.config_file else None)
if arguments.func is not None:
arguments.func(arguments)
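A minimal entry-point sketch, assuming the package is importable as budgie (for example through a console_scripts hook):

# run_budgie.py - thin launcher around the CLI defined above
from budgie import main

if __name__ == '__main__':
    main()  # flags such as --version are handled inside main()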
| pt | 0.233784 | # Checking database | 2.183913 | 2 |
locations/spiders/shopnsave.py | thismakessand/alltheplaces | 1 | 7408 | <reponame>thismakessand/alltheplaces<filename>locations/spiders/shopnsave.py
# -*- coding: utf-8 -*-
import scrapy
import re
from locations.items import GeojsonPointItem
DAY_DICT = {
'Mon': 'Mo',
'Tue': 'Tu',
'Wed': 'We',
'Thu': 'Th',
'Fri': 'Fr',
'Sat': 'Sa',
'Sun': 'Su',
'Monday': 'Mo',
'Tuesday': 'Tu',
'Wednesday': 'We',
'Thursday': 'Th',
'Thurs': 'Th',
'Thur': 'Th',
'Friday': 'Fr',
'Saturday': 'Sa',
'Sunday': 'Su',
'24 hours/7 days a week': '24/7',
'Please contact store for hours': 'N/A',
}
class ShopnSaveSpider(scrapy.Spider):
name = "shopnsave"
allowed_domains = ["www.shopnsave.com"]
start_urls = (
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=IL&page=1',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=IL&page=2',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=MO&page=1',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=MO&page=2',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=MO&page=3',
)
def parse(self, response):
stores = response.xpath('//table[@id="store-search-result"]/tbody/tr[@class="" or @class="store-grey"]')
for store in stores:
properties = {
"ref": store.xpath('td[@class="store-result-address"]/text()').extract_first(),
"name": store.xpath('td[@class="store-result-address"]/text()').extract_first(),
"opening_hours": self.store_hours(store.xpath('td[@class="store-result-address"]/text()[last()-1]').extract_first()),
"addr_full": store.xpath('td[@class="store-result-address"]/text()')[1].extract(),
"city": self.city(store.xpath('td[@class="store-result-address"]/text()')[2].extract()),
"state": self.state(store.xpath('td[@class="store-result-address"]/text()')[2].extract()),
"postcode": self.postCode(store.xpath('td[@class="store-result-address"]/text()')[2].extract()),
"phone": self.phone(store.xpath('td[@class="store-result-phone"]/strong/text()')[0].extract()),
}
yield GeojsonPointItem(**properties)
def city(self, data):
str_list = data.split(',')
return str_list[0].strip()
def state(self, data):
str_list = data.split(',')
state = str_list[1].strip()
state = state[:2]
return state
def postCode(self, data):
str_list = data.split(',')
zipCode = str_list[1].strip()
return zipCode[-5:]
def phone(self, data):
return data.replace('— Main', '')
def store_hours(self, store_hours):
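# Normalize a free-form hours string: map day names to two-letter codes via DAY_DICT and convert am/pm times to 24-hour HH:MM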
if "day" not in store_hours and "-" not in store_hours:
return ""
if "24 Hours, 7 days a week" in store_hours:
return "24/7"
store_hours = store_hours.replace('\r\n\t\t\t\t\t\t', '')
store_hours = store_hours.replace('Midnight', '00:00')
pattern = re.compile(r'\b(' + '|'.join(DAY_DICT.keys()) + r')\b')
store_hours = pattern.sub(lambda x: DAY_DICT[x.group()], store_hours)
store_hours = store_hours.replace('am', ':00')
m = re.search(r'([0-9]{1,2})(\spm)', store_hours)
if m:
h = m.group(1)
new_h = int(h) + 12
store_hours = store_hours.replace(h + ' pm', str(new_h) + ':00')
return store_hours
| # -*- coding: utf-8 -*-
import scrapy
import re
from locations.items import GeojsonPointItem
DAY_DICT = {
'Mon': 'Mo',
'Tue': 'Tu',
'Wed': 'We',
'Thu': 'Th',
'Fri': 'Fr',
'Sat': 'Sa',
'Sun': 'Su',
'Monday': 'Mo',
'Tuesday': 'Tu',
'Wednesday': 'We',
'Thursday': 'Th',
'Thurs': 'Th',
'Thur': 'Th',
'Friday': 'Fr',
'Saturday': 'Sa',
'Sunday': 'Su',
'24 hours/7 days a week': '24/7',
'Please contact store for hours': 'N/A',
}
class ShopnSaveSpider(scrapy.Spider):
name = "shopnsave"
allowed_domains = ["www.shopnsave.com"]
start_urls = (
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=IL&page=1',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=IL&page=2',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=MO&page=1',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=MO&page=2',
'https://www.shopnsave.com/stores/view-stores-by-state.html?state=MO&page=3',
)
def parse(self, response):
stores = response.xpath('//table[@id="store-search-result"]/tbody/tr[@class="" or @class="store-grey"]')
for store in stores:
properties = {
"ref": store.xpath('td[@class="store-result-address"]/text()').extract_first(),
"name": store.xpath('td[@class="store-result-address"]/text()').extract_first(),
"opening_hours": self.store_hours(store.xpath('td[@class="store-result-address"]/text()[last()-1]').extract_first()),
"addr_full": store.xpath('td[@class="store-result-address"]/text()')[1].extract(),
"city": self.city(store.xpath('td[@class="store-result-address"]/text()')[2].extract()),
"state": self.state(store.xpath('td[@class="store-result-address"]/text()')[2].extract()),
"postcode": self.postCode(store.xpath('td[@class="store-result-address"]/text()')[2].extract()),
"phone": self.phone(store.xpath('td[@class="store-result-phone"]/strong/text()')[0].extract()),
}
yield GeojsonPointItem(**properties)
def city(self, data):
str_list = data.split(',')
return str_list[0].strip()
def state(self, data):
str_list = data.split(',')
state = str_list[1].strip()
state = state[:2]
return state
def postCode(self, data):
str_list = data.split(',')
zipCode = str_list[1].strip()
return zipCode[-5:]
def phone(self, data):
return data.replace('— Main', '')
def store_hours(self, store_hours):
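# Normalize a free-form hours string: map day names to two-letter codes via DAY_DICT and convert am/pm times to 24-hour HH:MM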
if "day" not in store_hours and "-" not in store_hours:
return ""
if "24 Hours, 7 days a week" in store_hours:
return "24/7"
store_hours = store_hours.replace('\r\n\t\t\t\t\t\t', '')
store_hours = store_hours.replace('Midnight', '00:00')
pattern = re.compile(r'\b(' + '|'.join(DAY_DICT.keys()) + r')\b')
store_hours = pattern.sub(lambda x: DAY_DICT[x.group()], store_hours)
store_hours = store_hours.replace('am', ':00')
m = re.search(r'([0-9]{1,2})(\spm)', store_hours)
if m:
h = m.group(1)
new_h = int(h) + 12
store_hours = store_hours.replace(h + ' pm', str(new_h) + ':00')
return store_hours | en | 0.769321 | # -*- coding: utf-8 -*- | 2.652677 | 3 |
run.py | TovarischSuhov/QR_quest | 0 | 7409 | #!/usr/bin/env python
from app import app
app.run(debug = True)
| #!/usr/bin/env python
from app import app
app.run(debug = True)
| ru | 0.26433 | #!/usr/bin/env python | 1.112753 | 1 |
tests/twitter_learning_journal/dao/test_os_env.py | DEV3L/twitter-learning-journal | 1 | 7410 | <reponame>DEV3L/twitter-learning-journal<filename>tests/twitter_learning_journal/dao/test_os_env.py<gh_stars>1-10
from unittest.mock import patch
from app.twitter_learning_journal.dao.os_env import os_environ
@patch('app.twitter_learning_journal.dao.os_env.os')
def test_os_environ(mock_os):
expected_value = 'environment_value'
mock_os.environ.__contains__.return_value = True # patch in statement
mock_os.environ.__getitem__.return_value = expected_value
os_variable = os_environ('a_key')
assert expected_value == os_variable
mock_os.environ.__getitem__.assert_called_with('a_key')
def test_os_environ_key_missing():
expected_value = None
os_variable = os_environ('a_key')
assert expected_value == os_variable
def test_os_environ_key_missing_with_default():
expected_value = 'a_default'
os_variable = os_environ('a_key', default=expected_value)
assert expected_value == os_variable
| from unittest.mock import patch
from app.twitter_learning_journal.dao.os_env import os_environ
@patch('app.twitter_learning_journal.dao.os_env.os')
def test_os_environ(mock_os):
expected_value = 'environment_value'
mock_os.environ.__contains__.return_value = True # patch in statement
mock_os.environ.__getitem__.return_value = expected_value
os_variable = os_environ('a_key')
assert expected_value == os_variable
mock_os.environ.__getitem__.assert_called_with('a_key')
def test_os_environ_key_missing():
expected_value = None
os_variable = os_environ('a_key')
assert expected_value == os_variable
def test_os_environ_key_missing_with_default():
expected_value = 'a_default'
os_variable = os_environ('a_key', default=expected_value)
assert expected_value == os_variable | en | 0.912117 | # patch in statement | 2.722477 | 3 |
web-scraper/mongoscraper/populate.py | naveenr414/hack-umbc | 0 | 7411 | <filename>web-scraper/mongoscraper/populate.py
import pymongo
myclient = pymongo.MongoClient()
mydb = myclient["mydb"]
hor = mydb["HoR"]
sen = mydb["Senator"]
gov = mydb["Governor"]
def write(fileJSON):
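# Route a scraped document to the matching collection: "hor" -> HoR, "senate" -> Senator, anything else -> Governor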
myDoc = fileJSON
if( "hor" in myDoc.values()):
hor.insert_one(myDoc)
elif( "senate" in myDoc.values()):
sen.insert_one(myDoc)
else:
gov.insert_one(myDoc)
def deletes():
for x in sen.find():
sen.delete_one(x)
def prints():
for x in sen.find():
print(x)
| <filename>web-scraper/mongoscraper/populate.py
import pymongo
myclient = pymongo.MongoClient()
mydb = myclient["mydb"]
hor = mydb["HoR"]
sen = mydb["Senator"]
gov = mydb["Governor"]
def write(fileJSON):
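# Route a scraped document to the matching collection: "hor" -> HoR, "senate" -> Senator, anything else -> Governor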
myDoc = fileJSON
if( "hor" in myDoc.values()):
hor.insert_one(myDoc)
elif( "senate" in myDoc.values()):
sen.insert_one(myDoc)
else:
gov.insert_one(myDoc)
def deletes():
for x in sen.find():
sen.delete_one(x)
def prints():
for x in sen.find():
print(x)
| none | 1 | 2.980343 | 3 |
|
tests/test_utils_obj_value.py | ZSD-tim/dayu_widgets | 157 | 7412 | <reponame>ZSD-tim/dayu_widgets
"""
Test get_obj_value set_obj_value has_obj_value
"""
import pytest
from dayu_widgets import utils
class _HasNameAgeObject(object):
def __init__(self, name, age):
super(_HasNameAgeObject, self).__init__()
self.name = name
self.age = age
@pytest.mark.parametrize('obj', (
{'name': 'xiaoming', 'age': 18},
_HasNameAgeObject('xiaoming', 18)
))
class TestObjValue(object):
"""Test get_obj_value has_obj_value set_obj_value collection."""
@pytest.mark.parametrize('attr, default, result', (
('name', 'hhh', 'xiaoming'),
('age', 0, 18),
('score', 0, 0)
))
def test_get_obj_value(self, obj, attr, default, result):
"""Test get_obj_value with dict/object as arg. """
assert utils.get_obj_value(obj, attr, default) == result
@pytest.mark.parametrize('attr, result', (
('name', True),
('age', True),
('sex', False),
))
def test_has_obj_value(self, obj, attr, result):
"""Test has_obj_value with dict/object as arg. """
assert utils.has_obj_value(obj, attr) == result
@pytest.mark.parametrize('attr, value', (
('name', 'xiaohua'),
('age', 30),
('id', 80),
))
def test_set_obj_value(self, obj, attr, value):
"""Test set_obj_value with dict/object as arg. """
utils.set_obj_value(obj, attr, value)
assert utils.get_obj_value(obj, attr) == value
| """
Test get_obj_value set_obj_value has_obj_value
"""
import pytest
from dayu_widgets import utils
class _HasNameAgeObject(object):
def __init__(self, name, age):
super(_HasNameAgeObject, self).__init__()
self.name = name
self.age = age
@pytest.mark.parametrize('obj', (
{'name': 'xiaoming', 'age': 18},
_HasNameAgeObject('xiaoming', 18)
))
class TestObjValue(object):
"""Test get_obj_value has_obj_value set_obj_value collection."""
@pytest.mark.parametrize('attr, default, result', (
('name', 'hhh', 'xiaoming'),
('age', 0, 18),
('score', 0, 0)
))
def test_get_obj_value(self, obj, attr, default, result):
"""Test get_obj_value with dict/object as arg. """
assert utils.get_obj_value(obj, attr, default) == result
@pytest.mark.parametrize('attr, result', (
('name', True),
('age', True),
('sex', False),
))
def test_has_obj_value(self, obj, attr, result):
"""Test has_obj_value with dict/object as arg. """
assert utils.has_obj_value(obj, attr) == result
@pytest.mark.parametrize('attr, value', (
('name', 'xiaohua'),
('age', 30),
('id', 80),
))
def test_set_obj_value(self, obj, attr, value):
"""Test set_obj_value with dict/object as arg. """
utils.set_obj_value(obj, attr, value)
assert utils.get_obj_value(obj, attr) == value | en | 0.419129 | Test get_obj_value set_obj_value has_obj_value Test get_obj_value has_obj_value set_obj_value collection. Test get_obj_value with dict/object as arg. Test has_obj_value with dict/object as arg. Test set_obj_value with dict/object as arg. | 2.852573 | 3 |
desktop/core/ext-py/PyYAML-3.12/tests/lib3/test_all.py | kokosing/hue | 5,079 | 7413 |
import sys, yaml, test_appliance
def main(args=None):
collections = []
import test_yaml
collections.append(test_yaml)
if yaml.__with_libyaml__:
import test_yaml_ext
collections.append(test_yaml_ext)
return test_appliance.run(collections, args)
if __name__ == '__main__':
main()
|
import sys, yaml, test_appliance
def main(args=None):
collections = []
import test_yaml
collections.append(test_yaml)
if yaml.__with_libyaml__:
import test_yaml_ext
collections.append(test_yaml_ext)
return test_appliance.run(collections, args)
if __name__ == '__main__':
main()
| none | 1 | 1.912644 | 2 |
|
tim_camera/oop_detection_webcam.py | Tim-orius/aidem | 0 | 7414 | <reponame>Tim-orius/aidem
""" Webcam Detection with Tensorflow calssifier and object distance calculation """
__version__ = "0.1.0"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__credits__ = "Special thanks to The Anh Vuong who came up with the original idea." \
"This code is also based off of the code from Evan Juras (see below)"
# This script is based off of a script by <NAME> (see below).
# I rewrote this script to be object oriented and added the tkinter-ui (removed command
# line functionalities) as well as several functionalities to calculate the distance
# between two detected objects
######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: <NAME>
# Date: 10/27/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a live webcam
# feed. It draws boxes and scores around the objects of interest in each frame from the
# webcam. To improve FPS, the webcam object runs in a separate thread from the main program.
# This script will work with either a Picamera or regular USB webcam.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I [Evan Juras] added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
import math
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - <NAME>, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
"""Camera object that controls video streaming from the Picamera"""
def __init__(self,resolution=(640,480),framerate=30):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
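# Request MJPG frames; on many USB webcams this allows higher resolutions at full frame rate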
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
ret = self.stream.set(3,resolution[0])
ret = self.stream.set(4,resolution[1])
self.width = self.stream.get(3)
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
def continue_video(self):
# Indicate that camera should resume
self.stopped = False
self.start()
class LiveDetection:
"""
"""
def __init__(self):
"""
"""
MODEL_NAME = 'Sample_Model'
GRAPH_NAME = 'detect.tflite'
LABELMAP_NAME = 'labelmap.txt'
self.__min_conf_threshold = 0.5
resW, resH = '1280x720'.split('x')
self.__imW, self.__imH = int(resW), int(resH)
use_TPU = ''
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
if (GRAPH_NAME == 'detect.tflite'):
GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
self.__labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if self.__labels[0] == '???':
del(self.__labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
self._interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
self._interpreter = Interpreter(model_path=PATH_TO_CKPT)
self._interpreter.allocate_tensors()
# Get model details
self.__input_details = self._interpreter.get_input_details()
self.__output_details = self._interpreter.get_output_details()
self.__height = self.__input_details[0]['shape'][1]
self.__width = self.__input_details[0]['shape'][2]
self.__floating_model = (self.__input_details[0]['dtype'] == np.float32)
self.input_mean = 127.5 # normalization constants; stored on self so calibrate()/detect() can use them
self.input_std = 127.5
# Initialize frame rate calculation
self.__frame_rate_calc = 1
self.__freq = cv2.getTickFrequency()
# Initialize video stream
self._videostream = VideoStream(resolution=(self.__imW,self.__imH),framerate=30).start()
time.sleep(1)
# -----------------------------------------------------------------
# Average parameters
self.avg_width_person = 45+8+4 # +8 due to borders not aligning to body
self.avg_height_person = 172
self.avg_proportion_person = self.avg_width_person / self.avg_height_person
self.test_distance = 216
# Old value:
self.fokal_empir = 1500
# Variable for new calibrated value:
self.focal_value = 0
def calibrate(self,
obj_width_cm:int=0,
obj_dist_cm:int=0,
obj_name:str=""
):
"""
"""
color_variation = 0
foc_meas = 0
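# Average the focal estimate over 10 frames to smooth out per-frame detection jitter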
for i in range(10):
# Grab frame from video stream
frame1 = self._videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self.__width, self.__height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self.__floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
# Perform the actual detection by running the model with the image as input
self._interpreter.set_tensor(self.__input_details[0]['index'],input_data)
self._interpreter.invoke()
# Retrieve detection results
boxes = self._interpreter.get_tensor(self.__output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = self._interpreter.get_tensor(self.__output_details[1]['index'])[0] # Class index of detected objects
scores = self._interpreter.get_tensor(self.__output_details[2]['index'])[0] # Confidence of detected objects
obj_type = []
for i in range(len(scores)):
if ((scores[i] > self.__min_conf_threshold) and (scores[i] <= 1.0)):
# Check for the right object (ensure correct measurement when several objects are detected)
if(self.__labels[int(classes[i])] != obj_name):
continue
else:
obj_type.append(str(self.__labels[int(classes[i])]))
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * self.__imH)))
xmin = int(max(1,(boxes[i][1] * self.__imW)))
ymax = int(min(self.__imH,(boxes[i][2] * self.__imH)))
xmax = int(min(self.__imW,(boxes[i][3] * self.__imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, (40+(40*i))%255, (color_variation*40)%255), 2)
# Calculate object width in pixel
obj_width_pixels = xmax - xmin
foc_meas += (obj_width_pixels * obj_dist_cm) / obj_width_cm
# Draw label
object_name = self.__labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(self.__frame_rate_calc),(15,35),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
self.focal_value = foc_meas / 10
print("Calculated focal value:",self.focal_value)
print("Calibration done")
def detect(self):
"""
"""
color_variation = 0;
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = self._videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self.__width, self.__height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self.__floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
# Perform the actual detection by running the model with the image as input
self._interpreter.set_tensor(self.__input_details[0]['index'],input_data)
self._interpreter.invoke()
# Retrieve detection results
boxes = self._interpreter.get_tensor(self.__output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = self._interpreter.get_tensor(self.__output_details[1]['index'])[0] # Class index of detected objects
scores = self._interpreter.get_tensor(self.__output_details[2]['index'])[0] # Confidence of detected objects
#num = self._interpreter.get_tensor(self.__output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
# --------------------------------------------------------------------------------------------------------
coords = []
proportion_x = []
proportion_y = []
camera_distance = []
obj_type = []
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > self.__min_conf_threshold) and (scores[i] <= 1.0)):
if(self.__labels[int(classes[i])] != "person" and self.__labels[int(classes[i])] != "teddy bear" and self.__labels[int(classes[i])] != "chair"):
continue
else:
obj_type.append(str(self.__labels[int(classes[i])]))
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * self.__imH)))
xmin = int(max(1,(boxes[i][1] * self.__imW)))
ymax = int(min(self.__imH,(boxes[i][2] * self.__imH)))
xmax = int(min(self.__imW,(boxes[i][3] * self.__imW)))
if (i+1)*40 > 255:
color_variation += 1
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, (40+(40*i))%255, (color_variation*40)%255), 2)
# Save coordinates of detected person
coords.append([[xmin, ymin],[xmax, ymax]])
# For testing (screen width of camera)
vid_width = int(self._videostream.width)
if(len(coords) > 1):
# preparation
for a in range(len(coords)):
proportion_x.append(0)
proportion_y.append(0)
for i in range(len(coords)):
# Measure height and width of detected person (in pixel)
proportion_x[i] = coords[i][1][0] - coords[i][0][0] # Width
#proportion_y[i] = coords[i][1][1] - coords[i][0][1] # Height
#proportion_x[i] = xmax - xmin
# P = proportion_x[i]
# F = focal value, W = object width (cm), P = object width (pixels), D = distance (cm)
# F = (P * D) / W -> D = (F * W) / P
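# Example: a person 53 cm wide spanning 368 px with F = 1500 gives D = (1500 * 53) / 368 ≈ 216 cm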
# F = (P * test_distance) / (45+8)
# print(F)
# Calculate object distance to camera
camera_distance.append((self.focal_value * self.avg_width_person) / proportion_x[i])
print("Distance obj "+str(i)+" ("+str(obj_type)+") - camera: "+str(camera_distance[i]), flush=True)
if(i>0):
# Calculate min dist (only horizontal)
if(obj_type[i] == "person"):
min_dist_x = proportion_x[i]/self.avg_width_person * 150
elif(obj_type[i] == "chair"):
min_dist_x = proportion_x[i]/80 * 150
else:
min_dist_x = 500
#min_dist_x = 300
for j in range(i):
min_dist_obj_x_1 = abs(coords[i][1][0] - coords[j][0][0])
min_dist_obj_x_2 = abs(coords[j][1][0] - coords[i][0][0])
dist_obj_z = abs(camera_distance[i] - camera_distance[j])
# Test with distance to borders
#min_dist_obj_x_1 = abs(coords[i][1][0] - vid_width) # To the right
#min_dist_obj_x_2 = abs(coords[i][0][0] - 0) # To the left
print("X-Distanz objekt i -> j: "+str(min_dist_obj_x_1)+" - X-Distanz obj j -> i: "+str(min_dist_obj_x_2)+" - minimale Distanz: "+str(min_dist_x), flush=True)
print("Z-Distanz objekt i - j: "+str(dist_obj_z), flush=True)
# Check for smaller distance
if(min_dist_obj_x_1 < min_dist_obj_x_2):
objects_distance = math.sqrt(min_dist_obj_x_1**2 + dist_obj_z**2)
if(objects_distance < min_dist_x):
print("AAAA "+str(objects_distance)+" j = "+obj_type[j], flush=True)
cv2.line(frame, (coords[i][1][0], coords[i][1][1]), (coords[j][0][0],coords[j][1][1]), (255,10,0), 2)
#cv2.line(frame, (coords[i][1][0], coords[i][1][1]+30), (vid_width,coords[i][1][1]+30), (255,10,0), 2)
dist_label = '%s / %d' % (round(objects_distance, 2), round(min_dist_x, 2))
dist_labelSize, dist_baseLine = cv2.getTextSize(dist_label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
dist_label_ymin = max(coords[i][1][1], dist_labelSize[1] + 10)
cv2.rectangle(frame, (coords[i][1][0], dist_label_ymin-dist_labelSize[1]-10), (coords[i][1][0]+dist_labelSize[0], dist_label_ymin+dist_baseLine-10), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, dist_label, (coords[i][1][0], dist_label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
elif(min_dist_obj_x_1 > min_dist_obj_x_2):
objects_distance = math.sqrt(min_dist_obj_x_2**2 + dist_obj_z**2)
if(objects_distance < min_dist_x):
print("BBB "+str(objects_distance)+" j = "+obj_type[j], flush=True)
cv2.line(frame, (coords[j][1][0], coords[j][1][1]), (coords[i][0][0],coords[i][1][1]), (255,10,0), 2)
#cv2.line(frame, (coords[i][0][0], coords[i][0][1]), (0,coords[i][0][1]), (255,10,0), 2)
dist_label = '%s / %d' % (round(objects_distance, 2), round(min_dist_x, 2))
dist_labelSize, dist_baseLine = cv2.getTextSize(dist_label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
dist_label_ymin = max(coords[j][1][1], dist_labelSize[1] + 10)
cv2.rectangle(frame, (coords[j][1][0], dist_label_ymin-dist_labelSize[1]-10), (coords[j][1][0]+dist_labelSize[0], dist_label_ymin+dist_baseLine-10), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, dist_label, (coords[j][1][0], dist_label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
else:
# ...
b = 1
else:
# ...
b = 2
else:
# ...
b = 3
# Draw label
object_name = self.__labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(self.__frame_rate_calc),(15,35),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/self.__freq
self.__frame_rate_calc= 1/time1
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
def __del__(self):
"""
"""
# Clean up
self._videostream.stop()
cv2.destroyAllWindows()
def main():
det_ob = LiveDetection()
det_ob.detect()
del det_ob
if __name__ == "__main__":
main()
| """ Webcam Detection with Tensorflow calssifier and object distance calculation """
__version__ = "0.1.0"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__credits__ = "Special thanks to The Anh Vuong who came up with the original idea." \
"This code is also based off of the code from Evan Juras (see below)"
# This script is based off of a script by <NAME> (see below).
# I rewrote this script to be object oriented and added the tkinter-ui (removed command
# line functionalities) as well as several functionalities to calculate the distance
# between two detected objects
######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: <NAME>
# Date: 10/27/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a live webcam
# feed. It draws boxes and scores around the objects of interest in each frame from the
# webcam. To improve FPS, the webcam object runs in a separate thread from the main program.
# This script will work with either a Picamera or regular USB webcam.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I [Evan Juras] added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
import math
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - <NAME>, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
"""Camera object that controls video streaming from the Picamera"""
def __init__(self,resolution=(640,480),framerate=30):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
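# Request MJPG frames; on many USB webcams this allows higher resolutions at full frame rate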
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
ret = self.stream.set(3,resolution[0])
ret = self.stream.set(4,resolution[1])
self.width = self.stream.get(3)
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
def continue_video(self):
# Indicate that camera should resume
self.stopped = False
self.start()
class LiveDetection:
"""
"""
def __init__(self):
"""
"""
MODEL_NAME = 'Sample_Model'
GRAPH_NAME = 'detect.tflite'
LABELMAP_NAME = 'labelmap.txt'
self.__min_conf_threshold = 0.5
resW, resH = '1280x720'.split('x')
self.__imW, self.__imH = int(resW), int(resH)
use_TPU = ''
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
if (GRAPH_NAME == 'detect.tflite'):
GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
self.__labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if self.__labels[0] == '???':
del(self.__labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
self._interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
self._interpreter = Interpreter(model_path=PATH_TO_CKPT)
self._interpreter.allocate_tensors()
# Get model details
self.__input_details = self._interpreter.get_input_details()
self.__output_details = self._interpreter.get_output_details()
self.__height = self.__input_details[0]['shape'][1]
self.__width = self.__input_details[0]['shape'][2]
self.__floating_model = (self.__input_details[0]['dtype'] == np.float32)
self.input_mean = 127.5 # normalization constants; stored on self so calibrate()/detect() can use them
self.input_std = 127.5
# Initialize frame rate calculation
self.__frame_rate_calc = 1
self.__freq = cv2.getTickFrequency()
# Initialize video stream
self._videostream = VideoStream(resolution=(self.__imW,self.__imH),framerate=30).start()
time.sleep(1)
# -----------------------------------------------------------------
# Average parameters
self.avg_width_person = 45+8+4 # +8 due to borders not aligning to body
self.avg_height_person = 172
self.avg_proportion_person = self.avg_width_person / self.avg_height_person
self.test_distance = 216
# Old value:
self.fokal_empir = 1500
# Variable for new calibrated value:
self.focal_value = 0
def calibrate(self,
obj_width_cm:int=0,
obj_dist_cm:int=0,
obj_name:str=""
):
"""
"""
color_variation = 0
foc_meas = 0
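# Average the focal estimate over 10 frames to smooth out per-frame detection jitter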
for i in range(10):
# Grab frame from video stream
frame1 = self._videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self.__width, self.__height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self.__floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
# Perform the actual detection by running the model with the image as input
self._interpreter.set_tensor(self.__input_details[0]['index'],input_data)
self._interpreter.invoke()
# Retrieve detection results
boxes = self._interpreter.get_tensor(self.__output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = self._interpreter.get_tensor(self.__output_details[1]['index'])[0] # Class index of detected objects
scores = self._interpreter.get_tensor(self.__output_details[2]['index'])[0] # Confidence of detected objects
obj_type = []
for i in range(len(scores)):
if ((scores[i] > self.__min_conf_threshold) and (scores[i] <= 1.0)):
# Check for the right object (ensure correct measurement when several objects are detected)
if(self.__labels[int(classes[i])] != obj_name):
continue
else:
obj_type.append(str(self.__labels[int(classes[i])]))
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * self.__imH)))
xmin = int(max(1,(boxes[i][1] * self.__imW)))
ymax = int(min(self.__imH,(boxes[i][2] * self.__imH)))
xmax = int(min(self.__imW,(boxes[i][3] * self.__imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, (40+(40*i))%255, (color_variation*40)%255), 2)
# Calculate object width in pixel
obj_width_pixels = xmax - xmin
foc_meas += (obj_width_pixels * obj_dist_cm) / obj_width_cm
# Draw label
object_name = self.__labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(self.__frame_rate_calc),(15,35),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
self.focal_value = foc_meas / 10
print("Calculated focal value:",self.focal_value)
print("Calibration done")
def detect(self):
"""
"""
color_variation = 0;
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = self._videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self.__width, self.__height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self.__floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
# Perform the actual detection by running the model with the image as input
self._interpreter.set_tensor(self.__input_details[0]['index'],input_data)
self._interpreter.invoke()
# Retrieve detection results
boxes = self._interpreter.get_tensor(self.__output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = self._interpreter.get_tensor(self.__output_details[1]['index'])[0] # Class index of detected objects
scores = self._interpreter.get_tensor(self.__output_details[2]['index'])[0] # Confidence of detected objects
#num = self._interpreter.get_tensor(self.__output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
# --------------------------------------------------------------------------------------------------------
coords = []
proportion_x = []
proportion_y = []
camera_distance = []
obj_type = []
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > self.__min_conf_threshold) and (scores[i] <= 1.0)):
if(self.__labels[int(classes[i])] != "person" and self.__labels[int(classes[i])] != "teddy bear" and self.__labels[int(classes[i])] != "chair"):
continue
else:
obj_type.append(str(self.__labels[int(classes[i])]))
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * self.__imH)))
xmin = int(max(1,(boxes[i][1] * self.__imW)))
ymax = int(min(self.__imH,(boxes[i][2] * self.__imH)))
xmax = int(min(self.__imW,(boxes[i][3] * self.__imW)))
if (i+1)*40 > 255:
color_variation += 1
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, (40+(40*i))%255, (color_variation*40)%255), 2)
# Save coordinates of detected person
coords.append([[xmin, ymin],[xmax, ymax]])
# For testing (screen width of camera)
vid_width = int(self._videostream.width)
if(len(coords) > 1):
# preparation
for a in range(len(coords)):
proportion_x.append(0)
proportion_y.append(0)
for i in range(len(coords)):
# Measure height and width of detected person (in pixel)
proportion_x[i] = coords[i][1][0] - coords[i][0][0] # Width
#proportion_y[i] = coords[i][1][1] - coords[i][0][1] # Height
#proportion_x[i] = xmax - xmin
# P = proportion_x[i]
# F = focal value, W = object width (cm), P = object width (pixels), D = distance (cm)
# F = (P * D) / W -> D = (F * W) / P
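# Example: a person 53 cm wide spanning 368 px with F = 1500 gives D = (1500 * 53) / 368 ≈ 216 cm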
# F = (P * test_distance) / (45+8)
# print(F)
# Calculate object distance to camera
camera_distance.append((self.focal_value * self.avg_width_person) / proportion_x[i])
print("Distance obj "+str(i)+" ("+str(obj_type)+") - camera: "+str(camera_distance[i]), flush=True)
if(i>0):
# Calculate min dist (only horizontal)
if(obj_type[i] == "person"):
min_dist_x = proportion_x[i]/self.avg_width_person * 150
elif(obj_type[i] == "chair"):
min_dist_x = proportion_x[i]/80 * 150
else:
min_dist_x = 500
#min_dist_x = 300
for j in range(i):
min_dist_obj_x_1 = abs(coords[i][1][0] - coords[j][0][0])
min_dist_obj_x_2 = abs(coords[j][1][0] - coords[i][0][0])
dist_obj_z = abs(camera_distance[i] - camera_distance[j])
# Test with distance to borders
#min_dist_obj_x_1 = abs(coords[i][1][0] - vid_width) # To the right
#min_dist_obj_x_2 = abs(coords[i][0][0] - 0) # To the left
print("X-Distanz objekt i -> j: "+str(min_dist_obj_x_1)+" - X-Distanz obj j -> i: "+str(min_dist_obj_x_2)+" - minimale Distanz: "+str(min_dist_x), flush=True)
print("Z-Distanz objekt i - j: "+str(dist_obj_z), flush=True)
# Check for smaller distance
if(min_dist_obj_x_1 < min_dist_obj_x_2):
objects_distance = math.sqrt(min_dist_obj_x_1**2 + dist_obj_z**2)
if(objects_distance < min_dist_x):
print("AAAA "+str(objects_distance)+" j = "+obj_type[j], flush=True)
cv2.line(frame, (coords[i][1][0], coords[i][1][1]), (coords[j][0][0],coords[j][1][1]), (255,10,0), 2)
#cv2.line(frame, (coords[i][1][0], coords[i][1][1]+30), (vid_width,coords[i][1][1]+30), (255,10,0), 2)
dist_label = '%s / %d' % (round(objects_distance, 2), round(min_dist_x, 2))
dist_labelSize, dist_baseLine = cv2.getTextSize(dist_label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
dist_label_ymin = max(coords[i][1][1], dist_labelSize[1] + 10)
cv2.rectangle(frame, (coords[i][1][0], dist_label_ymin-dist_labelSize[1]-10), (coords[i][1][0]+dist_labelSize[0], dist_label_ymin+dist_baseLine-10), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, dist_label, (coords[i][1][0], dist_label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
elif(min_dist_obj_x_1 > min_dist_obj_x_2):
objects_distance = math.sqrt(min_dist_obj_x_2**2 + dist_obj_z**2)
if(objects_distance < min_dist_x):
print("BBB "+str(objects_distance)+" j = "+obj_type[j], flush=True)
cv2.line(frame, (coords[j][1][0], coords[j][1][1]), (coords[i][0][0],coords[i][1][1]), (255,10,0), 2)
#cv2.line(frame, (coords[i][0][0], coords[i][0][1]), (0,coords[i][0][1]), (255,10,0), 2)
dist_label = '%s / %d' % (round(objects_distance, 2), round(min_dist_x, 2))
dist_labelSize, dist_baseLine = cv2.getTextSize(dist_label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
dist_label_ymin = max(coords[j][1][1], dist_labelSize[1] + 10)
cv2.rectangle(frame, (coords[j][1][0], dist_label_ymin-dist_labelSize[1]-10), (coords[j][1][0]+dist_labelSize[0], dist_label_ymin+dist_baseLine-10), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, dist_label, (coords[j][1][0], dist_label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
else:
# ...
b = 1
else:
# ...
b = 2
else:
# ...
b = 3
# Draw label
object_name = self.__labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(self.__frame_rate_calc),(15,35),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/self.__freq
self.__frame_rate_calc= 1/time1
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
def __del__(self):
"""
"""
# Clean up
self._videostream.stop()
cv2.destroyAllWindows()
def main():
det_ob = LiveDetection()
det_ob.detect()
del det_ob
if __name__ == "__main__":
main() | en | 0.749012 | Webcam Detection with TensorFlow classifier and object distance calculation # This script is based off of a script by <NAME> (see below). # I rewrote this script to be object oriented and added the tkinter-ui (removed command # line functionalities) as well as several functionalities to calculate the distance # between two detected objects ######## Webcam Object Detection Using Tensorflow-trained Classifier ######### # # Author: <NAME> # Date: 10/27/19 # Description: # This program uses a TensorFlow Lite model to perform object detection on a live webcam # feed. It draws boxes and scores around the objects of interest in each frame from the # webcam. To improve FPS, the webcam object runs in a separate thread from the main program. # This script will work with either a Picamera or regular USB webcam. # # This code is based off the TensorFlow Lite image classification example at: # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py # # I [Evan Juras] added my own method of drawing boxes and labels using OpenCV. # Import packages # Define VideoStream class to handle streaming of video from webcam in separate processing thread # Source - <NAME>, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/ Camera object that controls video streaming from the Picamera # Initialize the PiCamera and the camera image stream # Read first frame from the stream # Variable to control when the camera is stopped # Start the thread that reads frames from the video stream # Keep looping indefinitely until the thread is stopped # If the camera is stopped, stop the thread # Close camera resources # Otherwise, grab the next frame from the stream # Return the most recent frame # Indicate that the camera and thread should be stopped # Indicate that camera should resume # Import TensorFlow libraries # If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow # If using Coral Edge TPU, import the load_delegate library # If using Edge TPU, assign filename for Edge TPU model # If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite' # Get path to current working directory # Path to .tflite file, which contains the model that is used for object detection # Path to label map file # Load the label map # Have to do a weird fix for label map if using the COCO "starter model" from # https://www.tensorflow.org/lite/models/object_detection/overview # First label is '???', which has to be removed. # Load the Tensorflow Lite model. # If using Edge TPU, use special load_delegate argument # Get model details # Initialize frame rate calculation # Initialize video stream # ----------------------------------------------------------------- # Average parameters # +8 due to borders not aligning to body # Old value: # Variable for new calibrated value: # Grab frame from video stream # Acquire frame and resize to expected shape [1xHxWx3] # Normalize pixel values if using a floating model (i.e. if model is non-quantized) # Perform the actual detection by running the model with the image as input # Retrieve detection results # Bounding box coordinates of detected objects # Class index of detected objects # Confidence of detected objects # Check for the right object (ensure correct measurement when several objects are detected) # Get bounding box coordinates and draw box # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min() # Calculate object width in pixel # Draw label # Look up object name from "labels" array using class index # Example: 'person: 72%' # Get font size # Make sure not to draw label too close to top of window # Draw white box to put label text in # Draw label text # Draw framerate in corner of frame # All the results have been drawn on the frame, so it's time to display it. #for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True): # Start timer (for calculating frame rate) # Grab frame from video stream # Acquire frame and resize to expected shape [1xHxWx3] # Normalize pixel values if using a floating model (i.e. if model is non-quantized) # Perform the actual detection by running the model with the image as input # Retrieve detection results # Bounding box coordinates of detected objects # Class index of detected objects # Confidence of detected objects #num = self._interpreter.get_tensor(self.__output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed) # -------------------------------------------------------------------------------------------------------- # Loop over all detections and draw detection box if confidence is above minimum threshold # Get bounding box coordinates and draw box # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min() # Save coordinates of detected person # For testing (screen width of camera) # preparation # Measure height and width of detected person (in pixel) # Width #proportion_y[i] = coords[i][1][1] - coords[i][0][1] # Height #proportion_x[i] = xmax - xmin # P = proportion_x[i] # F = focal value, W = object width (cm), P = object width (pixels), D = distance (cm) # F = (P * D) / W -> D = (F * W) / P # F = (P * test_distance) / (45+8) # print(F) # Calculate object distance to camera # Calculate min dist (only horizontal) #min_dist_x = 300 # Test with distance to borders #min_dist_obj_x_1 = abs(coords[i][1][0] - vid_width) # To the right #min_dist_obj_x_2 = abs(coords[i][0][0] - 0) # To the left # Check for smaller distance #cv2.line(frame, (coords[i][1][0], coords[i][1][1]+30), (vid_width,coords[i][1][1]+30), (255,10,0), 2) #cv2.line(frame, (coords[i][0][0], coords[i][0][1]), (0,coords[i][0][1]), (255,10,0), 2) # ... # ... # ... # Draw label # Look up object name from "labels" array using class index # Example: 'person: 72%' # Get font size # Make sure not to draw label too close to top of window # Draw white box to put label text in # Draw label text # Draw framerate in corner of frame # All the results have been drawn on the frame, so it's time to display it. # Calculate framerate # Press 'q' to quit # Clean up | 2.767316 | 3
modules/zabbix_smart.py | yakumo-saki/smart_to_zabbix | 0 | 7415 | <filename>modules/zabbix_smart.py
import json
import logging
import config as cfg
from modules.const import Keys, AttrKey
from modules.zabbix_sender import send_to_zabbix
logger = logging.getLogger(__name__)
SMART_ATTR_KEY = "ata_smart_attributes"
NVME_ATTR_KEY = "nvme_smart_health_information_log"
def send_attribute_discovery(result):
"""
Send S.M.A.R.T attribute LLD (low-level discovery) data to Zabbix.
Attribute LLD here simply means every SMART attribute.
"""
logger.info("Sending S.M.A.R.T attribute discovery to zabbix")
discovery_result = []
for device in result:
logger.info("Listing S.M.A.R.T attributes: " + device)
detail = result[device]
discovery = {AttrKey.DEV_NAME: device, AttrKey.DISK_NAME: detail["model_name"]}
if (SMART_ATTR_KEY in detail):
discovery_result = create_attribute_list_non_nvme(discovery, detail[SMART_ATTR_KEY])
elif (NVME_ATTR_KEY in detail):
discovery_result = create_attribute_list_nvme(discovery, detail[NVME_ATTR_KEY])
data = {"request": "sender data", "data":[]}
valueStr = json.dumps({"data": discovery_result})
one_data = {"host": cfg.ZABBIX_HOST, "key": AttrKey.KEY, "value": f"{valueStr}"}
data["data"].append(one_data)
send_to_zabbix(data)
return None
def create_attribute_list_non_nvme(discovery_base, smart_attributes):
import copy
result = []
for attr in smart_attributes["table"]:
discovery = copy.deepcopy(discovery_base)
# For non-NVMe devices an Unknown Attribute is possible, so prefix the attribute name with its SMART ID
discovery[AttrKey.ATTR_NAME] = "{0} {1}".format(attr["id"], attr["name"])
discovery[AttrKey.ATTR_ID] = attr["id"]
result.append(discovery)
return result
def create_attribute_list_nvme(discovery_base, nvme_health_info):
import copy
result = []
for key in nvme_health_info:
discovery = copy.deepcopy(discovery_base)
if key == "temperature_sensors":
for idx, _ in enumerate(nvme_health_info[key]):
# As the name temperature_sensors suggests, multiple temperature sensors produce multiple values,
# so expand them into names like temperature_sensors1, temperature_sensors2, ...
discovery[AttrKey.ATTR_NAME] = f"temperature_sensors{idx}"
discovery[AttrKey.ATTR_ID] = f"temperature_sensors{idx}"
else:
discovery[AttrKey.ATTR_NAME] = key
discovery[AttrKey.ATTR_ID] = key
result.append(discovery)
return result
def send_smart_data(data):
logger.info("Send S.M.A.R.T data to zabbix")
results = []
for dev in data:
logger.info("Listing S.M.A.R.T data: " + dev)
detail = data[dev] # /dev/sda
if ("ata_smart_attributes" in detail):
results = create_value_list_non_nvme(dev, detail["ata_smart_attributes"])
elif ("nvme_smart_health_information_log" in detail):
results = create_value_list_nvme(dev, detail["nvme_smart_health_information_log"])
sender_data = {"request": "sender data", "data": results}
#valueStr = json.dumps({"data": discovery_result})
# print(json.dumps(sender_data, indent=2))
send_to_zabbix(sender_data)
return None
def create_value_list_non_nvme(dev, smart_attributes):
results = []
for attr in smart_attributes["table"]:
keyvalue = {
AttrKey.RAWVALUE_KEY.format(dev, attr["id"]): attr["raw"]["value"],
AttrKey.VALUE_KEY.format(dev, attr["id"]): attr["value"],
AttrKey.WORST_KEY.format(dev, attr["id"]): attr["worst"]
}
if ("thresh" in attr):
keyvalue[AttrKey.THRESH_KEY.format(dev, attr["id"])] = attr["thresh"]
for k,v in keyvalue.items():
results.append({"host": cfg.ZABBIX_HOST, "key": k, "value": v})
return results
def create_value_list_nvme(dev, nvme_health_info):
results = []
for key in nvme_health_info:
# NVMe has no threshold or worst, only a value
if key == "temperature_sensors":
# The multiple temperature_sensors values are expanded with a sequential index appended to the name
for idx, val in enumerate(nvme_health_info[key]):
key = AttrKey.VALUE_KEY.format(dev, f"temperature_sensors{idx}")
results.append({"host": cfg.ZABBIX_HOST, "key": key, "value": val})
else:
val = nvme_health_info[key]
key = AttrKey.VALUE_KEY.format(dev, key)
results.append({"host": cfg.ZABBIX_HOST, "key": key, "value": val})
return results
| <filename>modules/zabbix_smart.py
import json
import logging
import config as cfg
from modules.const import Keys, AttrKey
from modules.zabbix_sender import send_to_zabbix
logger = logging.getLogger(__name__)
SMART_ATTR_KEY = "ata_smart_attributes"
NVME_ATTR_KEY = "nvme_smart_health_information_log"
def send_attribute_discovery(result):
"""
Send S.M.A.R.T attribute LLD (low-level discovery) data to Zabbix.
Attribute LLD here simply means every SMART attribute.
"""
logger.info("Sending S.M.A.R.T attribute discovery to zabbix")
discovery_result = []
for device in result:
logger.info("Listing S.M.A.R.T attributes: " + device)
detail = result[device]
discovery = {AttrKey.DEV_NAME: device, AttrKey.DISK_NAME: detail["model_name"]}
if (SMART_ATTR_KEY in detail):
discovery_result = create_attribute_list_non_nvme(discovery, detail[SMART_ATTR_KEY])
elif (NVME_ATTR_KEY in detail):
discovery_result = create_attribute_list_nvme(discovery, detail[NVME_ATTR_KEY])
data = {"request": "sender data", "data":[]}
valueStr = json.dumps({"data": discovery_result})
one_data = {"host": cfg.ZABBIX_HOST, "key": AttrKey.KEY, "value": f"{valueStr}"}
data["data"].append(one_data)
send_to_zabbix(data)
return None
def create_attribute_list_non_nvme(discovery_base, smart_attributes):
import copy
result = []
for attr in smart_attributes["table"]:
discovery = copy.deepcopy(discovery_base)
# For non-NVMe devices an Unknown Attribute is possible, so prefix the attribute name with its SMART ID
discovery[AttrKey.ATTR_NAME] = "{0} {1}".format(attr["id"], attr["name"])
discovery[AttrKey.ATTR_ID] = attr["id"]
result.append(discovery)
return result
def create_attribute_list_nvme(discovery_base, nvme_health_info):
import copy
result = []
for key in nvme_health_info:
discovery = copy.deepcopy(discovery_base)
if key == "temperature_sensors":
for idx, _ in enumerate(nvme_health_info[key]):
# As the name temperature_sensors suggests, multiple temperature sensors produce multiple values,
# so expand them into names like temperature_sensors1, temperature_sensors2, ...
discovery[AttrKey.ATTR_NAME] = f"temperature_sensors{idx}"
discovery[AttrKey.ATTR_ID] = f"temperature_sensors{idx}"
else:
discovery[AttrKey.ATTR_NAME] = key
discovery[AttrKey.ATTR_ID] = key
result.append(discovery)
return result
def send_smart_data(data):
logger.info("Send S.M.A.R.T data to zabbix")
results = []
for dev in data:
logger.info("Listing S.M.A.R.T data: " + dev)
detail = data[dev] # /dev/sda
if ("ata_smart_attributes" in detail):
results = create_value_list_non_nvme(dev, detail["ata_smart_attributes"])
elif ("nvme_smart_health_information_log" in detail):
results = create_value_list_nvme(dev, detail["nvme_smart_health_information_log"])
sender_data = {"request": "sender data", "data": results}
#valueStr = json.dumps({"data": discovery_result})
# print(json.dumps(sender_data, indent=2))
send_to_zabbix(sender_data)
return None
def create_value_list_non_nvme(dev, smart_attributes):
results = []
for attr in smart_attributes["table"]:
keyvalue = {
AttrKey.RAWVALUE_KEY.format(dev, attr["id"]): attr["raw"]["value"],
AttrKey.VALUE_KEY.format(dev, attr["id"]): attr["value"],
AttrKey.WORST_KEY.format(dev, attr["id"]): attr["worst"]
}
if ("thresh" in attr):
keyvalue[AttrKey.THRESH_KEY.format(dev, attr["id"])] = attr["thresh"]
for k,v in keyvalue.items():
results.append({"host": cfg.ZABBIX_HOST, "key": k, "value": v})
return results
def create_value_list_nvme(dev, nvme_health_info):
results = []
for key in nvme_health_info:
# NVMe にはthreshouldやworstはなく、valueだけ
if key == "temperature_sensors":
# temperature_sensorsの複数の値は 末尾に連番をつけて展開されている
for idx, val in enumerate(nvme_health_info[key]):
key = AttrKey.VALUE_KEY.format(dev, f"temperature_sensors{idx}")
results.append({"host": cfg.ZABBIX_HOST, "key": key, "value": val})
else:
val = nvme_health_info[key]
key = AttrKey.VALUE_KEY.format(dev, key)
results.append({"host": cfg.ZABBIX_HOST, "key": key, "value": val})
return results
| ja | 0.942692 | zabbixにS.M.A.R.T Attribute LLDデータを送信します。 Attribute LLDとは要するにSMART値すべて # non NVMeの場合、 Unknown Attributeがあり得るので、SMART ID を名前の先頭につけておく # temperature_sensorsの名前の通り、複数の温度センサーがあると値が複数入るので # temperature_sensors1,2 のような名前に展開する # /dev/sda #valueStr = json.dumps({"data": discovery_result}) # print(json.dumps(sender_data, indent=2)) # NVMe にはthreshouldやworstはなく、valueだけ # temperature_sensorsの複数の値は 末尾に連番をつけて展開されている | 2.237193 | 2 |
data.py | kpister/biaxial-rnn-music-composition | 0 | 7416 | import itertools
from midi_to_statematrix import UPPER_BOUND, LOWER_BOUND
def startSentinel():
def noteSentinel(note):
position = note
part_position = [position]
pitchclass = (note + LOWER_BOUND) % 12
part_pitchclass = [int(i == pitchclass) for i in range(12)]
        # zero-pad the vicinity (50), context (12) and beat (4) blocks, then set the sentinel flag
        return part_position + part_pitchclass + [0] * 66 + [1]
return [noteSentinel(note) for note in range(UPPER_BOUND - LOWER_BOUND)]
def getOrDefault(l, i, d):
    # Negative indices would silently wrap around in Python, so treat them as misses
    if i < 0:
        return d
    try:
        return l[i]
    except IndexError:
        return d
def buildContext(state):
context = [0] * 12
for note, notestate in enumerate(state):
if notestate[0] == 1:
pitchclass = (note + LOWER_BOUND) % 12
context[pitchclass] += 1
return context
def buildBeat(time):
return [
2 * x - 1 for x in [time % 2, (time // 2) % 2, (time // 4) % 2, (time // 8) % 2]
]
def noteInputForm(note, state, context, beat):
position = note
part_position = [position]
pitchclass = (note + LOWER_BOUND) % 12
part_pitchclass = [int(i == pitchclass) for i in range(12)]
# Concatenate the note states for the previous vicinity
part_prev_vicinity = list(
itertools.chain.from_iterable(
(getOrDefault(state, note + i, [0, 0]) for i in range(-12, 13))
)
)
part_context = context[pitchclass:] + context[:pitchclass]
return (
part_position + part_pitchclass + part_prev_vicinity + part_context + beat + [0]
)
def noteStateSingleToInputForm(state, time):
beat = buildBeat(time)
context = buildContext(state)
return [noteInputForm(note, state, context, beat) for note in range(len(state))]
def noteStateMatrixToInputForm(statematrix):
# NOTE: May have to transpose this or transform it in some way to make Theano like it
# [startSentinel()] +
inputform = [
noteStateSingleToInputForm(state, time)
for time, state in enumerate(statematrix)
]
return inputform
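
As a sanity check on the layout built above, each per-note vector has 1 (position) + 12 (pitch class) + 50 (vicinity: 25 neighbouring notes x 2 state values) + 12 (context) + 4 (beat) + 1 (flag) = 80 entries. A hedged usage sketch, assuming midi_to_statematrix is importable so the bounds resolve; the note count below is an assumption:

from data import noteStateSingleToInputForm  # this module

n_notes = 78  # assumed UPPER_BOUND - LOWER_BOUND for this project
state = [[0, 0] for _ in range(n_notes)]  # all notes silent
state[40] = [1, 1]                        # one note held and articulated

form = noteStateSingleToInputForm(state, 0)
assert len(form) == n_notes
# 1 position + 12 pitch class + 50 vicinity + 12 context + 4 beat + 1 flag
assert all(len(v) == 80 for v in form)
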
ocdb/ws/controllers/datasets.py | eocdb/ocdb-server | 0 | 7417 | # The MIT License (MIT)
# Copyright (c) 2018 by EUMETSAT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import List, Union
from ..context import WsContext
from ...core.asserts import assert_not_none, assert_one_of, assert_instance
from ...core.models.dataset import Dataset
from ...core.models.dataset_query import DatasetQuery
from ...core.models.dataset_query_result import DatasetQueryResult
from ...core.models.dataset_ref import DatasetRef
from ...core.models.dataset_validation_result import DatasetValidationResult
from ...core.models.qc_info import QcInfo, QC_STATUS_SUBMITTED
from ...core.val import validator
from ...ws.errors import WsResourceNotFoundError, WsBadRequestError, WsNotImplementedError
def validate_dataset(ctx: WsContext, dataset: Dataset) -> DatasetValidationResult:
return validator.validate_dataset(dataset, ctx.config)
def find_datasets(ctx: WsContext,
expr: str = None,
region: List[float] = None,
time: List[str] = None,
wdepth: List[float] = None,
mtype: str = 'all',
wlmode: str = 'all',
shallow: str = 'no',
pmode: str = 'contains',
pgroup: List[str] = None,
status: str = None,
submission_id: str = None,
pname: List[str] = None,
geojson: bool = False,
offset: int = 1,
user_id: str = None,
count: int = 1000) -> DatasetQueryResult:
"""Find datasets."""
assert_one_of(wlmode, ['all', 'multispectral', 'hyperspectral'], name='wlmode')
assert_one_of(shallow, ['no', 'yes', 'exclusively'], name='shallow')
assert_one_of(pmode, ['contains', 'same_cruise', 'dont_apply'], name='pmode')
if pgroup is not None:
assert_instance(pgroup, [])
# Ensuring that the search uses lower case pnames
if pname:
pname = [p.lower() for p in pname]
query = DatasetQuery()
query.expr = expr
query.region = region
query.time = time
query.wdepth = wdepth
query.mtype = mtype
query.wlmode = wlmode
query.shallow = shallow
query.pmode = pmode
query.pgroup = pgroup
query.submission_id = submission_id
query.status = status
query.pname = pname
query.geojson = geojson
query.offset = offset
query.count = count
query.user_id = user_id
result = DatasetQueryResult({}, 0, [], query)
for driver in ctx.db_drivers:
result_part = driver.instance().find_datasets(query)
result.total_count += result_part.total_count
result.datasets += result_part.datasets
result.dataset_ids += result_part.dataset_ids
result.locations.update(result_part.locations)
return result
def add_dataset(ctx: WsContext,
dataset: Dataset) -> DatasetRef:
"""Add a new dataset."""
assert_not_none(dataset)
validation_result = validator.validate_dataset(dataset, ctx.config)
if validation_result.status == "ERROR":
raise WsBadRequestError(f"Invalid dataset.")
dataset_id = ctx.db_driver.instance().add_dataset(dataset)
if not dataset_id:
raise WsBadRequestError(f"Could not add dataset {dataset.path}")
return DatasetRef(dataset_id, dataset.path, dataset.filename)
def update_dataset(ctx: WsContext,
dataset: Dataset):
"""Update an existing dataset."""
assert_not_none(dataset)
validation_result = validator.validate_dataset(dataset, ctx.config)
if validation_result.status == "ERROR":
raise WsBadRequestError(f"Invalid dataset.")
updated = ctx.db_driver.instance().update_dataset(dataset)
if not updated:
raise WsResourceNotFoundError(f"Dataset with ID {dataset.id} not found")
return updated
def delete_dataset(ctx: WsContext,
dataset_id: str):
"""Delete an existing dataset."""
# assert_not_none(api_key, name='api_key')
assert_not_none(dataset_id, name='dataset_id')
deleted = ctx.db_driver.instance().delete_dataset(dataset_id)
if not deleted:
raise WsResourceNotFoundError(f"Dataset with ID {dataset_id} not found")
return deleted
def get_dataset_by_id_strict(ctx: WsContext,
dataset_id: str) -> Dataset:
"""Get dataset by ID."""
assert_not_none(dataset_id, name='dataset_id')
dataset = ctx.db_driver.instance().get_dataset(dataset_id)
if dataset is not None:
return dataset
raise WsResourceNotFoundError(f"Dataset with ID {dataset_id} not found")
def get_dataset_by_id(ctx: WsContext,
dataset_id: Union[dict, str]) -> Dataset:
"""Get dataset by ID."""
assert_not_none(dataset_id, name='dataset_id')
# The dataset_id may be a dataset json object
if isinstance(dataset_id, dict):
dataset_id = dataset_id['id']
dataset = ctx.db_driver.instance().get_dataset(dataset_id)
return dataset
# noinspection PyUnusedLocal,PyTypeChecker
def get_datasets_in_path(ctx: WsContext,
affil: str,
project: str,
cruise: str) -> List[DatasetRef]:
assert_not_none(affil, name='affil')
assert_not_none(project, name='project')
assert_not_none(cruise, name='cruise')
# TODO (generated): implement operation get_datasets_in_bucket()
raise WsNotImplementedError('Operation get_datasets_in_bucket() not yet implemented')
# noinspection PyUnusedLocal,PyTypeChecker
def get_dataset_by_name(ctx: WsContext,
affil: str,
project: str,
cruise: str,
name: str) -> str:
assert_not_none(affil, name='affil')
assert_not_none(project, name='project')
assert_not_none(cruise, name='cruise')
assert_not_none(name, name='name')
# TODO (generated): implement operation get_dataset_by_bucket_and_name()
raise WsNotImplementedError('Operation get_dataset_by_bucket_and_name() not yet implemented')
# noinspection PyUnusedLocal
def get_dataset_qc_info(ctx: WsContext,
dataset_id: str) -> QcInfo:
assert_not_none(dataset_id, name='dataset_id')
dataset = ctx.db_driver.get_dataset(dataset_id)
qc_info_dict = dataset.metadata.get("qc_info")
return QcInfo.from_dict(qc_info_dict) if qc_info_dict else QcInfo(QC_STATUS_SUBMITTED)
# noinspection PyUnusedLocal
def set_dataset_qc_info(ctx: WsContext,
dataset_id: str,
qc_info: QcInfo):
assert_not_none(dataset_id, name='dataset_id')
dataset = ctx.db_driver.get_dataset(dataset_id)
dataset.metadata["qc_info"] = qc_info.to_dict()
ctx.db_driver.update_dataset(dataset)
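
A hedged sketch of how a request handler might drive find_datasets; the WsContext comes from the service wiring, and the bounding-box format and argument names are assumptions:

def handle_dataset_search(ctx: WsContext, args: dict) -> DatasetQueryResult:
    # args is assumed to be the parsed query string of an HTTP request
    return find_datasets(
        ctx,
        expr=args.get("expr"),
        region=[10.0, 50.0, 11.0, 51.0],  # example west/south/east/north box (format assumed)
        mtype="all",
        shallow="no",
        pmode="contains",
        offset=int(args.get("offset", 1)),
        count=int(args.get("count", 100)),
    )
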
libAnt/node.py | ayanezcasal/AntLibAYC | 19 | 7418 | import threading
from queue import Queue, Empty
from time import sleep
from libAnt.drivers.driver import Driver
from libAnt.message import *
class Network:
def __init__(self, key: bytes = b'\x00' * 8, name: str = None):
self.key = key
self.name = name
self.number = 0
def __str__(self):
return self.name
class Pump(threading.Thread):
    def __init__(self, driver: Driver, initMessages, out: Queue, onSuccess, onFailure):
super().__init__()
self._stopper = threading.Event()
self._driver = driver
self._out = out
self._initMessages = initMessages
self._waiters = []
        self._onSuccess = onSuccess
self._onFailure = onFailure
def stop(self):
self._driver.abort()
self._stopper.set()
def stopped(self):
        return self._stopper.is_set()
def run(self):
while not self.stopped():
try:
with self._driver as d:
# Startup
rst = SystemResetMessage()
self._waiters.append(rst)
d.write(rst)
for m in self._initMessages:
self._waiters.append(m)
d.write(m)
while not self.stopped():
# Write
try:
outMsg = self._out.get(block=False)
self._waiters.append(outMsg)
d.write(outMsg)
except Empty:
pass
# Read
try:
msg = d.read(timeout=1)
if msg.type == MESSAGE_CHANNEL_EVENT:
# This is a response to our outgoing message
for w in self._waiters:
if w.type == msg.content[1]: # ACK
self._waiters.remove(w)
# TODO: Call waiter callback from tuple (waiter, callback)
break
elif msg.type == MESSAGE_CHANNEL_BROADCAST_DATA:
bmsg = BroadcastMessage(msg.type, msg.content).build(msg.content)
self._onSuccess(bmsg)
except Empty:
pass
except Exception as e:
self._onFailure(e)
            except BaseException:
                # swallow anything else (including interrupts) so the pump loop retries
                pass
self._waiters.clear()
sleep(1)
class Node:
def __init__(self, driver: Driver, name: str = None):
self._driver = driver
self._name = name
self._out = Queue()
self._init = []
self._pump = None
self._configMessages = Queue()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self, onSuccess, onFailure):
if not self.isRunning():
self._pump = Pump(self._driver, self._init, self._out, onSuccess, onFailure)
self._pump.start()
def enableRxScanMode(self, networkKey=ANTPLUS_NETWORK_KEY, channelType=CHANNEL_TYPE_ONEWAY_RECEIVE,
frequency: int = 2457, rxTimestamp: bool = True, rssi: bool = True, channelId: bool = True):
self._init.append(SystemResetMessage())
self._init.append(SetNetworkKeyMessage(0, networkKey))
self._init.append(AssignChannelMessage(0, channelType))
self._init.append(SetChannelIdMessage(0))
self._init.append(SetChannelRfFrequencyMessage(0, frequency))
self._init.append(EnableExtendedMessagesMessage())
self._init.append(LibConfigMessage(rxTimestamp, rssi, channelId))
self._init.append(OpenRxScanModeMessage())
def stop(self):
if self.isRunning():
self._pump.stop()
self._pump.join()
def isRunning(self):
if self._pump is None:
return False
return self._pump.is_alive()
def getCapabilities(self):
pass
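
A hedged usage sketch for the classes above; the USB driver import and vendor/product IDs follow the upstream libAnt examples, but both are assumptions here:

from libAnt.drivers.usb import USBDriver  # assumed driver module
import time

def on_broadcast(msg):
    print(msg)

def on_failure(err):
    print("ANT error:", err)

with Node(USBDriver(vid=0x0FCF, pid=0x1008), "demo") as node:
    node.enableRxScanMode()           # queue the scan-mode init messages
    node.start(on_broadcast, on_failure)
    time.sleep(30)                    # let the pump thread collect broadcasts
# leaving the with-block stops the pump thread cleanly
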
tests/test_seasonality.py | OliPerkins1987/Wildfire_Human_Agency_Model | 1 | 7419 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 30 12:17:04 2021
@author: Oli
"""
import pytest
import pandas as pd
import numpy as np
import netCDF4 as nc
import os
from copy import deepcopy
os.chdir(os.path.dirname(os.path.realpath(__file__)))
wd = os.getcwd().replace('\\', '/')
exec(open("test_setup.py").read())
os.chdir((wd[0:-6] + '/src/data_import'))
exec(open("local_load_up.py").read())
from model_interface.wham import WHAM
from Core_functionality.AFTs.agent_class import AFT
from Core_functionality.AFTs.arable_afts import Swidden, SOSH, MOSH, Intense_arable
from Core_functionality.AFTs.livestock_afts import Pastoralist, Ext_LF_r, Int_LF_r, Ext_LF_p, Int_LF_p
from Core_functionality.AFTs.forestry_afts import Agroforestry, Logger, Managed_forestry, Abandoned_forestry
from Core_functionality.AFTs.nonex_afts import Hunter_gatherer, Recreationalist, SLM, Conservationist
from Core_functionality.AFTs.land_system_class import land_system
from Core_functionality.AFTs.land_systems import Cropland, Pasture, Rangeland, Forestry, Urban, Unoccupied, Nonex
from Core_functionality.top_down_processes.arson import arson
from Core_functionality.top_down_processes.background_ignitions import background_rate
from Core_functionality.top_down_processes.fire_constraints import fuel_ct, dominant_afr_ct
from Core_functionality.Trees.Transfer_tree import define_tree_links, predict_from_tree, update_pars, predict_from_tree_fast
from Core_functionality.prediction_tools.regression_families import regression_link, regression_transformation
#####################################################################
### Run model year then reproduce outputs
#####################################################################
### Run model for 1 year
all_afts = [Swidden, SOSH, MOSH, Intense_arable,
Pastoralist, Ext_LF_r, Int_LF_r, Ext_LF_p, Int_LF_p,
Agroforestry, Logger, Managed_forestry, Abandoned_forestry,
Hunter_gatherer, Recreationalist, SLM, Conservationist]
parameters = {
'xlen': 192,
'ylen': 144,
'AFTs': all_afts,
'LS' : [Cropland, Rangeland, Pasture, Forestry, Nonex, Unoccupied, Urban],
'Fire_types': {'cfp': 'Vegetation', 'crb': 'Arable', 'hg': 'Vegetation',
'pasture': 'Pasture', 'pyrome': 'Vegetation'},
'Observers': {'arson': arson, 'background_rate': background_rate},
'AFT_pars': Core_pars,
'Maps' : Map_data,
'Constraint_pars': {'Soil_threshold': 0.1325,
'Dominant_afr_threshold': 0.5,
'Rangeland_stocking_contstraint': True,
'R_s_c_Positive' : False,
'HG_Market_constraint': 7800,
'Arson_threshold': 0.5},
'timestep': 0,
'end_run' : 0,
'reporters': ['Managed_fire', 'Background_ignitions','Arson'],
'theta' : 0.1,
'bootstrap': False,
'Seasonality': False
}
mod = WHAM(parameters)
### setup
mod.setup()
### go
mod.go()
mod_annual = deepcopy(mod.results['Managed_fire'][0]['Total'])
#######################
### Run model monthly
#######################
all_afts = [Swidden, SOSH, MOSH, Intense_arable,
Pastoralist, Ext_LF_r, Int_LF_r, Ext_LF_p, Int_LF_p,
Agroforestry, Logger, Managed_forestry, Abandoned_forestry,
Hunter_gatherer, Recreationalist, SLM, Conservationist]
parameters = {
'xlen': 192,
'ylen': 144,
'AFTs': all_afts,
'LS' : [Cropland, Rangeland, Pasture, Forestry, Nonex, Unoccupied, Urban],
'Fire_types': {'cfp': 'Vegetation', 'crb': 'Arable', 'hg': 'Vegetation',
'pasture': 'Pasture', 'pyrome': 'Vegetation'},
'Fire_seasonality': Seasonality,
'Observers': {'arson': arson, 'background_rate': background_rate},
'AFT_pars': Core_pars,
'Maps' : Map_data,
'Constraint_pars': {'Soil_threshold': 0.1325,
'Dominant_afr_threshold': 0.5,
'Rangeland_stocking_contstraint': True,
'R_s_c_Positive' : False,
'HG_Market_constraint': 7800,
'Arson_threshold': 0.5},
'timestep': 0,
'end_run' : 0,
'reporters': ['Managed_fire', 'Background_ignitions','Arson'],
'theta' : 0.1,
'bootstrap': False,
'Seasonality': True
}
mod = WHAM(parameters)
### setup
mod.setup()
### go
mod.go()
##################################
### tests
##################################
def test_seasonality_mean():
seasonal = np.nansum(mod.results['Managed_fire'][0]['Total'], axis = 0)
assert pytest.approx(np.nanmean(mod_annual)) == np.nanmean(seasonal)
def test_seasonality_quantiles():
seasonal = np.nansum(mod.results['Managed_fire'][0]['Total'], axis = 0)
quants = [0, 0.2, 0.4, 0.5, 0.6, 0.8, 1]
assert pytest.approx(np.nanquantile(mod_annual, quants)) == np.nanquantile(seasonal, quants)
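
The assertions above rely on seasonal results being stacked month-first, so summing over axis 0 collapses twelve monthly grids into one annual grid (ylen was 144 and xlen 192 in the parameters). A minimal illustration of that convention; the dimension order is an assumption:

import numpy as np

monthly = np.random.rand(12, 144, 192)   # (month, ylen, xlen); order assumed
annual = np.nansum(monthly, axis=0)      # collapse the month axis
assert annual.shape == (144, 192)
assert np.isclose(np.nanmean(annual), np.nanmean(monthly) * 12)
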
bookalo/funciones_report.py | unizar-30226-2019-08/Backend | 3 | 7420 |
from django.shortcuts import render, redirect
from bookalo.pyrebase_settings import db, auth
from bookalo.models import *
from bookalo.serializers import *
#from bookalo.functions import *
from rest_framework import status, permissions
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from operator import itemgetter
from django.http import HttpResponse
from datetime import datetime, timedelta, timezone
from django.db.models import Q, Count
from django.contrib.gis.geoip2 import GeoIP2
from math import sin, cos, sqrt, atan2, radians
from decimal import Decimal
from django.core.mail import EmailMessage
from .funciones_chat import *
def CrearReport(reporteduserUid, cause, comment):
reporteduser = Usuario.objects.get(uid=reporteduserUid)
reporte = Report.objects.create(usuario_reportado=reporteduser, causa=cause, comentario=comment)
return reporte
def MandarCorreo(user,reporteduserUid, cause, comment, id_chat, pk_report):
try:
correo = '<EMAIL>'
reporteduser = Usuario.objects.get(uid=reporteduserUid)
mensaje = 'El usuario ' + reporteduser.nombre + ' con uid ' + reporteduser.uid + ' y una media de valoraciones de ' + str(reporteduser.media_valoraciones) + ', ha sido reportado por el usuario ' + user.nombre + ' con uid ' + user.uid + '\n\nCausa: ' + cause + '\nComentario del usuario: ' + comment + '.'
if id_chat != 'nothing':
chat = Chat.objects.get(id=int(id_chat))
mensaje = mensaje + '\n\nMensajes del chat:'
mensajes_chat = Mensaje.objects.filter(chat_asociado=chat).order_by('hora')
for m in mensajes_chat:
hora_mensaje = str(m.hora.year)+ '-' + str(m.hora.month) + '-' + str(m.hora.day) + ' a las ' + str(m.hora.hour) +':'+ str(m.hora.minute) +':'+ str(m.hora.second)
mensaje = mensaje +'\n' + "[" + m.emisor.nombre +', ' + hora_mensaje + "]" + ': ' + m.texto
mensaje = mensaje + "\nA continuación se te presentan las distintas acciones posibles que tienes como moderador:\n\n"
mensaje = mensaje + "Aceptar reporte: https://bookalo.es/api/accept_report?id=" + str(pk_report) + "\n\n"
mensaje = mensaje + "Rechazar reporte: https://bookalo.es/api/reject_report?id=" + str(pk_report) + "\n"
email = EmailMessage('Reporte de usuario ' + reporteduser.nombre, mensaje,
to=[correo])
email.send()
return True
    except Exception:
        return False
from bookalo.pyrebase_settings import db, auth
from bookalo.models import *
from bookalo.serializers import *
#from bookalo.functions import *
from rest_framework import status, permissions
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from operator import itemgetter
from django.http import HttpResponse
from datetime import datetime, timedelta, timezone
from django.db.models import Q, Count
from django.contrib.gis.geoip2 import GeoIP2
from math import sin, cos, sqrt, atan2, radians
from decimal import Decimal
from django.core.mail import EmailMessage
from .funciones_chat import *
def CrearReport(reporteduserUid, cause, comment):
reporteduser = Usuario.objects.get(uid=reporteduserUid)
reporte = Report.objects.create(usuario_reportado=reporteduser, causa=cause, comentario=comment)
return reporte
def MandarCorreo(user,reporteduserUid, cause, comment, id_chat, pk_report):
try:
correo = '<EMAIL>'
reporteduser = Usuario.objects.get(uid=reporteduserUid)
mensaje = 'El usuario ' + reporteduser.nombre + ' con uid ' + reporteduser.uid + ' y una media de valoraciones de ' + str(reporteduser.media_valoraciones) + ', ha sido reportado por el usuario ' + user.nombre + ' con uid ' + user.uid + '\n\nCausa: ' + cause + '\nComentario del usuario: ' + comment + '.'
if id_chat != 'nothing':
chat = Chat.objects.get(id=int(id_chat))
mensaje = mensaje + '\n\nMensajes del chat:'
mensajes_chat = Mensaje.objects.filter(chat_asociado=chat).order_by('hora')
for m in mensajes_chat:
hora_mensaje = str(m.hora.year)+ '-' + str(m.hora.month) + '-' + str(m.hora.day) + ' a las ' + str(m.hora.hour) +':'+ str(m.hora.minute) +':'+ str(m.hora.second)
mensaje = mensaje +'\n' + "[" + m.emisor.nombre +', ' + hora_mensaje + "]" + ': ' + m.texto
mensaje = mensaje + "\nA continuación se te presentan las distintas acciones posibles que tienes como moderador:\n\n"
mensaje = mensaje + "Aceptar reporte: https://bookalo.es/api/accept_report?id=" + str(pk_report) + "\n\n"
mensaje = mensaje + "Rechazar reporte: https://bookalo.es/api/reject_report?id=" + str(pk_report) + "\n"
email = EmailMessage('Reporte de usuario ' + reporteduser.nombre, mensaje,
to=[correo])
email.send()
return True
except:
return False | en | 0.160162 | #from bookalo.functions import * | 1.967354 | 2 |
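
A hedged sketch of an endpoint tying the two helpers together. The request field names and the view decorators are assumptions; Response, status, api_view, permission_classes and Usuario all come from the imports above:

@api_view(['POST'])
@permission_classes((permissions.AllowAny,))
def report_user(request):
    uid = request.data.get('uid_reported')          # assumed field names
    cause = request.data.get('cause', '')
    comment = request.data.get('comment', '')
    chat_id = request.data.get('id_chat', 'nothing')
    reporting_user = Usuario.objects.get(uid=request.data.get('uid_reporter'))

    report = CrearReport(uid, cause, comment)
    sent = MandarCorreo(reporting_user, uid, cause, comment, chat_id, report.pk)
    return Response({'report_sent': sent}, status=status.HTTP_201_CREATED)
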
tests/test_client.py | patvdleer/nefit-client-python | 11 | 7421 |
import os
import unittest
from nefit import NefitClient, NefitResponseException
class ClientTest(unittest.TestCase):
def test_exceptions(self):
client = NefitClient(
os.environ.get("NEFIT_SERIAL", 123456789),
os.environ.get("NEFIT_ACCESS_KEY", "<KEY>"),
"asddasadsasdcx"
)
client.connect()
with self.assertRaises(NefitResponseException):
client.get_display_code()
client.disconnect()
client.force_disconnect()
if __name__ == '__main__':
unittest.main()
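
For contrast with the failure-path test above, a hedged happy-path sketch; the serial, access key and password are placeholders that would normally come from the Nefit/Bosch app:

from nefit import NefitClient

client = NefitClient("123456789", "0123456789abcdef0123", "my-password")  # placeholder credentials
client.connect()
try:
    print(client.get_display_code())
finally:
    client.disconnect()
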
opencivicdata/merge.py | GovHawkDC/python-opencivicdata | 0 | 7422 | import datetime
from django.db import transaction
def compute_diff(obj1, obj2):
"""
Given two objects compute a list of differences.
Each diff dict has the following keys:
field - name of the field
new - the new value for the field
one - value of the field in obj1
two - value of the field in obj2
diff - none|one|two|new
list - true if field is a list of related objects
"""
comparison = []
fields = obj1._meta.get_fields()
exclude = ('created_at', 'updated_at', 'id', 'locked_fields')
if obj1 == obj2:
raise ValueError('cannot merge object with itself')
for field in fields:
if field.name in exclude:
continue
elif not field.is_relation:
piece_one = getattr(obj1, field.name)
piece_two = getattr(obj2, field.name)
if piece_one == piece_two:
diff = 'none'
new = piece_one
elif piece_one:
diff = 'one'
new = piece_one
            elif piece_two:
                diff = 'two'
                new = piece_two
            else:
                # both values are falsy but unequal (e.g. '' vs. None); keep obj1's value
                diff = 'none'
                new = piece_one
comparison.append({
'field': field.name,
'new': new,
'one': getattr(obj1, field.name),
'two': getattr(obj2, field.name),
'diff': diff,
'list': False,
})
else:
related_name = field.get_accessor_name()
piece_one = list(getattr(obj1, related_name).all())
piece_two = list(getattr(obj2, related_name).all())
# TODO: try and deduplicate the lists?
new = piece_one + piece_two
diff = 'none' if piece_one == piece_two else 'one'
if (field.name == 'other_names' and obj1.name != obj2.name):
new.append(field.related_model(name=obj2.name,
note='from merge w/ ' + obj2.id)
)
diff = 'new'
if field.name == 'identifiers':
new.append(field.related_model(identifier=obj2.id))
diff = 'new'
if field.name == 'memberships':
new = _dedupe_memberships(new)
comparison.append({
'field': related_name,
'new': new,
'one': piece_one,
'two': piece_two,
'diff': diff,
'list': True,
})
comparison.append({'field': 'created_at',
'new': min(obj1.created_at, obj2.created_at),
'one': obj1.created_at,
'two': obj2.created_at,
'diff': 'one' if obj1.created_at < obj2.created_at else 'two',
'list': False,
})
comparison.append({'field': 'updated_at',
'new': datetime.datetime.utcnow(),
'one': obj1.updated_at,
'two': obj2.updated_at,
'diff': 'new',
'list': False,
})
# locked fields are any fields that change that aren't M2M relations
# (ending in _set)
new_locked_fields = obj1.locked_fields + obj2.locked_fields + [
c['field'] for c in comparison if c['diff'] != 'none' and not c['field'].endswith('_set')
]
new_locked_fields = set(new_locked_fields) - {'updated_at', 'created_at'}
comparison.append({'field': 'locked_fields',
'new': list(new_locked_fields),
'one': obj1.locked_fields,
                       'two': obj2.locked_fields,
'diff': 'new',
'list': False,
})
return comparison
@transaction.atomic
def apply_diff(obj1, obj2, diff):
for row in diff:
if row['diff'] != 'none':
if row['list']:
# save items, the ids have been set to obj1
for item in row['new']:
setattr(item,
getattr(obj1, row['field']).field.name,
obj1)
item.save()
else:
setattr(obj1, row['field'], row['new'])
obj1.save()
count, delete_plan = obj2.delete()
if count > 1:
# shouldn't happen, but let's be sure
raise AssertionError('deletion failed due to related objects left unmerged')
def merge(obj1, obj2):
diff = compute_diff(obj1, obj2)
apply_diff(obj1, obj2, diff)
def _dedupe_memberships(memberships):
deduped = []
mset = set()
for membership in memberships:
mkey = (membership.organization_id,
membership.label,
membership.end_date,
membership.post_id)
if mkey not in mset:
deduped.append(membership)
mset.add(mkey)
else:
membership.delete()
return deduped
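
A hedged usage sketch; the Person model location follows the python-opencivicdata layout but is an assumption, as are the IDs:

from opencivicdata.core.models import Person  # assumed model location

keep = Person.objects.get(id="ocd-person/00000000-0000-0000-0000-000000000001")
dupe = Person.objects.get(id="ocd-person/00000000-0000-0000-0000-000000000002")
merge(keep, dupe)  # resolves field conflicts, relinks related rows, deletes dupe
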
src/python/pants/core/project_info/filedeps.py | silverguo/pants | 0 | 7423 |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from pathlib import PurePath
from typing import Iterable
from pants.base.build_root import BuildRoot
from pants.engine.addresses import Address, Addresses, BuildFileAddress
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import goal_rule
from pants.engine.selectors import Get, MultiGet
from pants.engine.target import (
HydratedSources,
HydrateSourcesRequest,
Sources,
Target,
Targets,
TransitiveTargets,
)
class FiledepsOptions(LineOriented, GoalSubsystem):
"""List all source and BUILD files a target depends on."""
name = "filedeps2"
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--absolute",
type=bool,
default=True,
help=(
"If True, output with absolute path; else, output with path relative to the "
"build root."
),
)
register(
"--globs",
type=bool,
default=False,
help=(
"Instead of outputting filenames, output the original globs used in the BUILD "
"file. This will not include exclude globs (i.e. globs that start with `!`)."
),
)
register(
"--transitive",
type=bool,
default=False,
help="If True, include the files used by dependencies in the output.",
)
class Filedeps(Goal):
subsystem_cls = FiledepsOptions
@goal_rule
async def file_deps(
console: Console, options: FiledepsOptions, build_root: BuildRoot, addresses: Addresses,
) -> Filedeps:
targets: Iterable[Target]
if options.values.transitive:
transitive_targets = await Get[TransitiveTargets](Addresses, addresses)
targets = transitive_targets.closure
else:
targets = await Get[Targets](Addresses, addresses)
build_file_addresses = await MultiGet(
Get[BuildFileAddress](Address, tgt.address) for tgt in targets
)
unique_rel_paths = {bfa.rel_path for bfa in build_file_addresses}
if options.values.globs:
unique_rel_paths.update(
itertools.chain.from_iterable(tgt.get(Sources).filespec["globs"] for tgt in targets)
)
else:
all_hydrated_sources = await MultiGet(
Get[HydratedSources](HydrateSourcesRequest, tgt.get(Sources).request) for tgt in targets
)
unique_rel_paths.update(
itertools.chain.from_iterable(
hydrated_sources.snapshot.files for hydrated_sources in all_hydrated_sources
)
)
with options.line_oriented(console) as print_stdout:
for rel_path in sorted(unique_rel_paths):
final_path = (
PurePath(build_root.path, rel_path).as_posix()
if options.values.absolute
else rel_path
)
print_stdout(final_path)
return Filedeps(exit_code=0)
def rules():
return [file_deps]
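
To make the --globs branch above concrete, a small stand-alone illustration with made-up filespecs: the globs output echoes the BUILD-file globs, while the default output lists the hydrated files.

import itertools

filespecs = [{"globs": ["src/app/*.py"]}, {"globs": ["src/lib/**/*.py"]}]   # made-up
hydrated = [("src/app/main.py",), ("src/lib/io.py", "src/lib/util.py")]     # made-up

globs_output = sorted(set(itertools.chain.from_iterable(fs["globs"] for fs in filespecs)))
files_output = sorted(set(itertools.chain.from_iterable(hydrated)))
print(globs_output)  # what --globs would print (plus BUILD file paths)
print(files_output)  # the default output (plus BUILD file paths)
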
perfkitbenchmarker/providers/rackspace/rackspace_network.py | dq922/CloudControlVM | 0 | 7424 |
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to Rackspace VM networking.
The SecurityGroup class provides a way of opening VM ports. The Network class
allows VMs to communicate via internal IPs.
"""
import json
import os
import threading
from perfkitbenchmarker import flags
from perfkitbenchmarker import network
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.rackspace import util
from perfkitbenchmarker import providers
FLAGS = flags.FLAGS
SSH_PORT = 22
class RackspaceSecurityGroup(network.BaseFirewall):
"""An object representing the Rackspace Security Group."""
CLOUD = providers.RACKSPACE
def __init__(self):
"""Initialize Rackspace security group class."""
self._lock = threading.Lock()
self.firewall_names = set()
self.sg_counter = 0
def AllowPort(self, vm, port):
"""Opens a port on the firewall.
Args:
vm: The BaseVirtualMachine object to open the port for.
port: The local port to open.
"""
if vm.is_static or not FLAGS.use_security_group or port == SSH_PORT:
return
with self._lock:
firewall_name = ('perfkit-firewall-%s-%d-%d' %
(FLAGS.run_uri, port, self.sg_counter))
self.sg_counter += 1
if firewall_name in self.firewall_names:
return
firewall_env = dict(os.environ.copy(),
**util.GetDefaultRackspaceNeutronEnv(self))
firewall_cmd = [FLAGS.neutron_path]
firewall_cmd.extend(['security-group-create'])
firewall_cmd.append(firewall_name)
vm_util.IssueRetryableCommand(firewall_cmd, env=firewall_env)
self.firewall_names.add(firewall_name)
for protocol in ['tcp', 'udp']:
rule_cmd = []
rule_cmd.extend([FLAGS.neutron_path,
'security-group-rule-create',
'--direction', 'ingress',
'--ethertype', 'IPv4',
'--protocol', protocol,
'--port-range-min', str(port),
'--port-range-max', str(port)])
rule_cmd.append(firewall_name)
vm_util.IssueRetryableCommand(rule_cmd, env=firewall_env)
rule_cmd = []
rule_cmd.extend([FLAGS.neutron_path,
'security-group-rule-create',
'--direction', 'ingress',
'--ethertype', 'IPv4',
'--protocol', 'tcp',
'--port-range-min', str(SSH_PORT),
'--port-range-max', str(SSH_PORT)])
rule_cmd.append(firewall_name)
vm_util.IssueRetryableCommand(rule_cmd, env=firewall_env)
getport_cmd = []
getport_cmd.extend([FLAGS.neutron_path, 'port-list',
'--format', 'table'])
stdout, _ = vm_util.IssueRetryableCommand(getport_cmd,
env=firewall_env)
attrs = stdout.split('\n')
port_id = None  # guard: stays None when no row matches the VM's addresses
for attr in attrs:
if vm.ip_address in attr or vm.ip_address6 in attr:
port_id = [v.strip() for v in attr.split('|') if v != ''][0]
if port_id != '':
break
if not port_id:
raise ValueError('Could not find port_id from response.')
updateport_cmd = []
updateport_cmd.extend([FLAGS.neutron_path, 'port-update'])
for firewall in self.firewall_names:
updateport_cmd.extend(['--security-group', firewall])
updateport_cmd.append(port_id)
vm_util.IssueRetryableCommand(updateport_cmd, env=firewall_env)
def DisallowAllPorts(self):
"""Closes all ports on the firewall."""
firewall_env = dict(os.environ.copy(),
**util.GetDefaultRackspaceNeutronEnv(self))
for firewall in list(self.firewall_names):  # copy: the set shrinks via remove() below
firewall_cmd = []
firewall_cmd.extend([FLAGS.neutron_path,
'security-group-show',
'--format', 'value'])
firewall_cmd.append(firewall)
stdout, _ = vm_util.IssueRetryableCommand(firewall_cmd,
env=firewall_env)
rules = [v for v in stdout.split('\n') if v != ''][2:-1]
for rule in rules:
rule_id = str(json.loads(rule)['id'])
rule_cmd = []
rule_cmd.extend([FLAGS.neutron_path,
'security-group-rule-delete'])
rule_cmd.append(rule_id)
vm_util.IssueRetryableCommand(rule_cmd, env=firewall_env)
firewall_cmd = [FLAGS.neutron_path]
firewall_cmd.extend(['security-group-delete'])
firewall_cmd.append(firewall)
vm_util.IssueRetryableCommand(firewall_cmd, env=firewall_env)
self.firewall_names.remove(firewall)
| # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to Rackspace VM networking.
The SecurityGroup class provides a way of opening VM ports. The Network class
allows VMs to communicate via internal IPs.
"""
import json
import os
import threading
from perfkitbenchmarker import flags
from perfkitbenchmarker import network
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.rackspace import util
from perfkitbenchmarker import providers
FLAGS = flags.FLAGS
SSH_PORT = 22
class RackspaceSecurityGroup(network.BaseFirewall):
"""An object representing the Rackspace Security Group."""
CLOUD = providers.RACKSPACE
def __init__(self):
"""Initialize Rackspace security group class."""
self._lock = threading.Lock()
self.firewall_names = set()
self.sg_counter = 0
def AllowPort(self, vm, port):
"""Opens a port on the firewall.
Args:
vm: The BaseVirtualMachine object to open the port for.
port: The local port to open.
"""
if vm.is_static or not FLAGS.use_security_group or port == SSH_PORT:
return
with self._lock:
firewall_name = ('perfkit-firewall-%s-%d-%d' %
(FLAGS.run_uri, port, self.sg_counter))
self.sg_counter += 1
if firewall_name in self.firewall_names:
return
firewall_env = dict(os.environ.copy(),
**util.GetDefaultRackspaceNeutronEnv(self))
firewall_cmd = [FLAGS.neutron_path]
firewall_cmd.extend(['security-group-create'])
firewall_cmd.append(firewall_name)
vm_util.IssueRetryableCommand(firewall_cmd, env=firewall_env)
self.firewall_names.add(firewall_name)
for protocol in ['tcp', 'udp']:
rule_cmd = []
rule_cmd.extend([FLAGS.neutron_path,
'security-group-rule-create',
'--direction', 'ingress',
'--ethertype', 'IPv4',
'--protocol', protocol,
'--port-range-min', str(port),
'--port-range-max', str(port)])
rule_cmd.append(firewall_name)
vm_util.IssueRetryableCommand(rule_cmd, env=firewall_env)
rule_cmd = []
rule_cmd.extend([FLAGS.neutron_path,
'security-group-rule-create',
'--direction', 'ingress',
'--ethertype', 'IPv4',
'--protocol', 'tcp',
'--port-range-min', str(SSH_PORT),
'--port-range-max', str(SSH_PORT)])
rule_cmd.append(firewall_name)
vm_util.IssueRetryableCommand(rule_cmd, env=firewall_env)
getport_cmd = []
getport_cmd.extend([FLAGS.neutron_path, 'port-list',
'--format', 'table'])
stdout, _ = vm_util.IssueRetryableCommand(getport_cmd,
env=firewall_env)
attrs = stdout.split('\n')
port_id = None  # guard: stays None when no row matches the VM's addresses
for attr in attrs:
if vm.ip_address in attr or vm.ip_address6 in attr:
port_id = [v.strip() for v in attr.split('|') if v != ''][0]
if port_id != '':
break
if not port_id:
raise ValueError('Could not find port_id from response.')
updateport_cmd = []
updateport_cmd.extend([FLAGS.neutron_path, 'port-update'])
for firewall in self.firewall_names:
updateport_cmd.extend(['--security-group', firewall])
updateport_cmd.append(port_id)
vm_util.IssueRetryableCommand(updateport_cmd, env=firewall_env)
def DisallowAllPorts(self):
"""Closes all ports on the firewall."""
firewall_env = dict(os.environ.copy(),
**util.GetDefaultRackspaceNeutronEnv(self))
for firewall in list(self.firewall_names):  # copy: the set shrinks via remove() below
firewall_cmd = []
firewall_cmd.extend([FLAGS.neutron_path,
'security-group-show',
'--format', 'value'])
firewall_cmd.append(firewall)
stdout, _ = vm_util.IssueRetryableCommand(firewall_cmd,
env=firewall_env)
rules = [v for v in stdout.split('\n') if v != ''][2:-1]
for rule in rules:
rule_id = str(json.loads(rule)['id'])
rule_cmd = []
rule_cmd.extend([FLAGS.neutron_path,
'security-group-rule-delete'])
rule_cmd.append(rule_id)
vm_util.IssueRetryableCommand(rule_cmd, env=firewall_env)
firewall_cmd = [FLAGS.neutron_path]
firewall_cmd.extend(['security-group-delete'])
firewall_cmd.append(firewall)
vm_util.IssueRetryableCommand(firewall_cmd, env=firewall_env)
self.firewall_names.remove(firewall) | en | 0.807899 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Module containing classes related to Rackspace VM networking. The SecurityGroup class provides a way of opening VM ports. The Network class allows VMs to communicate via internal IPs. An object representing the Rackspace Security Group. Initialize Rackspace security group class. Opens a port on the firewall. Args: vm: The BaseVirtualMachine object to open the port for. port: The local port to open. Closes all ports on the firewall. | 2.192738 | 2 |
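A minimal driver for the security-group class above. This is a sketch, not PerfKitBenchmarker's own wiring: StubVM is hypothetical, and a real run needs FLAGS (run_uri, neutron_path, use_security_group) parsed and a reachable neutron CLI.

class StubVM:
    # Hypothetical stand-in exposing only the attributes AllowPort reads.
    is_static = False
    ip_address = "10.0.0.5"
    ip_address6 = "fe80::1"

fw = RackspaceSecurityGroup()
fw.AllowPort(StubVM(), 8080)   # creates perfkit-firewall-<run_uri>-8080-0 plus rules
fw.DisallowAllPorts()          # deletes every rule and group created above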
auth0_client/menu/datafiles/scripts/get_active_user_count.py | rubelw/auth0_client | 2 | 7425 | #!/usr/bin/env python
import json
import sys  # needed for sys.exit(); previously only available if a wildcard import re-exported it
from auth0_client.Auth0Client import Auth0Client
from auth0_client.menu.menu_helper.common import *
from auth0_client.menu.menu_helper.pretty import *
try:
users = {}
client = Auth0Client(auth_config())
results = client.active_users()
print(pretty(results))
except (KeyboardInterrupt, SystemExit):
sys.exit()
| #!/usr/bin/env python
import json
import sys  # needed for sys.exit(); previously only available if a wildcard import re-exported it
from auth0_client.Auth0Client import Auth0Client
from auth0_client.menu.menu_helper.common import *
from auth0_client.menu.menu_helper.pretty import *
try:
users = {}
client = Auth0Client(auth_config())
results = client.active_users()
print(pretty(results))
except (KeyboardInterrupt, SystemExit):
sys.exit()
| ru | 0.26433 | #!/usr/bin/env python | 1.939291 | 2 |
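A hedged variant of the script above that fails loudly on API or config errors instead of dying with a bare traceback; it assumes, as the wildcard imports suggest, that auth_config comes from the common helper and pretty from the pretty helper.

import sys

from auth0_client.Auth0Client import Auth0Client
from auth0_client.menu.menu_helper.common import *    # assumed to provide auth_config
from auth0_client.menu.menu_helper.pretty import *    # assumed to provide pretty

try:
    client = Auth0Client(auth_config())
    print(pretty(client.active_users()))
except (KeyboardInterrupt, SystemExit):
    sys.exit()
except Exception as exc:
    print(f"active-user lookup failed: {exc}", file=sys.stderr)
    sys.exit(1)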
encryptfinance/transactions/admin.py | dark-codr/encryptfinance | 0 | 7426 | from __future__ import absolute_import
from django.contrib import admin
from .models import Deposit, Withdrawal, Support
from .forms import DepositForm, WithdrawalForm
# Register your models here.
@admin.register(Deposit)
class DepositAdmin(admin.ModelAdmin):
# form = DepositForm
list_display = ["__str__", "amount", "approval", "deposited", "created"]
list_filter = ["approval", "created"]
list_editable = ["approval", "amount", "deposited"]
class Meta:
model = Deposit
@admin.register(Withdrawal)
class WithdrawalAdmin(admin.ModelAdmin):
form = WithdrawalForm
list_display = ["__str__", "amount", "wallet_id", "approval", "withdrawn", "created"]
list_filter = ["approval", "created"]
list_editable = ["approval", "withdrawn"]
class Meta:
model = Withdrawal
admin.site.register(Support)
| from __future__ import absolute_import
from django.contrib import admin
from .models import Deposit, Withdrawal, Support
from .forms import DepositForm, WithdrawalForm
# Register your models here.
@admin.register(Deposit)
class DepositAdmin(admin.ModelAdmin):
# form = DepositForm
list_display = ["__str__", "amount", "approval", "deposited", "created"]
list_filter = ["approval", "created"]
list_editable = ["approval", "amount", "deposited"]
class Meta:
model = Deposit
@admin.register(Withdrawal)
class WithdrawalAdmin(admin.ModelAdmin):
form = WithdrawalForm
list_display = ["__str__", "amount", "wallet_id", "approval", "withdrawn", "created"]
list_filter = ["approval", "created"]
list_editable = ["approval", "withdrawn"]
class Meta:
model = Withdrawal
admin.site.register(Support)
| en | 0.9289 | # Register your models here. # form = DepositForm | 1.906369 | 2 |
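Support is the only model registered without a ModelAdmin. If it ever needs list columns too, the decorator pattern used above extends naturally, in place of the plain admin.site.register(Support) call (the field names below are illustrative guesses, not taken from the Support model):

@admin.register(Support)
class SupportAdmin(admin.ModelAdmin):
    # Hypothetical columns; verify against the actual Support model fields.
    list_display = ["__str__", "created"]
    list_filter = ["created"]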
vendor-local/src/django-piston/tests/test_project/settings.py | jlin/inventory | 22 | 7427 | import os
DEBUG = True
DATABASES = {
'default':
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/piston.db'
}
}
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = '/tmp/piston.db'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'piston',
'test_project.apps.testapp',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
SITE_ID = 1
ROOT_URLCONF = 'test_project.urls'
MIDDLEWARE_CLASSES = (
'piston.middleware.ConditionalMiddlewareCompatProxy',
'django.contrib.sessions.middleware.SessionMiddleware',
'piston.middleware.CommonMiddlewareCompatProxy',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
| import os
DEBUG = True
DATABASES = {
'default':
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/piston.db'
}
}
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = '/tmp/piston.db'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'piston',
'test_project.apps.testapp',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
SITE_ID = 1
ROOT_URLCONF = 'test_project.urls'
MIDDLEWARE_CLASSES = (
'piston.middleware.ConditionalMiddlewareCompatProxy',
'django.contrib.sessions.middleware.SessionMiddleware',
'piston.middleware.CommonMiddlewareCompatProxy',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
| none | 1 | 1.645625 | 2 |
|
src/sage/combinat/combinatorial_map.py | UCD4IDS/sage | 0 | 7428 |
"""
Combinatorial maps
This module provides a decorator that can be used to add semantic to a
Python method by marking it as implementing a *combinatorial map*,
that is a map between two :class:`enumerated sets <EnumeratedSets>`::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map()
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: # ... code ...
By default, this decorator is a no-op: it returns the decorated method
as is::
sage: MyPermutation.reverse
<function MyPermutation.reverse at ...>
See :func:`combinatorial_map_wrapper` for the various options this
decorator can take.
Projects built on top of Sage are welcome to customize locally this
hook to instrument the Sage code and exploit this semantic
information. Typically, the decorator could be used to populate a
database of maps. For a real-life application, see the project
`FindStat <http://findstat.org/>`. As a basic example, a variant of
the decorator is provided as :func:`combinatorial_map_wrapper`; it
wraps the decorated method, so that one can later use
:func:`combinatorial_maps_in_class` to query an object, or class
thereof, for all the combinatorial maps that apply to it.
.. NOTE::
Since decorators are evaluated upon loading Python modules,
customizing :obj:`combinatorial map` needs to be done before the
modules using it are loaded. In the examples below, where we
illustrate the customized ``combinatorial_map`` decorator on the
:mod:`sage.combinat.permutation` module, we resort to force a
reload of this module after dynamically changing
``sage.combinat.combinatorial_map.combinatorial_map``. This is
good enough for those doctests, but remains fragile.
For real use cases, it is probably best to just edit this source
file statically (see below).
"""
# ****************************************************************************
# Copyright (C) 2011 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
def combinatorial_map_trivial(f=None, order=None, name=None):
r"""
Combinatorial map decorator
See :ref:`sage.combinat.combinatorial_map` for a description of
this decorator and its purpose. This default implementation does
nothing.
INPUT:
- ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function
- ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps
- ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later
OUTPUT:
- ``f`` unchanged
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map_trivial as combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: # ... code ...
....: @combinatorial_map(name='descent set of permutation')
....: def descent_set(self):
....: '''
....: The descent set of the permutation
....: '''
....: # ... code ...
sage: MyPermutation.reverse
<function MyPermutation.reverse at ...>
sage: MyPermutation.descent_set
<function MyPermutation.descent_set at ...>
"""
if f is None:
return lambda f: f
else:
return f
def combinatorial_map_wrapper(f=None, order=None, name=None):
r"""
Combinatorial map decorator (basic example).
See :ref:`sage.combinat.combinatorial_map` for a description of
the ``combinatorial_map`` decorator and its purpose. This
implementation, together with :func:`combinatorial_maps_in_class`
illustrates how to use this decorator as a hook to instrument the
Sage code.
INPUT:
- ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function
- ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps
- ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later
OUTPUT:
- A combinatorial map. This is an instance of the :class:`CombinatorialMap`.
EXAMPLES:
We define a class illustrating the use of this implementation of
the :obj:`combinatorial_map` decorator with its various arguments::
sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map()
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: pass
....: @combinatorial_map(order=2)
....: def inverse(self):
....: '''
....: The inverse of the permutation
....: '''
....: pass
....: @combinatorial_map(name='descent set of permutation')
....: def descent_set(self):
....: '''
....: The descent set of the permutation
....: '''
....: pass
....: def major_index(self):
....: '''
....: The major index of the permutation
....: '''
....: pass
sage: MyPermutation.reverse
Combinatorial map: reverse
sage: MyPermutation.descent_set
Combinatorial map: descent set of permutation
sage: MyPermutation.inverse
Combinatorial map: inverse
One can now determine all the combinatorial maps associated with a
given object as follows::
sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class
sage: X = combinatorial_maps_in_class(MyPermutation); X # random
[Combinatorial map: reverse,
Combinatorial map: descent set of permutation,
Combinatorial map: inverse]
The method ``major_index`` defined above is not a combinatorial map::
sage: MyPermutation.major_index
<function MyPermutation.major_index at ...>
But one can define a function that turns ``major_index`` into a combinatorial map::
sage: def major_index(p):
....: return p.major_index()
sage: major_index
<function major_index at ...>
sage: combinatorial_map(major_index)
Combinatorial map: major_index
"""
if f is None:
return lambda f: CombinatorialMap(f, order=order, name=name)
else:
return CombinatorialMap(f, order=order, name=name)
##############################################################################
# Edit here to customize the combinatorial_map hook
##############################################################################
combinatorial_map = combinatorial_map_trivial
# combinatorial_map = combinatorial_map_wrapper
class CombinatorialMap(object):
r"""
This is a wrapper class for methods that are *combinatorial maps*.
For further details and doctests, see
:ref:`sage.combinat.combinatorial_map` and
:func:`combinatorial_map_wrapper`.
"""
def __init__(self, f, order=None, name=None):
"""
Constructor for combinatorial maps.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map
sage: def f(x):
....: "doc of f"
....: return x
sage: x = combinatorial_map(f); x
Combinatorial map: f
sage: x.__doc__
'doc of f'
sage: x.__name__
'f'
sage: x.__module__
'__main__'
"""
import types
if not isinstance(f, types.FunctionType):
raise ValueError("Only plain functions are supported")
self._f = f
self._inst = None  # set by __get__ when the map is accessed through an instance
self._order = order
self._name = name
if hasattr(f, "__doc__"):
self.__doc__ = f.__doc__
if hasattr(f, "__name__"):
self.__name__ = f.__name__
else:
self.__name__ = "..."
if hasattr(f, "__module__"):
self.__module__ = f.__module__
def __repr__(self):
"""
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: p.left_tableau.__repr__()
'Combinatorial map: Robinson-Schensted insertion tableau'
"""
return "Combinatorial map: %s" % self.name()
def _sage_src_lines_(self):
r"""
Return the source code location for the wrapped function.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: cm = p.left_tableau; cm
Combinatorial map: Robinson-Schensted insertion tableau
sage: (src, lines) = cm._sage_src_lines_()
sage: src[0]
" @combinatorial_map(name='Robinson-Schensted insertion tableau')\n"
sage: lines # random
2653
"""
from sage.misc.sageinspect import sage_getsourcelines
return sage_getsourcelines(self._f)
def __get__(self, inst, cls=None):
"""
Binds the method of ``self`` to the given instance.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: p.left_tableau #indirect doctest
Combinatorial map: Robinson-Schensted insertion tableau
"""
self._inst = inst
return self
def __call__(self, *args, **kwds):
"""
Calls the combinatorial map.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: cm = type(p).left_tableau; cm
Combinatorial map: Robinson-Schensted insertion tableau
sage: cm(p)
[[1, 2, 4], [3]]
sage: cm(Permutation([4,3,2,1]))
[[1], [2], [3], [4]]
"""
if self._inst is not None:
return self._f(self._inst, *args, **kwds)
else:
return self._f(*args, **kwds)
def unbounded_map(self):
r"""
Return the unbounded version of ``self``.
You can use this method to return a function which takes as input
an element in the domain of the combinatorial map.
See the example below.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: from sage.combinat.permutation import Permutation
sage: pi = Permutation([1,3,2])
sage: f = pi.reverse
sage: F = f.unbounded_map()
sage: F(pi)
[2, 3, 1]
"""
return self._f
def order(self):
"""
Returns the order of ``self``, or ``None`` if the order is not known.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class CombinatorialClass:
....: @combinatorial_map(order=2)
....: def to_self_1(): pass
....: @combinatorial_map()
....: def to_self_2(): pass
sage: CombinatorialClass.to_self_1.order()
2
sage: CombinatorialClass.to_self_2.order() is None
True
"""
return self._order
def name(self):
"""
Returns the name of a combinatorial map.
This is used for the string representation of ``self``.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class CombinatorialClass:
....: @combinatorial_map(name='map1')
....: def to_self_1(): pass
....: @combinatorial_map()
....: def to_self_2(): pass
sage: CombinatorialClass.to_self_1.name()
'map1'
sage: CombinatorialClass.to_self_2.name()
'to_self_2'
"""
if self._name is not None:
return self._name
else:
return self._f.__name__
def combinatorial_maps_in_class(cls):
"""
Return the combinatorial maps of the class as a list of combinatorial maps.
For further details and doctests, see
:ref:`sage.combinat.combinatorial_map` and
:func:`combinatorial_map_wrapper`.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class
sage: p = Permutation([1,3,2,4])
sage: cmaps = combinatorial_maps_in_class(p)
sage: cmaps # random
[Combinatorial map: Robinson-Schensted insertion tableau,
Combinatorial map: Robinson-Schensted recording tableau,
Combinatorial map: Robinson-Schensted tableau shape,
Combinatorial map: complement,
Combinatorial map: descent composition,
Combinatorial map: inverse, ...]
sage: p.left_tableau in cmaps
True
sage: p.right_tableau in cmaps
True
sage: p.complement in cmaps
True
"""
result = set()
for method in dir(cls):
entry = getattr(cls, method)
if isinstance(entry, CombinatorialMap):
result.add(entry)
return list(result)
| """
Combinatorial maps
This module provides a decorator that can be used to add semantic to a
Python method by marking it as implementing a *combinatorial map*,
that is a map between two :class:`enumerated sets <EnumeratedSets>`::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map()
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: # ... code ...
By default, this decorator is a no-op: it returns the decorated method
as is::
sage: MyPermutation.reverse
<function MyPermutation.reverse at ...>
See :func:`combinatorial_map_wrapper` for the various options this
decorator can take.
Projects built on top of Sage are welcome to customize locally this
hook to instrument the Sage code and exploit this semantic
information. Typically, the decorator could be used to populate a
database of maps. For a real-life application, see the project
`FindStat <http://findstat.org/>`. As a basic example, a variant of
the decorator is provided as :func:`combinatorial_map_wrapper`; it
wraps the decorated method, so that one can later use
:func:`combinatorial_maps_in_class` to query an object, or class
thereof, for all the combinatorial maps that apply to it.
.. NOTE::
Since decorators are evaluated upon loading Python modules,
customizing :obj:`combinatorial map` needs to be done before the
modules using it are loaded. In the examples below, where we
illustrate the customized ``combinatorial_map`` decorator on the
:mod:`sage.combinat.permutation` module, we resort to force a
reload of this module after dynamically changing
``sage.combinat.combinatorial_map.combinatorial_map``. This is
good enough for those doctests, but remains fragile.
For real use cases, it is probably best to just edit this source
file statically (see below).
"""
# ****************************************************************************
# Copyright (C) 2011 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
def combinatorial_map_trivial(f=None, order=None, name=None):
r"""
Combinatorial map decorator
See :ref:`sage.combinat.combinatorial_map` for a description of
this decorator and its purpose. This default implementation does
nothing.
INPUT:
- ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function
- ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps
- ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later
OUTPUT:
- ``f`` unchanged
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map_trivial as combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: # ... code ...
....: @combinatorial_map(name='descent set of permutation')
....: def descent_set(self):
....: '''
....: The descent set of the permutation
....: '''
....: # ... code ...
sage: MyPermutation.reverse
<function MyPermutation.reverse at ...>
sage: MyPermutation.descent_set
<function MyPermutation.descent_set at ...>
"""
if f is None:
return lambda f: f
else:
return f
def combinatorial_map_wrapper(f=None, order=None, name=None):
r"""
Combinatorial map decorator (basic example).
See :ref:`sage.combinat.combinatorial_map` for a description of
the ``combinatorial_map`` decorator and its purpose. This
implementation, together with :func:`combinatorial_maps_in_class`
illustrates how to use this decorator as a hook to instrument the
Sage code.
INPUT:
- ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function
- ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps
- ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later
OUTPUT:
- A combinatorial map. This is an instance of the :class:`CombinatorialMap`.
EXAMPLES:
We define a class illustrating the use of this implementation of
the :obj:`combinatorial_map` decorator with its various arguments::
sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map()
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: pass
....: @combinatorial_map(order=2)
....: def inverse(self):
....: '''
....: The inverse of the permutation
....: '''
....: pass
....: @combinatorial_map(name='descent set of permutation')
....: def descent_set(self):
....: '''
....: The descent set of the permutation
....: '''
....: pass
....: def major_index(self):
....: '''
....: The major index of the permutation
....: '''
....: pass
sage: MyPermutation.reverse
Combinatorial map: reverse
sage: MyPermutation.descent_set
Combinatorial map: descent set of permutation
sage: MyPermutation.inverse
Combinatorial map: inverse
One can now determine all the combinatorial maps associated with a
given object as follows::
sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class
sage: X = combinatorial_maps_in_class(MyPermutation); X # random
[Combinatorial map: reverse,
Combinatorial map: descent set of permutation,
Combinatorial map: inverse]
The method ``major_index`` defined above is not a combinatorial map::
sage: MyPermutation.major_index
<function MyPermutation.major_index at ...>
But one can define a function that turns ``major_index`` into a combinatorial map::
sage: def major_index(p):
....: return p.major_index()
sage: major_index
<function major_index at ...>
sage: combinatorial_map(major_index)
Combinatorial map: major_index
"""
if f is None:
return lambda f: CombinatorialMap(f, order=order, name=name)
else:
return CombinatorialMap(f, order=order, name=name)
##############################################################################
# Edit here to customize the combinatorial_map hook
##############################################################################
combinatorial_map = combinatorial_map_trivial
# combinatorial_map = combinatorial_map_wrapper
class CombinatorialMap(object):
r"""
This is a wrapper class for methods that are *combinatorial maps*.
For further details and doctests, see
:ref:`sage.combinat.combinatorial_map` and
:func:`combinatorial_map_wrapper`.
"""
def __init__(self, f, order=None, name=None):
"""
Constructor for combinatorial maps.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map
sage: def f(x):
....: "doc of f"
....: return x
sage: x = combinatorial_map(f); x
Combinatorial map: f
sage: x.__doc__
'doc of f'
sage: x.__name__
'f'
sage: x.__module__
'__main__'
"""
import types
if not isinstance(f, types.FunctionType):
raise ValueError("Only plain functions are supported")
self._f = f
self._inst = None  # set by __get__ when the map is accessed through an instance
self._order = order
self._name = name
if hasattr(f, "__doc__"):
self.__doc__ = f.__doc__
if hasattr(f, "__name__"):
self.__name__ = f.__name__
else:
self.__name__ = "..."
if hasattr(f, "__module__"):
self.__module__ = f.__module__
def __repr__(self):
"""
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: p.left_tableau.__repr__()
'Combinatorial map: Robinson-Schensted insertion tableau'
"""
return "Combinatorial map: %s" % self.name()
def _sage_src_lines_(self):
r"""
Return the source code location for the wrapped function.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: cm = p.left_tableau; cm
Combinatorial map: Robinson-Schensted insertion tableau
sage: (src, lines) = cm._sage_src_lines_()
sage: src[0]
" @combinatorial_map(name='Robinson-Schensted insertion tableau')\n"
sage: lines # random
2653
"""
from sage.misc.sageinspect import sage_getsourcelines
return sage_getsourcelines(self._f)
def __get__(self, inst, cls=None):
"""
Binds the method of ``self`` to the given instance.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: p.left_tableau #indirect doctest
Combinatorial map: Robinson-Schensted insertion tableau
"""
self._inst = inst
return self
def __call__(self, *args, **kwds):
"""
Calls the combinatorial map.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: cm = type(p).left_tableau; cm
Combinatorial map: Robinson-Schensted insertion tableau
sage: cm(p)
[[1, 2, 4], [3]]
sage: cm(Permutation([4,3,2,1]))
[[1], [2], [3], [4]]
"""
if self._inst is not None:
return self._f(self._inst, *args, **kwds)
else:
return self._f(*args, **kwds)
def unbounded_map(self):
r"""
Return the unbounded version of ``self``.
You can use this method to return a function which takes as input
an element in the domain of the combinatorial map.
See the example below.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: from sage.combinat.permutation import Permutation
sage: pi = Permutation([1,3,2])
sage: f = pi.reverse
sage: F = f.unbounded_map()
sage: F(pi)
[2, 3, 1]
"""
return self._f
def order(self):
"""
Returns the order of ``self``, or ``None`` if the order is not known.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class CombinatorialClass:
....: @combinatorial_map(order=2)
....: def to_self_1(): pass
....: @combinatorial_map()
....: def to_self_2(): pass
sage: CombinatorialClass.to_self_1.order()
2
sage: CombinatorialClass.to_self_2.order() is None
True
"""
return self._order
def name(self):
"""
Returns the name of a combinatorial map.
This is used for the string representation of ``self``.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class CombinatorialClass:
....: @combinatorial_map(name='map1')
....: def to_self_1(): pass
....: @combinatorial_map()
....: def to_self_2(): pass
sage: CombinatorialClass.to_self_1.name()
'map1'
sage: CombinatorialClass.to_self_2.name()
'to_self_2'
"""
if self._name is not None:
return self._name
else:
return self._f.__name__
def combinatorial_maps_in_class(cls):
"""
Return the combinatorial maps of the class as a list of combinatorial maps.
For further details and doctests, see
:ref:`sage.combinat.combinatorial_map` and
:func:`combinatorial_map_wrapper`.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class
sage: p = Permutation([1,3,2,4])
sage: cmaps = combinatorial_maps_in_class(p)
sage: cmaps # random
[Combinatorial map: Robinson-Schensted insertion tableau,
Combinatorial map: Robinson-Schensted recording tableau,
Combinatorial map: Robinson-Schensted tableau shape,
Combinatorial map: complement,
Combinatorial map: descent composition,
Combinatorial map: inverse, ...]
sage: p.left_tableau in cmaps
True
sage: p.right_tableau in cmaps
True
sage: p.complement in cmaps
True
"""
result = set()
for method in dir(cls):
entry = getattr(cls, method)
if isinstance(entry, CombinatorialMap):
result.add(entry)
return list(result) | en | 0.516141 | Combinatorial maps This module provides a decorator that can be used to add semantic to a Python method by marking it as implementing a *combinatorial map*, that is a map between two :class:`enumerated sets <EnumeratedSets>`:: sage: from sage.combinat.combinatorial_map import combinatorial_map sage: class MyPermutation(object): ....: @combinatorial_map() ....: def reverse(self): ....: ''' ....: Reverse the permutation ....: ''' ....: # ... code ... By default, this decorator is a no-op: it returns the decorated method as is:: sage: MyPermutation.reverse <function MyPermutation.reverse at ...> See :func:`combinatorial_map_wrapper` for the various options this decorator can take. Projects built on top of Sage are welcome to customize locally this hook to instrument the Sage code and exploit this semantic information. Typically, the decorator could be used to populate a database of maps. For a real-life application, see the project `FindStat <http://findstat.org/>`. As a basic example, a variant of the decorator is provided as :func:`combinatorial_map_wrapper`; it wraps the decorated method, so that one can later use :func:`combinatorial_maps_in_class` to query an object, or class thereof, for all the combinatorial maps that apply to it. .. NOTE:: Since decorators are evaluated upon loading Python modules, customizing :obj:`combinatorial map` needs to be done before the modules using it are loaded. In the examples below, where we illustrate the customized ``combinatorial_map`` decorator on the :mod:`sage.combinat.permutation` module, we resort to force a reload of this module after dynamically changing ``sage.combinat.combinatorial_map.combinatorial_map``. This is good enough for those doctests, but remains fragile. For real use cases, it is probably best to just edit this source file statically (see below). # **************************************************************************** # Copyright (C) 2011 <NAME> <<EMAIL>> # # Distributed under the terms of the GNU General Public License (GPL) # https://www.gnu.org/licenses/ # **************************************************************************** Combinatorial map decorator See :ref:`sage.combinat.combinatorial_map` for a description of this decorator and its purpose. This default implementation does nothing. INPUT: - ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function - ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps - ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later OUTPUT: - ``f`` unchanged EXAMPLES:: sage: from sage.combinat.combinatorial_map import combinatorial_map_trivial as combinatorial_map sage: class MyPermutation(object): ....: @combinatorial_map ....: def reverse(self): ....: ''' ....: Reverse the permutation ....: ''' ....: # ... code ... ....: @combinatorial_map(name='descent set of permutation') ....: def descent_set(self): ....: ''' ....: The descent set of the permutation ....: ''' ....: # ... code ... sage: MyPermutation.reverse <function MyPermutation.reverse at ...> sage: MyPermutation.descent_set <function MyPermutation.descent_set at ...> Combinatorial map decorator (basic example). See :ref:`sage.combinat.combinatorial_map` for a description of the ``combinatorial_map`` decorator and its purpose. 
This implementation, together with :func:`combinatorial_maps_in_class` illustrates how to use this decorator as a hook to instrument the Sage code. INPUT: - ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function - ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps - ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later OUTPUT: - A combinatorial map. This is an instance of the :class:`CombinatorialMap`. EXAMPLES: We define a class illustrating the use of this implementation of the :obj:`combinatorial_map` decorator with its various arguments:: sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map sage: class MyPermutation(object): ....: @combinatorial_map() ....: def reverse(self): ....: ''' ....: Reverse the permutation ....: ''' ....: pass ....: @combinatorial_map(order=2) ....: def inverse(self): ....: ''' ....: The inverse of the permutation ....: ''' ....: pass ....: @combinatorial_map(name='descent set of permutation') ....: def descent_set(self): ....: ''' ....: The descent set of the permutation ....: ''' ....: pass ....: def major_index(self): ....: ''' ....: The major index of the permutation ....: ''' ....: pass sage: MyPermutation.reverse Combinatorial map: reverse sage: MyPermutation.descent_set Combinatorial map: descent set of permutation sage: MyPermutation.inverse Combinatorial map: inverse One can now determine all the combinatorial maps associated with a given object as follows:: sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class sage: X = combinatorial_maps_in_class(MyPermutation); X # random [Combinatorial map: reverse, Combinatorial map: descent set of permutation, Combinatorial map: inverse] The method ``major_index`` defined about is not a combinatorial map:: sage: MyPermutation.major_index <function MyPermutation.major_index at ...> But one can define a function that turns ``major_index`` into a combinatorial map:: sage: def major_index(p): ....: return p.major_index() sage: major_index <function major_index at ...> sage: combinatorial_map(major_index) Combinatorial map: major_index ############################################################################## # Edit here to customize the combinatorial_map hook ############################################################################## # combinatorial_map = combinatorial_map_wrapper This is a wrapper class for methods that are *combinatorial maps*. For further details and doctests, see :ref:`sage.combinat.combinatorial_map` and :func:`combinatorial_map_wrapper`. Constructor for combinatorial maps. EXAMPLES:: sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map sage: def f(x): ....: "doc of f" ....: return x sage: x = combinatorial_map(f); x Combinatorial map: f sage: x.__doc__ 'doc of f' sage: x.__name__ 'f' sage: x.__module__ '__main__' EXAMPLES:: sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper sage: from importlib import reload sage: _ = reload(sage.combinat.permutation) sage: p = Permutation([1,3,2,4]) sage: p.left_tableau.__repr__() 'Combinatorial map: Robinson-Schensted insertion tableau' Return the source code location for the wrapped function. 
EXAMPLES:: sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper sage: from importlib import reload sage: _ = reload(sage.combinat.permutation) sage: p = Permutation([1,3,2,4]) sage: cm = p.left_tableau; cm Combinatorial map: Robinson-Schensted insertion tableau sage: (src, lines) = cm._sage_src_lines_() sage: src[0] " @combinatorial_map(name='Robinson-Schensted insertion tableau')\n" sage: lines # random 2653 Bounds the method of self to the given instance. EXAMPLES:: sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper sage: from importlib import reload sage: _ = reload(sage.combinat.permutation) sage: p = Permutation([1,3,2,4]) sage: p.left_tableau #indirect doctest Combinatorial map: Robinson-Schensted insertion tableau Calls the combinatorial map. EXAMPLES:: sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper sage: from importlib import reload sage: _ = reload(sage.combinat.permutation) sage: p = Permutation([1,3,2,4]) sage: cm = type(p).left_tableau; cm Combinatorial map: Robinson-Schensted insertion tableau sage: cm(p) [[1, 2, 4], [3]] sage: cm(Permutation([4,3,2,1])) [[1], [2], [3], [4]] Return the unbounded version of ``self``. You can use this method to return a function which takes as input an element in the domain of the combinatorial map. See the example below. EXAMPLES:: sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper sage: from importlib import reload sage: _ = reload(sage.combinat.permutation) sage: from sage.combinat.permutation import Permutation sage: pi = Permutation([1,3,2]) sage: f = pi.reverse sage: F = f.unbounded_map() sage: F(pi) [2, 3, 1] Returns the order of ``self``, or ``None`` if the order is not known. EXAMPLES:: sage: from sage.combinat.combinatorial_map import combinatorial_map sage: class CombinatorialClass: ....: @combinatorial_map(order=2) ....: def to_self_1(): pass ....: @combinatorial_map() ....: def to_self_2(): pass sage: CombinatorialClass.to_self_1.order() 2 sage: CombinatorialClass.to_self_2.order() is None True Returns the name of a combinatorial map. This is used for the string representation of ``self``. EXAMPLES:: sage: from sage.combinat.combinatorial_map import combinatorial_map sage: class CombinatorialClass: ....: @combinatorial_map(name='map1') ....: def to_self_1(): pass ....: @combinatorial_map() ....: def to_self_2(): pass sage: CombinatorialClass.to_self_1.name() 'map1' sage: CombinatorialClass.to_self_2.name() 'to_self_2' Return the combinatorial maps of the class as a list of combinatorial maps. For further details and doctests, see :ref:`sage.combinat.combinatorial_map` and :func:`combinatorial_map_wrapper`. EXAMPLES:: sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper sage: from importlib import reload sage: _ = reload(sage.combinat.permutation) sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class sage: p = Permutation([1,3,2,4]) sage: cmaps = combinatorial_maps_in_class(p) sage: cmaps # random [Combinatorial map: Robinson-Schensted insertion tableau, Combinatorial map: Robinson-Schensted recording tableau, Combinatorial map: Robinson-Schensted tableau shape, Combinatorial map: complement, Combinatorial map: descent composition, Combinatorial map: inverse, ...] 
sage: p.left_tableau in cmaps True sage: p.right_tableau in cmaps True sage: p.complement in cmaps True | 3.468513 | 3 |
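Outside Sage's doctest harness, the wrapper variant of the decorator is exercised the same way; a small self-contained check (assumes a Sage installation so the module imports):

from sage.combinat.combinatorial_map import (
    combinatorial_map_wrapper as combinatorial_map,
    combinatorial_maps_in_class,
)

class Word(object):
    def __init__(self, letters):
        self.letters = letters

    @combinatorial_map(order=2, name="reversal")
    def reverse(self):
        return Word(self.letters[::-1])

print(Word.reverse)                   # Combinatorial map: reversal
print(Word.reverse.order())           # 2
print(Word("abc").reverse().letters)  # cba
print([m.name() for m in combinatorial_maps_in_class(Word)])  # ['reversal']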
tests/cppproj/xdressrc.py | xdress/xdress | 88 | 7429 |
import os
from xdress.utils import apiname
package = 'cppproj'
packagedir = 'cppproj'
includes = ['src']
plugins = ('xdress.autoall', 'xdress.pep8names', 'xdress.cythongen',
'xdress.stlwrap', )
extra_types = 'cppproj_extra_types' # non-default value
dtypes = [
('map', 'str', 'int'),
('set', 'int'),
'float32',
('vector', 'int32'),
'ThreeNums',
]
stlcontainers = [
('pair', 'int', ('vector', 'int')),
('pair', 'int', 'str'),
('pair', 'int', 'int'),
('pair', 'int', 'SomeCrazyPairValue'),
('pair', 'ThreeNums', 'int'),
('vector', 'float64'),
('vector', 'str'),
('vector', 'int32'),
('vector', 'complex'),
('vector', ('vector', 'float64')),
('set', 'int'),
('set', 'str'),
('set', 'uint'),
('set', 'char'),
('set', 'ThreeNums'),
('map', 'str', 'str'),
('map', 'str', 'int'),
('map', 'int', 'str'),
('map', 'str', 'uint'),
('map', 'uint', 'str'),
('map', 'uint', 'uint'),
('map', 'str', 'float'),
('map', 'ThreeNums', 'float'),
('map', 'int', 'int'),
('map', 'int', 'bool'),
('map', 'int', 'char'),
('map', 'int', 'float'),
('map', 'uint', 'float'),
('map', 'int', 'complex'),
('map', ('pair', 'int', 'int'), 'float'),
('map', 'int', ('set', 'int')),
('map', 'int', ('set', 'str')),
('map', 'int', ('set', 'uint')),
('map', 'int', ('set', 'char')),
('map', 'int', ('vector', 'str')),
('map', 'int', ('vector', 'int')),
('map', 'int', ('vector', 'uint')),
('map', 'int', ('vector', 'char')),
('map', 'int', ('vector', 'bool')),
('map', 'int', ('vector', 'float')),
('map', 'int', ('vector', ('vector', 'float64'))),
('map', 'int', ('map', 'int', 'bool')),
('map', 'int', ('map', 'int', 'char')),
('map', 'int', ('map', 'int', 'float')),
('map', 'int', ('map', 'int', ('vector', 'bool'))),
('map', 'int', ('map', 'int', ('vector', 'char'))),
('map', 'int', ('map', 'int', ('vector', 'float'))),
('map', 'int', ('vector', ('set', 'int'))),
]
dtypes_module = 'dt'
stlcontainers_module = 'stlc'
_fromsrcdir = lambda x: os.path.join('src', x)
_inbasics = {'srcfiles': _fromsrcdir('basics.[ch]*'),
'incfiles': 'basics.hpp', # trick to get around cython generating *.h
'language': 'c++',
}
_indiscovery = {'srcfiles': _fromsrcdir('discovery*'),
'incfiles': 'discovery.h',
'language': 'c++',
}
variables = [
apiname('PersonID', tarbase='pybasics', **_inbasics),
apiname('*', **_indiscovery),
]
functions = [
apiname('voided', **_inbasics),
apiname('pairs_be_crazy', tarbase='pybasics', **_inbasics),
apiname('call_with_void_fp_struct', **_inbasics),
{'srcname': 'func0',
'tarname': 'a_better_name',
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname('func1', **_inbasics),
apiname('func2', **_inbasics),
apiname('func3', **_inbasics),
apiname('func4', tarbase='pybasics', **_inbasics),
apiname('setfunc', **_inbasics),
apiname(('findmin', 'int32', 'float32',), **_inbasics),
apiname(('findmin', 'float64', 'float32',), **_inbasics),
{'srcname': ('findmin', 'int', 'int',),
'incfiles': 'basics.h',
'tarname': ('regmin', 'int', 'int',),
'srcfiles': _fromsrcdir('basics.[ch]*')},
{'srcname': ('findmin', 'bool', 'bool',),
'tarname': 'sillyBoolMin',
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname(('lessthan', 'int32', 3,), **_inbasics),
apiname('call_threenums_op_from_c', tarbase='pybasics', **_inbasics),
apiname('*', **_indiscovery),
]
classes = [
#apiname('struct0', 'basics', 'pybasics', 'My_Struct_0'), FIXME This needs more work
apiname('Union0', **_inbasics),
apiname('VoidFPStruct', **_inbasics),
apiname('A', **_inbasics),
apiname('B', **_inbasics),
apiname('C', **_inbasics),
apiname('SomeCrazyPairValue', tarbase='pybasics', **_inbasics),
# apiname('SomeCrazyPairValue', **_inbasics),
apiname(('TClass1', 'int32'), **_inbasics),
apiname(('TClass1', 'float64'), **_inbasics),
{'srcname': ('TClass1', 'float32'),
'tarname': 'TC1Floater',
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname(('TClass0', 'int32'), **_inbasics),
apiname(('TClass0', 'float64'), **_inbasics),
{'srcname': ('TClass0', 'bool'),
'tarname': ('TC0Bool', 'bool'),
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname('Untemplated', **_inbasics),
apiname('ThreeNums', tarbase='pybasics', **_inbasics),
apiname('*', **_indiscovery),
apiname(('TClass0', 'float32'), **_inbasics),
apiname(('TClass2', 'float32'), **_inbasics),
apiname('NoDefault', **_inbasics),
apiname('NoDefaultChild', **_inbasics),
apiname(('EnumArg', 'JOAN'), tarbase='pybasics', **_inbasics),
]
del os
del apiname
| import os
from xdress.utils import apiname
package = 'cppproj'
packagedir = 'cppproj'
includes = ['src']
plugins = ('xdress.autoall', 'xdress.pep8names', 'xdress.cythongen',
'xdress.stlwrap', )
extra_types = 'cppproj_extra_types' # non-default value
dtypes = [
('map', 'str', 'int'),
('set', 'int'),
'float32',
('vector', 'int32'),
'ThreeNums',
]
stlcontainers = [
('pair', 'int', ('vector', 'int')),
('pair', 'int', 'str'),
('pair', 'int', 'int'),
('pair', 'int', 'SomeCrazyPairValue'),
('pair', 'ThreeNums', 'int'),
('vector', 'float64'),
('vector', 'str'),
('vector', 'int32'),
('vector', 'complex'),
('vector', ('vector', 'float64')),
('set', 'int'),
('set', 'str'),
('set', 'uint'),
('set', 'char'),
('set', 'ThreeNums'),
('map', 'str', 'str'),
('map', 'str', 'int'),
('map', 'int', 'str'),
('map', 'str', 'uint'),
('map', 'uint', 'str'),
('map', 'uint', 'uint'),
('map', 'str', 'float'),
('map', 'ThreeNums', 'float'),
('map', 'int', 'int'),
('map', 'int', 'bool'),
('map', 'int', 'char'),
('map', 'int', 'float'),
('map', 'uint', 'float'),
('map', 'int', 'complex'),
('map', ('pair', 'int', 'int'), 'float'),
('map', 'int', ('set', 'int')),
('map', 'int', ('set', 'str')),
('map', 'int', ('set', 'uint')),
('map', 'int', ('set', 'char')),
('map', 'int', ('vector', 'str')),
('map', 'int', ('vector', 'int')),
('map', 'int', ('vector', 'uint')),
('map', 'int', ('vector', 'char')),
('map', 'int', ('vector', 'bool')),
('map', 'int', ('vector', 'float')),
('map', 'int', ('vector', ('vector', 'float64'))),
('map', 'int', ('map', 'int', 'bool')),
('map', 'int', ('map', 'int', 'char')),
('map', 'int', ('map', 'int', 'float')),
('map', 'int', ('map', 'int', ('vector', 'bool'))),
('map', 'int', ('map', 'int', ('vector', 'char'))),
('map', 'int', ('map', 'int', ('vector', 'float'))),
('map', 'int', ('vector', ('set', 'int'))),
]
dtypes_module = 'dt'
stlcontainers_module = 'stlc'
_fromsrcdir = lambda x: os.path.join('src', x)
_inbasics = {'srcfiles': _fromsrcdir('basics.[ch]*'),
'incfiles': 'basics.hpp', # trick to get around cython generating *.h
'language': 'c++',
}
_indiscovery = {'srcfiles': _fromsrcdir('discovery*'),
'incfiles': 'discovery.h',
'language': 'c++',
}
variables = [
apiname('PersonID', tarbase='pybasics', **_inbasics),
apiname('*', **_indiscovery),
]
functions = [
apiname('voided', **_inbasics),
apiname('pairs_be_crazy', tarbase='pybasics', **_inbasics),
apiname('call_with_void_fp_struct', **_inbasics),
{'srcname': 'func0',
'tarname': 'a_better_name',
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname('func1', **_inbasics),
apiname('func2', **_inbasics),
apiname('func3', **_inbasics),
apiname('func4', tarbase='pybasics', **_inbasics),
apiname('setfunc', **_inbasics),
apiname(('findmin', 'int32', 'float32',), **_inbasics),
apiname(('findmin', 'float64', 'float32',), **_inbasics),
{'srcname': ('findmin', 'int', 'int',),
'incfiles': 'basics.h',
'tarname': ('regmin', 'int', 'int',),
'srcfiles': _fromsrcdir('basics.[ch]*')},
{'srcname': ('findmin', 'bool', 'bool',),
'tarname': 'sillyBoolMin',
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname(('lessthan', 'int32', 3,), **_inbasics),
apiname('call_threenums_op_from_c', tarbase='pybasics', **_inbasics),
apiname('*', **_indiscovery),
]
classes = [
#apiname('struct0', 'basics', 'pybasics', 'My_Struct_0'), FIXME This needs more work
apiname('Union0', **_inbasics),
apiname('VoidFPStruct', **_inbasics),
apiname('A', **_inbasics),
apiname('B', **_inbasics),
apiname('C', **_inbasics),
apiname('SomeCrazyPairValue', tarbase='pybasics', **_inbasics),
# apiname('SomeCrazyPairValue', **_inbasics),
apiname(('TClass1', 'int32'), **_inbasics),
apiname(('TClass1', 'float64'), **_inbasics),
{'srcname': ('TClass1', 'float32'),
'tarname': 'TC1Floater',
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname(('TClass0', 'int32'), **_inbasics),
apiname(('TClass0', 'float64'), **_inbasics),
{'srcname': ('TClass0', 'bool'),
'tarname': ('TC0Bool', 'bool'),
'incfiles': 'basics.h',
'srcfiles': _fromsrcdir('basics.[ch]*')},
apiname('Untemplated', **_inbasics),
apiname('ThreeNums', tarbase='pybasics', **_inbasics),
apiname('*', **_indiscovery),
apiname(('TClass0', 'float32'), **_inbasics),
apiname(('TClass2', 'float32'), **_inbasics),
apiname('NoDefault', **_inbasics),
apiname('NoDefaultChild', **_inbasics),
apiname(('EnumArg', 'JOAN'), tarbase='pybasics', **_inbasics),
]
del os
del apiname | en | 0.458194 | # non-default value # trick to get around cython generating *.h #apiname('struct0', 'basics', 'pybasics', 'My_Struct_0'), FIXME This needs more work # apiname('SomeCrazyPairValue', **_inbasics), | 1.615094 | 2 |
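Because xdressrc is plain Python, new wrappers are just more list entries built with the same helpers, added before the final del os / del apiname cleanup. Both lines below are hypothetical: they assume the named template instantiation and types exist under src/.

# Another instantiation of a template basics.h already exposes (assumed valid).
classes.append(apiname(('TClass2', 'int64'), **_inbasics))

# STL specs nest freely: a map from str to vector-of-(int, int) pairs.
stlcontainers.append(('map', 'str', ('vector', ('pair', 'int', 'int'))))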
routines/server.py | henryshunt/c-aws | 0 | 7430 |
import os
import subprocess
import routines.config as config
import routines.helpers as helpers
def get_static_info():
""" Outputs data concerning the computer in the C-AWS station
"""
startup_time = None
data_drive_space = None
camera_drive_space = None
# Get system startup time
try:
startup_time = (subprocess
.check_output(["uptime", "-s"]).decode().rstrip())
except Exception: pass  # leave startup_time as None if uptime is unavailable
# Get data and camera drive space
if config.load() == True:
if os.path.isdir(config.data_directory):
free_space = helpers.remaining_space(config.data_directory)
if free_space != None:
data_drive_space = round(free_space, 2)
if (config.camera_directory != None and os.path.isdir(
config.camera_directory) and os.path.ismount(
config.camera_directory)):
free_space = helpers.remaining_space(config.camera_directory)
if free_space != None:
camera_drive_space = round(free_space, 2)
print(str(helpers.none_to_null(startup_time)) + "\n"
+ str(helpers.none_to_null(data_drive_space)) + "\n"
+ str(helpers.none_to_null(camera_drive_space))) | import os
import subprocess
import routines.config as config
import routines.helpers as helpers
def get_static_info():
""" Outputs data concerning the computer in the C-AWS station
"""
startup_time = None
data_drive_space = None
camera_drive_space = None
# Get system startup time
try:
startup_time = (subprocess
.check_output(["uptime", "-s"]).decode().rstrip())
except: pass
# Get data and camera drive space
if config.load() == True:
if os.path.isdir(config.data_directory):
free_space = helpers.remaining_space(config.data_directory)
if free_space != None:
data_drive_space = round(free_space, 2)
if (config.camera_directory != None and os.path.isdir(
config.camera_directory) and os.path.ismount(
config.camera_directory)):
free_space = helpers.remaining_space(config.camera_directory)
if free_space != None:
camera_drive_space = round(free_space, 2)
print(str(helpers.none_to_null(startup_time)) + "\n"
+ str(helpers.none_to_null(data_drive_space)) + "\n"
+ str(helpers.none_to_null(camera_drive_space))) | en | 0.835937 | Outputs data concerning the computer in the C-AWS station # Get system startup time # Get data and camera drive space | 2.554212 | 3 |
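The record above calls helpers.remaining_space, whose implementation is not shown. A plausible minimal version built on shutil.disk_usage is sketched below; the gigabyte unit and the None-on-error behaviour are assumptions inferred from how the result is used.

# Hypothetical stand-in for the c-aws helpers.remaining_space.
import shutil

def remaining_space(directory):
    # Free space in gigabytes, or None if the path cannot be queried.
    try:
        return shutil.disk_usage(directory).free / (1024 ** 3)
    except OSError:
        return None

print(remaining_space('/'))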
DQM/DTMonitorModule/python/dtChamberEfficiencyHI_cfi.py | pasmuss/cmssw | 0 | 7431 | <gh_stars>0
import FWCore.ParameterSet.Config as cms
from RecoMuon.TrackingTools.MuonServiceProxy_cff import MuonServiceProxy
dtEfficiencyMonitor = cms.EDAnalyzer("DTChamberEfficiency",
MuonServiceProxy,
debug = cms.untracked.bool(True),
TrackCollection = cms.InputTag("standAloneMuons"),
theMaxChi2 = cms.double(1000.),
theNSigma = cms.double(3.),
theMinNrec = cms.double(5.),
dt4DSegments = cms.InputTag("dt4DSegments"),
theRPCRecHits = cms.InputTag("dummy"),
thegemRecHits = cms.InputTag("dummy"),
cscSegments = cms.InputTag("dummy"),
RPCLayers = cms.bool(False),
NavigationType = cms.string("Standard")
)
| import FWCore.ParameterSet.Config as cms
from RecoMuon.TrackingTools.MuonServiceProxy_cff import MuonServiceProxy
dtEfficiencyMonitor = cms.EDAnalyzer("DTChamberEfficiency",
MuonServiceProxy,
debug = cms.untracked.bool(True),
TrackCollection = cms.InputTag("standAloneMuons"),
theMaxChi2 = cms.double(1000.),
theNSigma = cms.double(3.),
theMinNrec = cms.double(5.),
dt4DSegments = cms.InputTag("dt4DSegments"),
theRPCRecHits = cms.InputTag("dummy"),
thegemRecHits = cms.InputTag("dummy"),
cscSegments = cms.InputTag("dummy"),
RPCLayers = cms.bool(False),
NavigationType = cms.string("Standard")
) | none | 1 | 1.244699 | 1 |
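Fragments like the one above are imported into a full CMSSW process and, where needed, cloned with overridden parameters. A usage sketch follows; the process name, override values, and path wiring are illustrative assumptions rather than part of this record.

# Illustrative use of the fragment inside a cms.Process; names and
# overrides here are assumptions.
import FWCore.ParameterSet.Config as cms
from DQM.DTMonitorModule.dtChamberEfficiencyHI_cfi import dtEfficiencyMonitor

process = cms.Process("DTDQM")
# clone() copies the analyzer while overriding selected parameters.
process.dtEfficiencyQuiet = dtEfficiencyMonitor.clone(debug=False)
process.p = cms.Path(process.dtEfficiencyQuiet)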
|
gym_reinmav/envs/mujoco/__init__.py | peterminh227/reinmav-gym | 60 | 7432 | from gym_reinmav.envs.mujoco.mujoco_quad import MujocoQuadEnv
from gym_reinmav.envs.mujoco.mujoco_quad_hovering import MujocoQuadHoveringEnv
from gym_reinmav.envs.mujoco.mujoco_quad_quat import MujocoQuadQuaternionEnv | from gym_reinmav.envs.mujoco.mujoco_quad import MujocoQuadEnv
from gym_reinmav.envs.mujoco.mujoco_quad_hovering import MujocoQuadHoveringEnv
from gym_reinmav.envs.mujoco.mujoco_quad_quat import MujocoQuadQuaternionEnv | none | 1 | 1.143716 | 1 |
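Assuming these classes follow the standard gym environment API (reset/step/action_space) and take no required constructor arguments, a random-action rollout would look like the sketch below; the environments' internals are not shown in this record.

# Random rollout sketch; the no-argument constructor and gym-style API
# are assumptions about classes not shown here.
from gym_reinmav.envs.mujoco import MujocoQuadHoveringEnv

env = MujocoQuadHoveringEnv()
obs = env.reset()
for _ in range(100):
    action = env.action_space.sample()  # random control input
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
env.close()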
|
test.py | jasonivey/scripts | 0 | 7433 | <reponame>jasonivey/scripts
#!/usr/bin/env python3
# vim:softtabstop=4:ts=4:sw=4:expandtab:tw=120
from ansimarkup import AnsiMarkup, parse
import csv
import datetime
import operator
import os
from pathlib import Path
import re
import sys
import traceback
_VERBOSE = False
user_tags = {
'error' : parse('<bold><red>'),
'name' : parse('<bold><cyan>'),
'value' : parse('<bold><white>'),
}
am = AnsiMarkup(tags=user_tags)
def _assert_msg(msg):
return am.ansistring(f'<error>{msg}</error>')
def _print_name_value(name, max_name_len, value, prefix=None, postfix=None):
prefix = prefix if prefix is not None else ''
postfix = postfix if postfix is not None else ''
lh = am.ansistring(f'<name>{name}</name>')
rh = am.ansistring(f'<value>{value}</value>')
print(f'{prefix}{lh:{max_name_len + lh.delta}} {rh}{postfix}')
def _get_name_value_compact(name, max_name_len, value, prefix=None, postfix=None):
prefix = prefix if prefix is not None else ''
postfix = postfix if postfix is not None else ''
return am.ansistring(f'{prefix}<name>{name}</name> <value>{value}</value>{postfix}')
def _get_timezone_info():
return datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
def _convert_date_time(dt):
return f'{dt:%d-%b-%Y %I:%M:%S%p %Z}'.replace('AM', 'am').replace('PM', 'pm')
def _parse_datetime(dt_str):
dt = datetime.datetime.strptime(dt_str, '%m/%d/%Y %I:%M %p') # Example '11/08/2011 03:00 PM'
tz = _get_timezone_info()
return dt.replace(tzinfo=tz)
def _parse_datetime_row(row):
return _parse_datetime(' '.join(row[2:4]))
def _parse_appointment_row(row, index):
assert len(row) >= 4, _assert_msg(f'row {index} does not have 4 or more columns as required')
appt_time = _parse_datetime(' '.join(row[2:4]))
appt_type = row[0].title()
doctor = row[1].title()
return appt_time, appt_type, doctor
def parse_doctor_appointments(file_name):
path = Path(os.path.expandvars(file_name))
with path.open(newline='', encoding='utf-8') as handle:
reader = csv.reader(handle)
sorted_rows = sorted(reader, key=lambda x: _parse_datetime_row(x))
for index, row in enumerate(sorted_rows):
yield _parse_appointment_row(row, index)
def get_doctors_appointments():
MAX_WIDTH = len('Appointment:')
file_name = '$HOME/Downloads/crump-visits.csv'
for appt_time, appt_type, doctor in parse_doctor_appointments(file_name):
s = _get_name_value_compact('Appointment:', None, _convert_date_time(appt_time), postfix=', ')
s += _get_name_value_compact('Type:', None, appt_type, postfix=', ')
print(s + _get_name_value_compact('Doctor:', None, doctor))
def main(args):
try:
get_doctors_appointments()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| #!/usr/bin/env python3
# vim:softtabstop=4:ts=4:sw=4:expandtab:tw=120
from ansimarkup import AnsiMarkup, parse
import csv
import datetime
import operator
import os
from pathlib import Path
import re
import sys
import traceback
_VERBOSE = False
user_tags = {
'error' : parse('<bold><red>'),
'name' : parse('<bold><cyan>'),
'value' : parse('<bold><white>'),
}
am = AnsiMarkup(tags=user_tags)
def _assert_msg(msg):
return am.ansistring(f'<error>{msg}</error>')
def _print_name_value(name, max_name_len, value, prefix=None, postfix=None):
prefix = prefix if prefix is not None else ''
postfix = postfix if postfix is not None else ''
lh = am.ansistring(f'<name>{name}</name>')
rh = am.ansistring(f'<value>{value}</value>')
print(f'{prefix}{lh:{max_name_len + lh.delta}} {rh}{postfix}')
def _get_name_value_compact(name, max_name_len, value, prefix=None, postfix=None):
prefix = prefix if prefix is not None else ''
postfix = postfix if postfix is not None else ''
return am.ansistring(f'{prefix}<name>{name}</name> <value>{value}</value>{postfix}')
def _get_timezone_info():
return datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
def _convert_date_time(dt):
return f'{dt:%d-%b-%Y %I:%M:%S%p %Z}'.replace('AM', 'am').replace('PM', 'pm')
def _parse_datetime(dt_str):
dt = datetime.datetime.strptime(dt_str, '%m/%d/%Y %I:%M %p') # Example '11/08/2011 03:00 PM'
tz = _get_timezone_info()
return dt.replace(tzinfo=tz)
def _parse_datetime_row(row):
return _parse_datetime(' '.join(row[2:4]))
def _parse_appointment_row(row, index):
assert len(row) >= 4, _assert_msg(f'row {index} does not have 4 or more columns as required')
appt_time = _parse_datetime(' '.join(row[2:4]))
appt_type = row[0].title()
doctor = row[1].title()
return appt_time, appt_type, doctor
def parse_doctor_appointments(file_name):
path = Path(os.path.expandvars(file_name))
with path.open(newline='', encoding='utf-8') as handle:
reader = csv.reader(handle)
sorted_rows = sorted(reader, key=lambda x: _parse_datetime_row(x))
for index, row in enumerate(sorted_rows):
yield _parse_appointment_row(row, index)
def get_doctors_appointments():
MAX_WIDTH = len('Appointment:')
file_name = '$HOME/Downloads/crump-visits.csv'
for appt_time, appt_type, doctor in parse_doctor_appointments(file_name):
s = _get_name_value_compact('Appointment:', None, _convert_date_time(appt_time), postfix=', ')
s += _get_name_value_compact('Type:', None, appt_type, postfix=', ')
print(s + _get_name_value_compact('Doctor:', None, doctor))
def main(args):
try:
get_doctors_appointments()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv)) | ru | 0.137217 | #!/usr/bin/env python3 # vim:softtabstop=4:ts=4:sw=4:expandtab:tw=120 # Example '11/08/2011 03:00 PM' | 2.418618 | 2 |
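The parser above expects CSV rows whose first four columns are appointment type, doctor, date, and time (strptime format %m/%d/%Y %I:%M %p). A self-contained check of that row format, using a made-up row, is:

# Mirrors _parse_appointment_row above, minus the timezone handling;
# the sample row is fabricated for illustration.
import datetime

def parse_row(row):
    appt_time = datetime.datetime.strptime(' '.join(row[2:4]), '%m/%d/%Y %I:%M %p')
    return appt_time, row[0].title(), row[1].title()

row = ['checkup', 'dr. crump', '11/08/2011', '03:00 PM']
print(parse_row(row))  # (datetime(2011, 11, 8, 15, 0), 'Checkup', 'Dr. Crump')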
RDyn-master/rdyn/test/rdyn_test.py | nogrady/dynamo | 12 | 7434 | import unittest
import shutil
from rdyn.alg.RDyn_v2 import RDynV2
class RDynTestCase(unittest.TestCase):
def test_rdyn_simplified(self):
print("1")
rdb = RDynV2(size=500, iterations=100)
rdb.execute(simplified=True)
print("2")
rdb = RDynV2(size=500, iterations=100, max_evts=2)
rdb.execute(simplified=True)
print("3")
rdb = RDynV2(size=500, iterations=100, new_node=0.1, del_node=0.1, max_evts=2, paction=0.8)
rdb.execute(simplified=False)
print("Done")
shutil.rmtree("results")
if __name__ == '__main__':
unittest.main()
| import unittest
import shutil
from rdyn.alg.RDyn_v2 import RDynV2
class RDynTestCase(unittest.TestCase):
def test_rdyn_simplified(self):
print("1")
rdb = RDynV2(size=500, iterations=100)
rdb.execute(simplified=True)
print("2")
rdb = RDynV2(size=500, iterations=100, max_evts=2)
rdb.execute(simplified=True)
print("3")
rdb = RDynV2(size=500, iterations=100, new_node=0.1, del_node=0.1, max_evts=2, paction=0.8)
rdb.execute(simplified=False)
print("Done")
shutil.rmtree("results")
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.500959 | 3 |
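Outside of unittest, the same generator can be driven directly. The sketch below sticks to the constructor arguments the test itself exercises and infers the ./results output directory from the test's cleanup call.

# Direct use of RDynV2 mirroring the test above.
import shutil
from rdyn.alg.RDyn_v2 import RDynV2

rdb = RDynV2(size=500, iterations=100, max_evts=2)
rdb.execute(simplified=True)
# The generator writes under ./results, as the rmtree in the test implies.
shutil.rmtree("results", ignore_errors=True)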
|
recognition/datasets/build.py | Jung-Jun-Uk/UNPG | 7 | 7435 | <reponame>Jung-Jun-Uk/UNPG
import os
from .kface import KFace
from .ms1m import MS1M
from .bin_datasets import BIN
from .ijb import IJB
def build_datasets(data_cfg, batch_size, cuda, workers, mode, rank=-1):
assert mode in ['train', 'test']
cfg = data_cfg[mode]
if cfg['dataset'] == 'kface':
dataset = KFace(cfg['data_path'], cfg['test_idx_txt'], cfg['acs'], cfg['lux'], cfg['eps'], cfg['pose'],
cfg['img_size'], batch_size, cuda, workers, mode=mode)
elif cfg['dataset'] == 'ms1m':
dataset = MS1M(cfg['data_path'], cfg['preprocessed_file'], cfg['img_size'], cfg['min_img'],
batch_size, cuda, workers, mode=mode, rank=rank)
elif cfg['dataset'] == 'bin':
root, file_names = cfg['root'], cfg['file_names']
if isinstance(file_names, str):
data_path = os.path.join(root, file_names)
dataset = BIN(data_path, cfg['img_size'], batch_size, cuda, workers)
elif isinstance(file_names, list):
data_path = [os.path.join(root, f) for f in file_names]
dataset = [BIN(dp, cfg['img_size'], batch_size, cuda, workers) for dp in data_path]
elif cfg['dataset'] in ['ijbb', 'ijbc']:
dataset = IJB(cfg['root'], cfg['inf_list'], cfg['img_size'], batch_size, cuda, workers)
return dataset
| import os
from .kface import KFace
from .ms1m import MS1M
from .bin_datasets import BIN
from .ijb import IJB
def build_datasets(data_cfg, batch_size, cuda, workers, mode, rank=-1):
assert mode in ['train', 'test']
cfg = data_cfg[mode]
if cfg['dataset'] == 'kface':
dataset = KFace(cfg['data_path'], cfg['test_idx_txt'], cfg['acs'], cfg['lux'], cfg['eps'], cfg['pose'],
cfg['img_size'], batch_size, cuda, workers, mode=mode)
elif cfg['dataset'] == 'ms1m':
dataset = MS1M(cfg['data_path'], cfg['preprocessed_file'], cfg['img_size'], cfg['min_img'],
batch_size, cuda, workers, mode=mode, rank=rank)
elif cfg['dataset'] == 'bin':
root, file_names = cfg['root'], cfg['file_names']
if isinstance(file_names, str):
data_path = os.path.join(root, file_names)
dataset = BIN(data_path, cfg['img_size'], batch_size, cuda, workers)
elif isinstance(file_names, list):
data_path = [os.path.join(root, f) for f in file_names]
dataset = [BIN(dp, cfg['img_size'], batch_size, cuda, workers) for dp in data_path]
elif cfg['dataset'] in ['ijbb', 'ijbc']:
dataset = IJB(cfg['root'], cfg['inf_list'], cfg['img_size'], batch_size, cuda, workers)
return dataset | none | 1 | 1.856429 | 2 |
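The expected shape of data_cfg is implied by the branches of build_datasets. A minimal dict satisfying the 'bin' branch is sketched below; every path and size is a placeholder inferred from the code, not taken from the project's real config.

# Hypothetical data_cfg for the 'bin' branch; all values are placeholders.
data_cfg = {
    'test': {
        'dataset': 'bin',
        'root': '/data/face/val',                 # placeholder path
        'file_names': ['lfw.bin', 'cfp_fp.bin'],  # list -> one BIN dataset per file
        'img_size': 112,
    }
}
# datasets = build_datasets(data_cfg, batch_size=128, cuda=True, workers=4, mode='test')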
|
django_cd/notifications.py | ppinard/django-cd | 1 | 7436 | """"""
# Standard library modules.
import abc
# Third party modules.
from django.core.mail import send_mail
from django.template import Engine, Context
# Local modules.
from .models import RunState
# Globals and constants variables.
class Notification(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def notify(self, jobrun):
        raise NotImplementedError
class EmailNotification(Notification):
def __init__(self, recipients, on_success=False, on_failure=True):
self.recipients = tuple(recipients)
self.on_success = on_success
self.on_failure = on_failure
def __str__(self):
return "email"
def notify(self, jobrun):
if (jobrun.state in [RunState.ERROR, RunState.FAILED] and self.on_failure) or (
jobrun.state == RunState.SUCCESS and self.on_success
):
engine = Engine.get_default()
template = engine.get_template("django_cd/jobrun_report.html")
context = Context({"jobrun": jobrun})
html_message = template.render(context)
send_mail(
subject=f"Job report - {jobrun.name} - {jobrun.state}",
message="",
from_email=None,
recipient_list=self.recipients,
html_message=html_message,
)
| """"""
# Standard library modules.
import abc
# Third party modules.
from django.core.mail import send_mail
from django.template import Engine, Context
# Local modules.
from .models import RunState
# Globals and constants variables.
class Notification(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def notify(self, jobrun):
        raise NotImplementedError
class EmailNotification(Notification):
def __init__(self, recipients, on_success=False, on_failure=True):
self.recipients = tuple(recipients)
self.on_success = on_success
self.on_failure = on_failure
def __str__(self):
return "email"
def notify(self, jobrun):
if (jobrun.state in [RunState.ERROR, RunState.FAILED] and self.on_failure) or (
jobrun.state == RunState.SUCCESS and self.on_success
):
engine = Engine.get_default()
template = engine.get_template("django_cd/jobrun_report.html")
context = Context({"jobrun": jobrun})
html_message = template.render(context)
send_mail(
subject=f"Job report - {jobrun.name} - {jobrun.state}",
message="",
from_email=None,
recipient_list=self.recipients,
html_message=html_message,
)
| en | 0.495252 | # Standard library modules. # Third party modules. # Local modules. # Globals and constants variables. | 2.288557 | 2 |
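Typical wiring for the notifier above: construct it once with the desired triggers and call notify with each finished jobrun. The recipient address is a placeholder, and a Django email backend (EMAIL_BACKEND, EMAIL_HOST, ...) must be configured for send_mail to work.

# Usage sketch; the address is a placeholder and a mail backend is assumed
# to be configured in Django settings.
from django_cd.notifications import EmailNotification

notifier = EmailNotification(
    recipients=['ci-alerts@example.com'],
    on_success=False,  # stay quiet on successful runs
    on_failure=True,   # mail the report when a run errors or fails
)

def on_jobrun_finished(jobrun):
    notifier.notify(jobrun)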
Experiment/ltpFR3_MTurk/ListGen/ltpFR3_listgen.py | jpazdera/PazdKaha22 | 0 | 7437 | <reponame>jpazdera/PazdKaha22<filename>Experiment/ltpFR3_MTurk/ListGen/ltpFR3_listgen.py<gh_stars>0
#!/usr/bin/env python2
import random
import itertools
import numpy
import sys
import json
import copy
def make_bins_ltpFR3(semArray):
"""
Creates four equal-width bins of WAS scores, identical to those used in ltpFR2. Then combine the middle two to give
three bins: low similarity, medium similarity, and high similarity.
A coordinate in semRows[i][j] and semCols[i][j] is the index of the jth word pair in semArray that falls in the ith
similarity bin.
"""
semArray_nondiag = semArray[numpy.where(semArray != 1)]
# Find lowest and highest similarity
min_sim = semArray_nondiag.min()
max_sim = semArray_nondiag.max()
# Split up the semantic space into four equal segments
semBins = list(numpy.linspace(min_sim, max_sim, 4))
# Combine the two middle bins by removing the bin boundary between them
# semBins = semBins[:2] + semBins[3:]
# Create bounds for the bins
semBins = zip(*[semBins[i:] + semBins[-1:i] for i in range(2)])
# For word pairs within the bounds of each bin, append the indices to semRows and semCols
semRows = []
semCols = []
for bin in semBins:
(i, j) = ((semArray > bin[0]) & (semArray < bin[1])).nonzero()
semRows.append(i)
semCols.append(j)
return semRows, semCols
def randomize_conditions_ltpFR3(config):
"""
Randomize the conditions for all sessions.
:param config: The imported configuration file, containing all parameters for the experiment
:return: A list of lists, where sublist n contains the ordering of list conditions for the nth session. cond[x][y][0]
defines the length of session x, list y; cond[x][y][1] defines the presentation rate of session x, list y;
cond[x][y][2] defines whether session x, list y uses visual or auditory presentation; cond[x][y][3] defines the
duration of the pre-list distractor task for session x, list y.
"""
options = [c for c in itertools.product(config.listLength, config.presRate, config.modality, config.distDur)]
cond = []
for i in range(config.nSessions):
sess = []
for j in range(config.reps):
random.shuffle(options)
sess += options[:]
cond.append(sess)
return cond
def choose_pairs_ltpFR3(wp_tot, cond, config, semRows, semCols):
"""
Selects word pairs to use in each list of each session.
:param wp_tot: A list containing all the words of the word pool. The order of the words is expected to correspond to
the indices used by semRows and semCols.
:param cond: A list of lists, where sublist n contains the ordering of list conditions for the nth session.
:param config: The imported configuration file, containing all parameters for the experiment.
:param semRows: See make_bins_ltpFR3()
:param semCols: See make_bins_ltpFR3()
:return: pairs - pairs[x][y][z] is the zth word pair in session x, list y
:return: pair_dicts - a list of dictionaries, where each dictionary contains all word pairs from a given session
:return: practice_lists - A list containing two practice lists, each with 18 words
"""
# pairs[x][y][z] will be the zth pair of words in the yth list on session x
pairs = []
# points to the other word in the pair for a given session
pair_dicts = []
    # Copy the full word pool into full_wp, so it can be shuffled for each session without altering wp_tot
full_wp = wp_tot[:]
# Make word pairs for each session
session_num = 0
while session_num < config.nSessions:
#print 'Making session', session_num, ':',
#sys.stdout.flush()
# Shuffle the order of the word pool; I believe this is technically only necessary for the first session, in
# order to randomize which words are selected for the practice lists. All other lists have their items randomly
# chosen anyway
'''
IMPORTANT NOTE!!!:
Lists containing more than 2080 elements should not be randomized with shuffle, as explained here:
http://stackoverflow.com/questions/3062741/maximal-length-of-list-to-shuffle-with-python-random-shuffle
The full word pool contains 1638 words, so this is only a concern if the word pool is ever expanded.
'''
random.shuffle(full_wp)
# The first session has two 18-word practice lists
if session_num == 0:
practice_lists = [full_wp[:18], full_wp[18:36]]
sess_wp_allowed = full_wp[36:]
else:
sess_wp_allowed = full_wp[:]
# sess_pairs[x][y] will be the yth pair in the xth list on the current session
sess_pairs = []
# Track number of attempts to create the lists for the current session
sess_tries = 0
# Track whether the session completed successfully
goodSess = True
# Make word pairs for each list in the current session
list_num = 0
while list_num < len(cond[session_num]):
#print list_num,
#sys.stdout.flush()
# list_pairs[x] will be the xth pair in the current list on the current session
list_pairs = []
# Track number of attempts to create the current list
list_tries = 0
# Track whether the list completed successfully
goodList = True
# Retrieve the list length condition for the current list by looking in cond
listLength = cond[session_num][list_num][0]
            # Length-12 lists have 2 pairs per bin, length-24 lists have 4 pairs per bin
pairs_per_bin = 2 if listLength == 12 else 4
# Select two or four word pairs from each bin (based on list length)
for sem_i in range(len(semRows)):
# The pair for each semantic bin gets placed twice
pair_i = 0
while pair_i < pairs_per_bin:
# Get the indices (within the full word pool) of the words chosen for the current session
available_indices = [wp_tot.index(word) for word in sess_wp_allowed]
# Randomly choose indices/words from those in the current session until one is found that has one
# or more pairs in the current bin
index_word1 = random.choice(available_indices)
while index_word1 not in semRows[sem_i]:
index_word1 = random.choice(available_indices)
# Get the indices of all words whose pairing with the chosen word falls into the correct bin
good_second_indices = semCols[sem_i][semRows[sem_i] == index_word1]
# Eliminate the words that are not available in the session
good_second_indices = [i for i in good_second_indices if wp_tot[i] in sess_wp_allowed]
# Ensure that a word cannot be accidentally paired with itself
if index_word1 in good_second_indices:
del good_second_indices[good_second_indices.index(index_word1)]
# If there are no good words to choose from, restart
if len(good_second_indices) == 0:
list_tries += 1
if list_tries > 10:
goodList = False
break
else:
continue
# Choose the second word randomly
index_word2 = random.choice(good_second_indices)
# Add the pairs to list_pairs, delete them from the pool of allowed words
list_pairs.append([wp_tot[index_word1], wp_tot[index_word2]])
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word1])]
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word2])]
pair_i += 1
# If the list is bad, add the words back to the pool of allowed words
if not goodList:
sess_wp_allowed.extend([x[0] for x in list_pairs] + [x[1] for x in list_pairs])
break
# If the list is good, add the list_pairs to sess_pairs,
if goodList:
sess_pairs.append(list_pairs)
list_num += 1
else:
# Otherwise, try the session again (up to 50 times), then restart
list_pairs = []
sess_tries += 1
if sess_tries > 50:
goodSess = False
break
# If the whole session went successfully
if goodSess:
# Get the pairs from the lists, add them backwards and forwards to sess_pair_dict
sess_pair_dict = dict(itertools.chain(*sess_pairs))
sess_pair_dict.update(dict(zip(sess_pair_dict.values(), sess_pair_dict.keys())))
pair_dicts.append(sess_pair_dict)
pairs.append(sess_pairs)
session_num += 1
else: # If the session did not go well, try again.
sess_pairs = []
print ''
return pairs, pair_dicts, practice_lists
def place_pairs_ltpFR3(pairs, cond):
"""
:param pairs:
:param cond:
:param config:
:return:
"""
# Load all valid list compositions for 12-item lists (small lists are too restrictive to use trial and error)
with open('valid12.json', 'r') as f:
valid12 = json.load(f)['3bin-valid12']
# Loop through sessions
subj_wo = []
for (n, sess_pairs) in enumerate(pairs):
sess_wo = []
#print '\nPlacing session', n, ':',
#sys.stdout.flush()
# Loop through lists within each session
for (m, list_pairs) in enumerate(sess_pairs):
#print m,
#sys.stdout.flush()
# Create pairs of word pairs from the same bin -- one pair will have adjacent presentation, one distant
grouped_pairs = [list(group) for group in
zip([list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 0],
[list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 1])]
# Retrieve list length for the current list
list_length = cond[n][m][0]
# For 12-item lists, select a random solution template and assign word pairs to the variables in the
# template, such that one pair from each bin has adjacent presentation and one pair from each bin has
# distant presentation
if list_length == 12:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents = ['a', 'b', 'c']
distants = ['d', 'e', 'f']
random.shuffle(adjacents)
random.shuffle(distants)
key = {}
for group in grouped_pairs:
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
key[adjacents.pop(0)] = group[0]
key[distants.pop(0)] = group[1]
# Choose a random valid solution
list_wo = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo)):
w = list_wo[i]
list_wo[i] = key[w[0]][int(w[1])]
# For 24-item lists, create two 12-item lists based on random solution templates and concatenate them.
elif list_length == 24:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents1 = ['a', 'b', 'c']
distants1 = ['d', 'e', 'f']
adjacents2 = ['a', 'b', 'c']
distants2 = ['d', 'e', 'f']
random.shuffle(adjacents1)
random.shuffle(distants1)
random.shuffle(adjacents2)
random.shuffle(distants2)
key1 = {}
key2 = {}
for group_num, group in enumerate(grouped_pairs):
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
if group_num % 2 == 0:
key1[adjacents1.pop(0)] = group[0]
key1[distants1.pop(0)] = group[1]
else:
key2[adjacents2.pop(0)] = group[0]
key2[distants2.pop(0)] = group[1]
# Choose a random valid solution
list_wo1 = copy.deepcopy(random.choice(valid12))
list_wo2 = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo1)):
w = list_wo1[i]
list_wo1[i] = key1[w[0]][int(w[1])]
w = list_wo2[i]
list_wo2[i] = key2[w[0]][int(w[1])]
list_wo = list_wo1 + list_wo2
else:
raise ValueError('Function place_pairs_ltpFR3() can only handle word lists of length 12 or 24!')
# Add finalized list to the session
sess_wo.append(list_wo)
subj_wo.append(sess_wo)
return subj_wo
def listgen_ltpFR3(n):
"""
Generate all lists for a participant, including the conditions, word pairs
and word ordering. This function saves the results to a json file labelled
with the participant's number.
"""
import config
# Read in the semantic association matrix
semMat = []
with open(config.w2vfile) as w2vfile:
for word in w2vfile:
wordVals = []
wordValsString = word.split()
for val in wordValsString:
thisVal = float(val)
wordVals.append(thisVal)
semMat.append(wordVals)
semArray = numpy.array(semMat)
# Create three semantic similarity bins and sort word pairs by bin
semRows, semCols = make_bins_ltpFR3(semArray)
# Read in the word pool
with open(config.wpfile) as wpfile:
wp_tot = [x.strip() for x in wpfile.readlines()]
counts = numpy.zeros(len(wp_tot))
for i in range(n):
print '\nSubject ' + str(i) + '\n'
# Randomize list conditions (list length, presentation rate, modality, distractor duration)
condi = randomize_conditions_ltpFR3(config)
# Choose all of the pairs to be used in the experiment
pairs, pair_dicts, practice_lists = choose_pairs_ltpFR3(wp_tot, condi, config, semRows, semCols)
# Create all lists by placing the word pairs in appropriate positions
subj_wo = place_pairs_ltpFR3(pairs, condi)
# Add practice lists
subj_wo[0] = practice_lists + subj_wo[0]
practice_condi = [[18, 1200, 'a', 18000], [18, 1200, 'v', 18000]]
random.shuffle(practice_condi)
condi[0] = practice_condi + condi[0]
d = {'word_order': subj_wo, 'pairs': pair_dicts, 'conditions': condi}
for sess_dict in pair_dicts:
counts[numpy.array([wp_tot.index(w) for w in sess_dict])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[0]])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[1]])] += 1
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/%d.js' % i, 'w') as f:
s = 'var sess_info = ' + json.dumps(d) + ';'
f.write(s)
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/counts.json', 'w') as f:
f.write(str([c for c in counts]))
print max(counts), min(counts), len([wp_tot[i] for i in range(len(counts)) if counts[i] == 0])
return counts
if __name__ == "__main__":
nsess = input('How many sessions would you like to generate? ')
counts = listgen_ltpFR3(nsess)
print counts.mean()
print counts.std()
print counts.max()
print counts.min()
| #!/usr/bin/env python2
import random
import itertools
import numpy
import sys
import json
import copy
def make_bins_ltpFR3(semArray):
"""
Creates four equal-width bins of WAS scores, identical to those used in ltpFR2. Then combine the middle two to give
three bins: low similarity, medium similarity, and high similarity.
A coordinate in semRows[i][j] and semCols[i][j] is the index of the jth word pair in semArray that falls in the ith
similarity bin.
"""
semArray_nondiag = semArray[numpy.where(semArray != 1)]
# Find lowest and highest similarity
min_sim = semArray_nondiag.min()
max_sim = semArray_nondiag.max()
# Split up the semantic space into four equal segments
semBins = list(numpy.linspace(min_sim, max_sim, 4))
# Combine the two middle bins by removing the bin boundary between them
# semBins = semBins[:2] + semBins[3:]
# Create bounds for the bins
semBins = zip(*[semBins[i:] + semBins[-1:i] for i in range(2)])
# For word pairs within the bounds of each bin, append the indices to semRows and semCols
semRows = []
semCols = []
for bin in semBins:
(i, j) = ((semArray > bin[0]) & (semArray < bin[1])).nonzero()
semRows.append(i)
semCols.append(j)
return semRows, semCols
def randomize_conditions_ltpFR3(config):
"""
Randomize the conditions for all sessions.
:param config: The imported configuration file, containing all parameters for the experiment
:return: A list of lists, where sublist n contains the ordering of list conditions for the nth session. cond[x][y][0]
defines the length of session x, list y; cond[x][y][1] defines the presentation rate of session x, list y;
cond[x][y][2] defines whether session x, list y uses visual or auditory presentation; cond[x][y][3] defines the
duration of the pre-list distractor task for session x, list y.
"""
options = [c for c in itertools.product(config.listLength, config.presRate, config.modality, config.distDur)]
cond = []
for i in range(config.nSessions):
sess = []
for j in range(config.reps):
random.shuffle(options)
sess += options[:]
cond.append(sess)
return cond
def choose_pairs_ltpFR3(wp_tot, cond, config, semRows, semCols):
"""
Selects word pairs to use in each list of each session.
:param wp_tot: A list containing all the words of the word pool. The order of the words is expected to correspond to
the indices used by semRows and semCols.
:param cond: A list of lists, where sublist n contains the ordering of list conditions for the nth session.
:param config: The imported configuration file, containing all parameters for the experiment.
:param semRows: See make_bins_ltpFR3()
:param semCols: See make_bins_ltpFR3()
:return: pairs - pairs[x][y][z] is the zth word pair in session x, list y
:return: pair_dicts - a list of dictionaries, where each dictionary contains all word pairs from a given session
:return: practice_lists - A list containing two practice lists, each with 18 words
"""
# pairs[x][y][z] will be the zth pair of words in the yth list on session x
pairs = []
# points to the other word in the pair for a given session
pair_dicts = []
    # Copy the full word pool into full_wp, so it can be shuffled for each session without altering wp_tot
full_wp = wp_tot[:]
# Make word pairs for each session
session_num = 0
while session_num < config.nSessions:
#print 'Making session', session_num, ':',
#sys.stdout.flush()
# Shuffle the order of the word pool; I believe this is technically only necessary for the first session, in
# order to randomize which words are selected for the practice lists. All other lists have their items randomly
# chosen anyway
'''
IMPORTANT NOTE!!!:
Lists containing more than 2080 elements should not be randomized with shuffle, as explained here:
http://stackoverflow.com/questions/3062741/maximal-length-of-list-to-shuffle-with-python-random-shuffle
The full word pool contains 1638 words, so this is only a concern if the word pool is ever expanded.
'''
random.shuffle(full_wp)
# The first session has two 18-word practice lists
if session_num == 0:
practice_lists = [full_wp[:18], full_wp[18:36]]
sess_wp_allowed = full_wp[36:]
else:
sess_wp_allowed = full_wp[:]
# sess_pairs[x][y] will be the yth pair in the xth list on the current session
sess_pairs = []
# Track number of attempts to create the lists for the current session
sess_tries = 0
# Track whether the session completed successfully
goodSess = True
# Make word pairs for each list in the current session
list_num = 0
while list_num < len(cond[session_num]):
#print list_num,
#sys.stdout.flush()
# list_pairs[x] will be the xth pair in the current list on the current session
list_pairs = []
# Track number of attempts to create the current list
list_tries = 0
# Track whether the list completed successfully
goodList = True
# Retrieve the list length condition for the current list by looking in cond
listLength = cond[session_num][list_num][0]
            # Length-12 lists have 2 pairs per bin, length-24 lists have 4 pairs per bin
pairs_per_bin = 2 if listLength == 12 else 4
# Select two or four word pairs from each bin (based on list length)
for sem_i in range(len(semRows)):
# The pair for each semantic bin gets placed twice
pair_i = 0
while pair_i < pairs_per_bin:
# Get the indices (within the full word pool) of the words chosen for the current session
available_indices = [wp_tot.index(word) for word in sess_wp_allowed]
# Randomly choose indices/words from those in the current session until one is found that has one
# or more pairs in the current bin
index_word1 = random.choice(available_indices)
while index_word1 not in semRows[sem_i]:
index_word1 = random.choice(available_indices)
# Get the indices of all words whose pairing with the chosen word falls into the correct bin
good_second_indices = semCols[sem_i][semRows[sem_i] == index_word1]
# Eliminate the words that are not available in the session
good_second_indices = [i for i in good_second_indices if wp_tot[i] in sess_wp_allowed]
# Ensure that a word cannot be accidentally paired with itself
if index_word1 in good_second_indices:
del good_second_indices[good_second_indices.index(index_word1)]
# If there are no good words to choose from, restart
if len(good_second_indices) == 0:
list_tries += 1
if list_tries > 10:
goodList = False
break
else:
continue
# Choose the second word randomly
index_word2 = random.choice(good_second_indices)
# Add the pairs to list_pairs, delete them from the pool of allowed words
list_pairs.append([wp_tot[index_word1], wp_tot[index_word2]])
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word1])]
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word2])]
pair_i += 1
# If the list is bad, add the words back to the pool of allowed words
if not goodList:
sess_wp_allowed.extend([x[0] for x in list_pairs] + [x[1] for x in list_pairs])
break
# If the list is good, add the list_pairs to sess_pairs,
if goodList:
sess_pairs.append(list_pairs)
list_num += 1
else:
# Otherwise, try the session again (up to 50 times), then restart
list_pairs = []
sess_tries += 1
if sess_tries > 50:
goodSess = False
break
# If the whole session went successfully
if goodSess:
# Get the pairs from the lists, add them backwards and forwards to sess_pair_dict
sess_pair_dict = dict(itertools.chain(*sess_pairs))
sess_pair_dict.update(dict(zip(sess_pair_dict.values(), sess_pair_dict.keys())))
pair_dicts.append(sess_pair_dict)
pairs.append(sess_pairs)
session_num += 1
else: # If the session did not go well, try again.
sess_pairs = []
print ''
return pairs, pair_dicts, practice_lists
def place_pairs_ltpFR3(pairs, cond):
"""
:param pairs:
:param cond:
:param config:
:return:
"""
# Load all valid list compositions for 12-item lists (small lists are too restrictive to use trial and error)
with open('valid12.json', 'r') as f:
valid12 = json.load(f)['3bin-valid12']
# Loop through sessions
subj_wo = []
for (n, sess_pairs) in enumerate(pairs):
sess_wo = []
#print '\nPlacing session', n, ':',
#sys.stdout.flush()
# Loop through lists within each session
for (m, list_pairs) in enumerate(sess_pairs):
#print m,
#sys.stdout.flush()
# Create pairs of word pairs from the same bin -- one pair will have adjacent presentation, one distant
grouped_pairs = [list(group) for group in
zip([list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 0],
[list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 1])]
# Retrieve list length for the current list
list_length = cond[n][m][0]
# For 12-item lists, select a random solution template and assign word pairs to the variables in the
# template, such that one pair from each bin has adjacent presentation and one pair from each bin has
# distant presentation
if list_length == 12:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents = ['a', 'b', 'c']
distants = ['d', 'e', 'f']
random.shuffle(adjacents)
random.shuffle(distants)
key = {}
for group in grouped_pairs:
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
key[adjacents.pop(0)] = group[0]
key[distants.pop(0)] = group[1]
# Choose a random valid solution
list_wo = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo)):
w = list_wo[i]
list_wo[i] = key[w[0]][int(w[1])]
# For 24-item lists, create two 12-item lists based on random solution templates and concatenate them.
elif list_length == 24:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents1 = ['a', 'b', 'c']
distants1 = ['d', 'e', 'f']
adjacents2 = ['a', 'b', 'c']
distants2 = ['d', 'e', 'f']
random.shuffle(adjacents1)
random.shuffle(distants1)
random.shuffle(adjacents2)
random.shuffle(distants2)
key1 = {}
key2 = {}
for group_num, group in enumerate(grouped_pairs):
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
if group_num % 2 == 0:
key1[adjacents1.pop(0)] = group[0]
key1[distants1.pop(0)] = group[1]
else:
key2[adjacents2.pop(0)] = group[0]
key2[distants2.pop(0)] = group[1]
# Choose a random valid solution
list_wo1 = copy.deepcopy(random.choice(valid12))
list_wo2 = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo1)):
w = list_wo1[i]
list_wo1[i] = key1[w[0]][int(w[1])]
w = list_wo2[i]
list_wo2[i] = key2[w[0]][int(w[1])]
list_wo = list_wo1 + list_wo2
else:
raise ValueError('Function place_pairs_ltpFR3() can only handle word lists of length 12 or 24!')
# Add finalized list to the session
sess_wo.append(list_wo)
subj_wo.append(sess_wo)
return subj_wo
def listgen_ltpFR3(n):
"""
Generate all lists for a participant, including the conditions, word pairs
and word ordering. This function saves the results to a json file labelled
with the participant's number.
"""
import config
# Read in the semantic association matrix
semMat = []
with open(config.w2vfile) as w2vfile:
for word in w2vfile:
wordVals = []
wordValsString = word.split()
for val in wordValsString:
thisVal = float(val)
wordVals.append(thisVal)
semMat.append(wordVals)
semArray = numpy.array(semMat)
# Create three semantic similarity bins and sort word pairs by bin
semRows, semCols = make_bins_ltpFR3(semArray)
# Read in the word pool
with open(config.wpfile) as wpfile:
wp_tot = [x.strip() for x in wpfile.readlines()]
counts = numpy.zeros(len(wp_tot))
for i in range(n):
print '\nSubject ' + str(i) + '\n'
# Randomize list conditions (list length, presentation rate, modality, distractor duration)
condi = randomize_conditions_ltpFR3(config)
# Choose all of the pairs to be used in the experiment
pairs, pair_dicts, practice_lists = choose_pairs_ltpFR3(wp_tot, condi, config, semRows, semCols)
# Create all lists by placing the word pairs in appropriate positions
subj_wo = place_pairs_ltpFR3(pairs, condi)
# Add practice lists
subj_wo[0] = practice_lists + subj_wo[0]
practice_condi = [[18, 1200, 'a', 18000], [18, 1200, 'v', 18000]]
random.shuffle(practice_condi)
condi[0] = practice_condi + condi[0]
d = {'word_order': subj_wo, 'pairs': pair_dicts, 'conditions': condi}
for sess_dict in pair_dicts:
counts[numpy.array([wp_tot.index(w) for w in sess_dict])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[0]])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[1]])] += 1
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/%d.js' % i, 'w') as f:
s = 'var sess_info = ' + json.dumps(d) + ';'
f.write(s)
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/counts.json', 'w') as f:
f.write(str([c for c in counts]))
print max(counts), min(counts), len([wp_tot[i] for i in range(len(counts)) if counts[i] == 0])
return counts
if __name__ == "__main__":
nsess = input('How many sessions would you like to generate? ')
counts = listgen_ltpFR3(nsess)
print counts.mean()
print counts.std()
print counts.max()
print counts.min() | en | 0.792145 | #!/usr/bin/env python2 Creates four equal-width bins of WAS scores, identical to those used in ltpFR2. Then combine the middle two to give three bins: low similarity, medium similarity, and high similarity. A coordinate in semRows[i][j] and semCols[i][j] is the index of the jth word pair in semArray that falls in the ith similarity bin. # Find lowest and highest similarity # Split up the semantic space into four equal segments # Combine the two middle bins by removing the bin boundary between them # semBins = semBins[:2] + semBins[3:] # Create bounds for the bins # For word pairs within the bounds of each bin, append the indices to semRows and semCols Randomize the conditions for all sessions. :param config: The imported configuration file, containing all parameters for the experiment :return: A list of lists, where sublist n contains the ordering of list conditions for the nth session. cond[x][y][0] defines the length of session x, list y; cond[x][y][1] defines the presentation rate of session x, list y; cond[x][y][2] defines whether session x, list y uses visual or auditory presentation; cond[x][y][3] defines the duration of the pre-list distractor task for session x, list y. Selects word pairs to use in each list of each session. :param wp_tot: A list containing all the words of the word pool. The order of the words is expected to correspond to the indices used by semRows and semCols. :param cond: A list of lists, where sublist n contains the ordering of list conditions for the nth session. :param config: The imported configuration file, containing all parameters for the experiment. :param semRows: See make_bins_ltpFR3() :param semCols: See make_bins_ltpFR3() :return: pairs - pairs[x][y][z] is the zth word pair in session x, list y :return: pair_dicts - a list of dictionaries, where each dictionary contains all word pairs from a given session :return: practice_lists - A list containing two practice lists, each with 18 words # pairs[x][y][z] will be the zth pair of words in the yth list on session x # points to the other word in the pair for a given session # Deep copy the full word pool into full_wp_allowed, so it can be shuffled for each session without altering wp_tot # Make word pairs for each session #print 'Making session', session_num, ':', #sys.stdout.flush() # Shuffle the order of the word pool; I believe this is technically only necessary for the first session, in # order to randomize which words are selected for the practice lists. All other lists have their items randomly # chosen anyway IMPORTANT NOTE!!!: Lists containing more than 2080 elements should not be randomized with shuffle, as explained here: http://stackoverflow.com/questions/3062741/maximal-length-of-list-to-shuffle-with-python-random-shuffle The full word pool contains 1638 words, so this is only a concern if the word pool is ever expanded. 
# The first session has two 18-word practice lists # sess_pairs[x][y] will be the yth pair in the xth list on the current session # Track number of attempts to create the lists for the current session # Track whether the session completed successfully # Make word pairs for each list in the current session #print list_num, #sys.stdout.flush() # list_pairs[x] will be the xth pair in the current list on the current session # Track number of attempts to create the current list # Track whether the list completed successfully # Retrieve the list length condition for the current list by looking in cond # Length 12 lists have 2 pairs per bin, length 24 list have 4 pairs per bin # Select two or four word pairs from each bin (based on list length) # The pair for each semantic bin gets placed twice # Get the indices (within the full word pool) of the words chosen for the current session # Randomly choose indices/words from those in the current session until one is found that has one # or more pairs in the current bin # Get the indices of all words whose pairing with the chosen word falls into the correct bin # Eliminate the words that are not available in the session # Ensure that a word cannot be accidentally paired with itself # If there are no good words to choose from, restart # Choose the second word randomly # Add the pairs to list_pairs, delete them from the pool of allowed words # If the list is bad, add the words back to the pool of allowed words # If the list is good, add the list_pairs to sess_pairs, # Otherwise, try the session again (up to 50 times), then restart # If the whole session went successfully # Get the pairs from the lists, add them backwards and forwards to sess_pair_dict # If the session did not go well, try again. :param pairs: :param cond: :param config: :return: # Load all valid list compositions for 12-item lists (small lists are too restrictive to use trial and error) # Loop through sessions #print '\nPlacing session', n, ':', #sys.stdout.flush() # Loop through lists within each session #print m, #sys.stdout.flush() # Create pairs of word pairs from the same bin -- one pair will have adjacent presentation, one distant # Retrieve list length for the current list # For 12-item lists, select a random solution template and assign word pairs to the variables in the # template, such that one pair from each bin has adjacent presentation and one pair from each bin has # distant presentation # Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair # Choose a random valid solution # Each entry in the solution list is a string containing a letter followed by 0 or 1 # The letter corresponds to the word pair and the number corresponds to the item in the pair. # Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs. # For 24-item lists, create two 12-item lists based on random solution templates and concatenate them. # Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair # Choose a random valid solution # Each entry in the solution list is a string containing a letter followed by 0 or 1 # The letter corresponds to the word pair and the number corresponds to the item in the pair. # Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs. # Add finalized list to the session Generate all lists for a participant, including the conditions, word pairs and word ordering. 
This function saves the results to a json file labelled with the participant's number. # Read in the semantic association matrix # Create three semantic similarity bins and sort word pairs by bin # Read in the word pool # Randomize list conditions (list length, presentation rate, modality, distractor duration) # Choose all of the pairs to be used in the experiment # Create all lists by placing the word pairs in appropriate positions # Add practice lists | 2.924316 | 3 |
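The binning step in make_bins_ltpFR3 reduces to: take the min and max off-diagonal similarity, cut that range with four numpy.linspace edges into three equal-width bins, and collect the (row, col) index pairs falling strictly inside each bin. A toy reproduction on a fabricated 4x4 similarity matrix:

# Toy reproduction of the binning logic above; the matrix is fabricated.
import numpy as np

sem = np.array([[1.0, 0.1, 0.5, 0.9],
                [0.1, 1.0, 0.3, 0.6],
                [0.5, 0.3, 1.0, 0.2],
                [0.9, 0.6, 0.2, 1.0]])
nondiag = sem[np.where(sem != 1)]
edges = np.linspace(nondiag.min(), nondiag.max(), 4)  # 4 edges -> 3 bins
for lo, hi in zip(edges[:-1], edges[1:]):
    rows, cols = ((sem > lo) & (sem < hi)).nonzero()
    print((lo, hi), list(zip(rows, cols)))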
fastestimator/dataset/data/cifar10.py | DwijayDS/fastestimator | 57 | 7438 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple
import tensorflow as tf
from fastestimator.dataset.numpy_dataset import NumpyDataset
def load_data(image_key: str = "x", label_key: str = "y") -> Tuple[NumpyDataset, NumpyDataset]:
"""Load and return the CIFAR10 dataset.
Please consider using the ciFAIR10 dataset instead. CIFAR10 contains duplicates between its train and test sets.
Args:
image_key: The key for image.
label_key: The key for label.
Returns:
(train_data, eval_data)
"""
print("\033[93m {}\033[00m".format("FastEstimator-Warn: Consider using the ciFAIR10 dataset instead."))
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
train_data = NumpyDataset({image_key: x_train, label_key: y_train})
eval_data = NumpyDataset({image_key: x_eval, label_key: y_eval})
return train_data, eval_data
| # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple
import tensorflow as tf
from fastestimator.dataset.numpy_dataset import NumpyDataset
def load_data(image_key: str = "x", label_key: str = "y") -> Tuple[NumpyDataset, NumpyDataset]:
"""Load and return the CIFAR10 dataset.
Please consider using the ciFAIR10 dataset instead. CIFAR10 contains duplicates between its train and test sets.
Args:
image_key: The key for image.
label_key: The key for label.
Returns:
(train_data, eval_data)
"""
print("\033[93m {}\033[00m".format("FastEstimator-Warn: Consider using the ciFAIR10 dataset instead."))
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
train_data = NumpyDataset({image_key: x_train, label_key: y_train})
eval_data = NumpyDataset({image_key: x_eval, label_key: y_eval})
return train_data, eval_data
| en | 0.781444 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Load and return the CIFAR10 dataset. Please consider using the ciFAIR10 dataset instead. CIFAR10 contains duplicates between its train and test sets. Args: image_key: The key for image. label_key: The key for label. Returns: (train_data, eval_data) | 2.641201 | 3 |
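Typical use of the loader above, assuming fastestimator's NumpyDataset supports len() and integer indexing that returns a dict keyed by image_key/label_key, consistent with how it is constructed:

# Usage sketch; indexing behaviour of NumpyDataset is assumed from its
# {key: array} construction above.
from fastestimator.dataset.data.cifar10 import load_data

train_data, eval_data = load_data(image_key="x", label_key="y")
print(len(train_data), len(eval_data))  # 50000 10000
sample = train_data[0]
print(sample["x"].shape, sample["y"])   # (32, 32, 3) and a length-1 label array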
examples/single_message.py | Inrixia/pyais | 51 | 7439 | <filename>examples/single_message.py
from pyais.messages import NMEAMessage
message = NMEAMessage(b"!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C")
print(message.decode())
# or
message = NMEAMessage.from_string("!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C")
print(message.decode())
| <filename>examples/single_message.py
from pyais.messages import NMEAMessage
message = NMEAMessage(b"!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C")
print(message.decode())
# or
message = NMEAMessage.from_string("!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C")
print(message.decode())
| none | 1 | 2.131441 | 2 |
|
30_days_of_code_10.py | sercangul/HackerRank | 0 | 7440 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:02:33 2019
@author: sercangul
"""
def maxConsecutiveOnes(x):
# Initialize result
count = 0
# Count the number of iterations to
# reach x = 0.
while (x!=0):
# This operation reduces length
# of every sequence of 1s by one.
x = (x & (x << 1))
count=count+1
return count
if __name__ == '__main__':
n = int(input())
result = maxConsecutiveOnes(n)
print(result) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:02:33 2019
@author: sercangul
"""
def maxConsecutiveOnes(x):
# Initialize result
count = 0
# Count the number of iterations to
# reach x = 0.
while (x!=0):
# This operation reduces length
# of every sequence of 1s by one.
x = (x & (x << 1))
count=count+1
return count
if __name__ == '__main__':
n = int(input())
result = maxConsecutiveOnes(n)
print(result) | en | 0.832054 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Mon Jun 3 19:02:33 2019 @author: sercangul # Initialize result # Count the number of iterations to # reach x = 0. # This operation reduces length # of every sequence of 1s by one. | 3.716677 | 4 |
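The x & (x << 1) step clears the lowest bit of every run of consecutive 1s, so the loop count equals the longest run. A short trace makes the invariant concrete:

# Trace of the bit trick: the longest run of 1s in 0b110111 is 3, so the
# loop takes exactly 3 iterations to reach zero.
x = 0b110111
steps = 0
while x:
    print(bin(x))
    x &= x << 1  # every run of 1s loses one bit
    steps += 1
print("longest run:", steps)  # -> 3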
artap/algorithm_cmaes.py | artap-framework/artap | 5 | 7441 | <gh_stars>1-10
import numpy as np
from .problem import Problem
from .algorithm_genetic import GeneralEvolutionaryAlgorithm
from .individual import Individual
from .operators import CustomGenerator, nondominated_truncate, RandomGenerator, UniformGenerator
import time
class CMA_ES(GeneralEvolutionaryAlgorithm):
"""
    Implementation of CMA_ES, the Covariance Matrix Adaptation Evolution Strategy (CMA-ES).
The Covariance Matrix Adaptation Evolution Strategy (CMA-ES) [1] is one of the most effective approaches
for black-box optimization, in which objective functions cannot be specified explicitly in general.
CMA-ES outperformed over 100 black-box optimization approaches for a variety of benchmark problems [2].
The CMA-ES algorithm selects solutions from a multivariate gaussian distribution. Following the evaluation of
all solutions, the solutions are sorted by evaluation values, and the distribution parameters
(i.e., the mean vector and the covariance matrix) are updated depending on the ranking of evaluation values.
[1] <NAME> and <NAME>. Completely derandomized self-adaptation in evolution strategies.
Evol. Comput., 9(2):159–195, June 2001.
DOI: http://dx.doi.org/10.1162/106365601750190398.
[2] <NAME>. The CMA Evolution Strategy: A Comparing Review, pages 75–102. Springer Berlin Heidelberg,
Berlin, Heidelberg, 2006.
DOI: https://doi.org/10.1007/3-540-32494-1_4.
"""
def __init__(self, problem: Problem, name="Covariance Matrix Adaptation Evolutionary Strategy"):
super().__init__(problem, name)
# Population Size
self.n_samples = self.options['max_population_size']
# Number of generation
self.t = self.options['max_population_number']
self.individual_features['velocity'] = dict()
self.individual_features['best_cost'] = dict()
self.individual_features['best_vector'] = dict()
self.individual_features['dominate'] = []
self.individual_features['crowding_distance'] = 0
self.individual_features['domination_counter'] = 0
# Add front_number feature
self.individual_features['front_number'] = 0
self.dim_theta = len(self.problem.parameters)
# Elite ratio percentage
self.top_p = 30
# Range of values
self.min_val = 0
self.max_val = 1
# Number of Runs
self.runs = 1
self.theta_mean = np.random.uniform(self.min_val, self.max_val, self.dim_theta)
# self.individuals = []
theta_std = np.random.uniform(self.max_val - 1, self.max_val, self.dim_theta)
self.theta_cov = np.diag(theta_std)
self.generator = CustomGenerator(self.problem.parameters, self.individual_features)
# self.fit_gaussian()
def fit_gaussian(self):
"""
generates individuals from a multivariate gaussian distribution
:param
:return population: list of individuals
"""
theta = np.random.multivariate_normal(self.theta_mean, self.theta_cov, self.options['max_population_size'])
individuals = np.clip(theta, self.min_val, self.max_val)
self.generator.init(individuals)
individuals = self.generator.generate()
return individuals
def take_elite(self, candidates):
"""
Based on the fitness, it will take top individuals
:param candidates
:return elite: list of top individuals
"""
n_top = int((self.n_samples * self.top_p) / 100)
elite = candidates[:n_top]
return elite
def compute_new_mean(self, e_candidates):
"""
Update distribution parameters. Here, the mean vector will be updated depending on the ranking of
evaluation values.
:param e_candidates
:return new_means vector
"""
new_means = np.mean(e_candidates, axis=0)
return new_means
def compute_new_cov(self, e_candidates):
"""
Update distribution parameters. Here, the covariance matrix will be updated depending on the ranking of
evaluation values
:param e_candidates
:return new_covariance matrix
"""
e_candidates = np.array(e_candidates)
I = np.identity(self.dim_theta)
cov = np.zeros((self.dim_theta, self.dim_theta))
for i in range(self.dim_theta):
for j in range(self.dim_theta):
cov[i, j] = np.sum(
((e_candidates[:, i] - self.theta_mean[i]) * (e_candidates[:, j] - self.theta_mean[j])), axis=0)
return 1 / e_candidates.shape[0] * cov + I * 1e-3
def run(self):
mean_fitness = []
best_fitness = []
worst_fitness = []
fitness = []
individuals = self.fit_gaussian()
for individual in individuals:
# append to problem
self.problem.individuals.append(individual)
# add to population
individual.population_id = 0
self.problem.data_store.sync_individual(individual)
self.evaluate(individuals)
start = time.time()
self.problem.logger.info("CMA_ES: {}/{}".format(self.options['max_population_number'],
self.options['max_population_size']))
for it in range(self.options['max_population_number']):
lists = []
for individual in individuals:
# fitness.append(individual.costs)
lists.append(individual.costs)
lists = np.array(lists)
mean_fitness.append(np.mean(lists))
best_fitness.append(np.min(lists))
worst_fitness.append(np.max(lists))
fitness.append(lists)
elite = self.take_elite(individuals)
e_candidates = [i.vector for i in elite]
self.theta_cov = self.compute_new_cov(e_candidates)
self.theta_mean = self.compute_new_mean(e_candidates)
individuals = self.fit_gaussian()
# individuals = nondominated_truncate(new_individuals, self.options['max_population_size'])
self.evaluate(individuals)
for individual in individuals:
# add to population
individual.population_id = it + 1
# append to problem
self.problem.individuals.append(individual)
# sync to datastore
self.problem.data_store.sync_individual(individual)
t = time.time() - start
self.problem.logger.info("CMA_ES: elapsed time: {} s".format(t))
# sync changed individual informations
self.problem.data_store.sync_all()
| import numpy as np
from .problem import Problem
from .algorithm_genetic import GeneralEvolutionaryAlgorithm
from .individual import Individual
from .operators import CustomGenerator, nondominated_truncate, RandomGenerator, UniformGenerator
import time
class CMA_ES(GeneralEvolutionaryAlgorithm):
"""
Implementation of CMA_ES, Covariance Matrix Adaptation Evolutionary strategy (CMA_ES).
The Covariance Matrix Adaptation Evolution Strategy (CMA-ES) [1] is one of the most effective approaches
for black-box optimization, in which objective functions cannot be specified explicitly in general.
CMA-ES outperformed over 100 black-box optimization approaches for a variety of benchmark problems [2].
The CMA-ES algorithm selects solutions from a multivariate gaussian distribution. Following the evaluation of
all solutions, the solutions are sorted by evaluation values, and the distribution parameters
(i.e., the mean vector and the covariance matrix) are updated depending on the ranking of evaluation values.
[1] <NAME> and <NAME>. Completely derandomized self-adaptation in evolution strategies.
Evol. Comput., 9(2):159–195, June 2001.
DOI: http://dx.doi.org/10.1162/106365601750190398.
[2] <NAME>. The CMA Evolution Strategy: A Comparing Review, pages 75–102. Springer Berlin Heidelberg,
Berlin, Heidelberg, 2006.
DOI: https://doi.org/10.1007/3-540-32494-1_4.
"""
def __init__(self, problem: Problem, name="Covariance Matrix Adaptation Evolutionary Strategy"):
super().__init__(problem, name)
# Population Size
self.n_samples = self.options['max_population_size']
# Number of generation
self.t = self.options['max_population_number']
self.individual_features['velocity'] = dict()
self.individual_features['best_cost'] = dict()
self.individual_features['best_vector'] = dict()
self.individual_features['dominate'] = []
self.individual_features['crowding_distance'] = 0
self.individual_features['domination_counter'] = 0
# Add front_number feature
self.individual_features['front_number'] = 0
self.dim_theta = len(self.problem.parameters)
# Elite ratio percentage
self.top_p = 30
# Range of values
self.min_val = 0
self.max_val = 1
# Number of Runs
self.runs = 1
self.theta_mean = np.random.uniform(self.min_val, self.max_val, self.dim_theta)
# self.individuals = []
theta_std = np.random.uniform(self.max_val - 1, self.max_val, self.dim_theta)
self.theta_cov = np.diag(theta_std)
self.generator = CustomGenerator(self.problem.parameters, self.individual_features)
# self.fit_gaussian()
def fit_gaussian(self):
"""
generates individuals from a multivariate gaussian distribution
:param
:return population: list of individuals
"""
theta = np.random.multivariate_normal(self.theta_mean, self.theta_cov, self.options['max_population_size'])
individuals = np.clip(theta, self.min_val, self.max_val)
self.generator.init(individuals)
individuals = self.generator.generate()
return individuals
def take_elite(self, candidates):
"""
Based on the fitness, it will take top individuals
:param candidates
:return elite: list of top individuals
"""
n_top = int((self.n_samples * self.top_p) / 100)
elite = candidates[:n_top]
return elite
def compute_new_mean(self, e_candidates):
"""
Update distribution parameters. Here, the mean vector will be updated depending on the ranking of
evaluation values.
:param e_candidates
:return new_means vector
"""
new_means = np.mean(e_candidates, axis=0)
return new_means
def compute_new_cov(self, e_candidates):
"""
Update distribution parameters. Here, the covariance matrix will be updated depending on the ranking of
evaluation values
:param e_candidates
:return new_covariance matrix
"""
e_candidates = np.array(e_candidates)
I = np.identity(self.dim_theta)
cov = np.zeros((self.dim_theta, self.dim_theta))
for i in range(self.dim_theta):
for j in range(self.dim_theta):
cov[i, j] = np.sum(
((e_candidates[:, i] - self.theta_mean[i]) * (e_candidates[:, j] - self.theta_mean[j])), axis=0)
return 1 / e_candidates.shape[0] * cov + I * 1e-3
def run(self):
mean_fitness = []
best_fitness = []
worst_fitness = []
fitness = []
individuals = self.fit_gaussian()
for individual in individuals:
# append to problem
self.problem.individuals.append(individual)
# add to population
individual.population_id = 0
self.problem.data_store.sync_individual(individual)
self.evaluate(individuals)
start = time.time()
self.problem.logger.info("CMA_ES: {}/{}".format(self.options['max_population_number'],
self.options['max_population_size']))
for it in range(self.options['max_population_number']):
lists = []
for individual in individuals:
# fitness.append(individual.costs)
lists.append(individual.costs)
lists = np.array(lists)
mean_fitness.append(np.mean(lists))
best_fitness.append(np.min(lists))
worst_fitness.append(np.max(lists))
fitness.append(lists)
elite = self.take_elite(individuals)
e_candidates = [i.vector for i in elite]
self.theta_cov = self.compute_new_cov(e_candidates)
self.theta_mean = self.compute_new_mean(e_candidates)
individuals = self.fit_gaussian()
# individuals = nondominated_truncate(new_individuals, self.options['max_population_size'])
self.evaluate(individuals)
for individual in individuals:
# add to population
individual.population_id = it + 1
# append to problem
self.problem.individuals.append(individual)
# sync to datastore
self.problem.data_store.sync_individual(individual)
t = time.time() - start
self.problem.logger.info("CMA_ES: elapsed time: {} s".format(t))
# sync changed individual informations
self.problem.data_store.sync_all() | en | 0.690525 | Implementation of CMA_ES, Covariance Matrix Adaptation Evolutionary strategy (CMA_ES). The Covariance Matrix Adaptation Evolution Strategy (CMA-ES) [1] is one of the most effective approaches for black-box optimization, in which objective functions cannot be specified explicitly in general. CMA-ES outperformed over 100 black-box optimization approaches for a variety of benchmark problems [2]. The CMA-ES algorithm selects solutions from a multivariate gaussian distribution. Following the evaluation of all solutions, the solutions are sorted by evaluation values, and the distribution parameters (i.e., the mean vector and the covariance matrix) are updated depending on the ranking of evaluation values. [1] <NAME> and <NAME>. Completely derandomized self-adaptation in evolution strategies. Evol. Comput., 9(2):159–195, June 2001. DOI: http://dx.doi.org/10.1162/106365601750190398. [2] <NAME>. The CMA Evolution Strategy: A Comparing Review, pages 75–102. Springer Berlin Heidelberg, Berlin, Heidelberg, 2006. DOI: https://doi.org/10.1007/3-540-32494-1_4. # Population Size # Number of generation # Add front_number feature # Elite ratio percentage # Range of values # Number of Runs # self.individuals = [] # self.fit_gaussian() generates individuals from a multivariate gaussian distribution :param :return population: list of individuals Based on the fitness, it will take top individuals :param candidates :return elite: list of top individuals Update distribution parameters. Here, the mean vector will be updated depending on the ranking of evaluation values. :param e_candidates :return new_means vector Update distribution parameters. Here, the covariance matrix will be updated depending on the ranking of evaluation values :param e_candidates :return new_covariance matrix # append to problem # add to population # fitness.append(individual.costs) # individuals = nondominated_truncate(new_individuals, self.options['max_population_size']) # add to population # append to problem # sync to datastore # sync changed individual informations | 2.624194 | 3 |
apns_proxy_client/core.py | hagino3000/apns-proxy-client-py | 0 | 7442 | <reponame>hagino3000/apns-proxy-client-py
# -*- coding: utf-8 -*-
"""
APNS Proxy Serverのクライアント
"""
import time
import zmq
import simplejson as json
READ_TIMEOUT = 1500 # msec
FLUSH_TIMEOUT = 5000 # msec
COMMAND_ASK_ADDRESS = b'\1'
COMMAND_SEND = b'\2'
COMMAND_FEEDBACK = b'\3'
DEVICE_TOKEN_LENGTH = 64
JSON_ALERT_KEY_SET = set(['body', 'action_loc_key', 'loc_key', 'loc_args', 'launch_image'])
class APNSProxyClient(object):
def __init__(self, host, port, application_id):
"""
ZMQコンテキストとソケットの初期化
"""
if host is None or not isinstance(host, str):
raise ValueError("host must be string")
if port is None or not isinstance(port, int):
raise ValueError("host must be int type")
self.host = host
self.port = port
self.context = zmq.Context()
self.context.setsockopt(zmq.LINGER, FLUSH_TIMEOUT)
self.communicator = self.context.socket(zmq.REQ)
self.publisher = self.context.socket(zmq.PUSH)
self.connected = False
if not isinstance(application_id, str):
raise ValueError("application_id must be string type")
self.application_id = application_id
def __enter__(self):
self.connect()
def connect(self):
"""リモートサーバーへ接続"""
if self.connected is False:
self.communicator.connect(self.build_address(self.port))
push_port = self.get_push_port()
self.publisher.connect(self.build_address(push_port))
self.connected = True
def build_address(self, port):
return "tcp://%s:%s" % (self.host, port)
def get_push_port(self):
"""
PUSH-PULL接続用のポートを取得する
"""
self.communicator.send(COMMAND_ASK_ADDRESS)
poller = zmq.Poller()
poller.register(self.communicator, zmq.POLLIN)
if poller.poll(READ_TIMEOUT):
return self.communicator.recv()
else:
self.close()
raise IOError("Cannot connect to APNs Proxy Server. Timeout!!")
def send(self, token, alert, sound='default', badge=None, content_available=False,
custom=None, expiry=None, priority=None, test=False):
"""
デバイストークンの送信
"""
self._check_token(token)
self._check_alert(alert)
self._check_custom(custom)
self.publisher.send(self._serialize(
COMMAND_SEND, token, alert, sound, badge, content_available, custom,
expiry, priority, test
))
def get_feedback(self):
data = {
'appid': self.application_id,
}
command = COMMAND_FEEDBACK + json.dumps(data, ensure_ascii=True)
self.communicator.send(command)
return json.loads(self.communicator.recv())
@staticmethod
def _check_token(token):
if len(token) != DEVICE_TOKEN_LENGTH:
raise ValueError('Invalid token length %s' % token)
@staticmethod
def _check_alert(alert):
if (alert is None or isinstance(alert, basestring)):
return
elif isinstance(alert, dict):
if len(set(alert.keys()) - JSON_ALERT_KEY_SET) > 0:
raise ValueError('JSON Alert allows only'
'body, action_loc_key, loc_key, loc_args, launch_image')
else:
raise ValueError('alert must be string, unicode or dict type')
@staticmethod
def _check_custom(custom):
if custom is None or isinstance(custom, dict):
return
raise ValueError('custom must be dict type')
def _serialize(self, command, token, alert, sound, badge, content_available, custom,
expiry, priority, test):
"""
送信データのフォーマット
"""
aps = {}
if alert is not None:
aps['alert'] = alert
if sound is not None:
aps['sound'] = sound
if badge is not None:
aps['badge'] = badge
if content_available is True:
aps['content_available'] = True
if custom is not None:
aps['custom'] = custom
data = {
'appid': self.application_id,
'token': token,
'aps': aps,
'test': test
}
if expiry is not None:
data['expiry'] = expiry
if priority is not None:
data['priority'] = priority
return command + json.dumps(data, ensure_ascii=True)
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
self._close()
return False
self.close()
def close(self):
start_time = time.time()
self._close()
end_time = time.time()
if (end_time - start_time) > (FLUSH_TIMEOUT - 20)/1000.0:
raise IOError('Timeout close operation. Some messages may not reached to server.')
return True
def _close(self):
self.publisher.close()
self.communicator.close()
self.context.term()
| # -*- coding: utf-8 -*-
"""
APNS Proxy Serverのクライアント
"""
import time
import zmq
import simplejson as json
READ_TIMEOUT = 1500 # msec
FLUSH_TIMEOUT = 5000 # msec
COMMAND_ASK_ADDRESS = b'\1'
COMMAND_SEND = b'\2'
COMMAND_FEEDBACK = b'\3'
DEVICE_TOKEN_LENGTH = 64
JSON_ALERT_KEY_SET = set(['body', 'action_loc_key', 'loc_key', 'loc_args', 'launch_image'])
class APNSProxyClient(object):
def __init__(self, host, port, application_id):
"""
ZMQコンテキストとソケットの初期化
"""
if host is None or not isinstance(host, str):
raise ValueError("host must be string")
if port is None or not isinstance(port, int):
raise ValueError("host must be int type")
self.host = host
self.port = port
self.context = zmq.Context()
self.context.setsockopt(zmq.LINGER, FLUSH_TIMEOUT)
self.communicator = self.context.socket(zmq.REQ)
self.publisher = self.context.socket(zmq.PUSH)
self.connected = False
if not isinstance(application_id, str):
raise ValueError("application_id must be string type")
self.application_id = application_id
def __enter__(self):
self.connect()
def connect(self):
"""リモートサーバーへ接続"""
if self.connected is False:
self.communicator.connect(self.build_address(self.port))
push_port = self.get_push_port()
self.publisher.connect(self.build_address(push_port))
self.connected = True
def build_address(self, port):
return "tcp://%s:%s" % (self.host, port)
def get_push_port(self):
"""
PUSH-PULL接続用のポートを取得する
"""
self.communicator.send(COMMAND_ASK_ADDRESS)
poller = zmq.Poller()
poller.register(self.communicator, zmq.POLLIN)
if poller.poll(READ_TIMEOUT):
return self.communicator.recv()
else:
self.close()
raise IOError("Cannot connect to APNs Proxy Server. Timeout!!")
def send(self, token, alert, sound='default', badge=None, content_available=False,
custom=None, expiry=None, priority=None, test=False):
"""
デバイストークンの送信
"""
self._check_token(token)
self._check_alert(alert)
self._check_custom(custom)
self.publisher.send(self._serialize(
COMMAND_SEND, token, alert, sound, badge, content_available, custom,
expiry, priority, test
))
def get_feedback(self):
data = {
'appid': self.application_id,
}
command = COMMAND_FEEDBACK + json.dumps(data, ensure_ascii=True)
self.communicator.send(command)
return json.loads(self.communicator.recv())
@staticmethod
def _check_token(token):
if len(token) != DEVICE_TOKEN_LENGTH:
raise ValueError('Invalid token length %s' % token)
@staticmethod
def _check_alert(alert):
if (alert is None or isinstance(alert, basestring)):
return
elif isinstance(alert, dict):
if len(set(alert.keys()) - JSON_ALERT_KEY_SET) > 0:
raise ValueError('JSON Alert allows only'
'body, action_loc_key, loc_key, loc_args, launch_image')
else:
raise ValueError('alert must be string, unicode or dict type')
@staticmethod
def _check_custom(custom):
if custom is None or isinstance(custom, dict):
return
raise ValueError('custom must be dict type')
def _serialize(self, command, token, alert, sound, badge, content_available, custom,
expiry, priority, test):
"""
送信データのフォーマット
"""
aps = {}
if alert is not None:
aps['alert'] = alert
if sound is not None:
aps['sound'] = sound
if badge is not None:
aps['badge'] = badge
if content_available is True:
aps['content_available'] = True
if custom is not None:
aps['custom'] = custom
data = {
'appid': self.application_id,
'token': token,
'aps': aps,
'test': test
}
if expiry is not None:
data['expiry'] = expiry
if priority is not None:
data['priority'] = priority
return command + json.dumps(data, ensure_ascii=True)
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
self._close()
return False
self.close()
def close(self):
start_time = time.time()
self._close()
end_time = time.time()
if (end_time - start_time) > (FLUSH_TIMEOUT - 20)/1000.0:
raise IOError('Timeout close operation. Some messages may not reached to server.')
return True
def _close(self):
self.publisher.close()
self.communicator.close()
self.context.term() | ja | 0.998825 | # -*- coding: utf-8 -*- APNS Proxy Serverのクライアント # msec # msec ZMQコンテキストとソケットの初期化 リモートサーバーへ接続 PUSH-PULL接続用のポートを取得する デバイストークンの送信 送信データのフォーマット | 2.424574 | 2 |
003_joint_probabilities.py | svetlanama/snowball | 0 | 7443 | import sys
sys.path.insert(0, '..')
import numpy
import time
import ConfigParser
import topicmodel
def main():
# read configuration file
config = ConfigParser.ConfigParser()
config.readfp(open('config.ini'))
dataDir = config.get('main', 'dataDir')
io = topicmodel.io(dataDir)
model = topicmodel.model(dataDir)
wordDictionary = io.load_csv_as_dict('out-word-dictionary-rare-words-excluded.csv')
model.set_word_dictionary(wordDictionary)
# print wordDictionary
# return
wwcovar=model.coccurences('tmp-all-paper-tokens.csv','+','.')
numpy.save(dataDir + '/tmp-joint-probabilities.npy', wwcovar)
return
if __name__ == "__main__":
t0 = time.time()
main()
t1 = time.time()
print "finished"
print "time=", t1 - t0
| import sys
sys.path.insert(0, '..')
import numpy
import time
import ConfigParser
import topicmodel
def main():
# read configuration file
config = ConfigParser.ConfigParser()
config.readfp(open('config.ini'))
dataDir = config.get('main', 'dataDir')
io = topicmodel.io(dataDir)
model = topicmodel.model(dataDir)
wordDictionary = io.load_csv_as_dict('out-word-dictionary-rare-words-excluded.csv')
model.set_word_dictionary(wordDictionary)
# print wordDictionary
# return
wwcovar=model.coccurences('tmp-all-paper-tokens.csv','+','.')
numpy.save(dataDir + '/tmp-joint-probabilities.npy', wwcovar)
return
if __name__ == "__main__":
t0 = time.time()
main()
t1 = time.time()
print "finished"
print "time=", t1 - t0
| en | 0.580016 | # read configuration file # print wordDictionary # return | 2.149346 | 2 |
tests/bugs/core_4318_test.py | FirebirdSQL/firebird-qa | 1 | 7444 | <reponame>FirebirdSQL/firebird-qa
#coding:utf-8
#
# id: bugs.core_4318
# title: Regression: Predicates involving PSQL variables/parameters are not pushed inside the aggregation
# decription:
# tracker_id: CORE-4318
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """
recreate table t2 (
id integer not null,
t1_id integer
);
commit;
recreate table t1 (
id integer not null
);
commit;
set term ^;
execute block
as
declare variable i integer = 0;
begin
while (i < 1000) do begin
i = i + 1;
insert into t2(id, t1_id) values(:i, mod(:i, 10));
merge into t1 using (
select mod(:i, 10) as f from rdb$database
) src on t1.id = src.f
when not matched then
insert (id) values(src.f);
end -- while (i < 1000) do begin
end^
set term ;^
commit;
alter table t1 add constraint pk_t1 primary key (id);
alter table t2 add constraint pk_t2 primary key (id);
alter table t2 add constraint fk_t2_ref_t1 foreign key (t1_id) references t1(id);
commit;
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
set explain on;
set planonly;
set term ^;
execute block
returns (
s integer
)
as
declare variable v integer = 1;
begin
with t as (
select t1_id as t1_id, sum(id) as s
from t2
group by 1
)
select s
from t
where t1_id = :v
into :s;
suspend;
end
^
set term ;^
-- In 3.0.0.30837 plan was:
-- Select Expression
-- -> Singularity Check
-- -> Filter
-- -> Aggregate
-- -> Table "T T2" Access By ID
-- -> Index "FK_T2_REF_T1" Scan
-- (i.e. there was NO "Filter" between "Aggregate" and "Table "T T2" Access By ID")
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Select Expression
-> Singularity Check
-> Filter
-> Aggregate
-> Filter
-> Table "T2" as "T T2" Access By ID
-> Index "FK_T2_REF_T1" Range Scan (full match)
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| #coding:utf-8
#
# id: bugs.core_4318
# title: Regression: Predicates involving PSQL variables/parameters are not pushed inside the aggregation
# decription:
# tracker_id: CORE-4318
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """
recreate table t2 (
id integer not null,
t1_id integer
);
commit;
recreate table t1 (
id integer not null
);
commit;
set term ^;
execute block
as
declare variable i integer = 0;
begin
while (i < 1000) do begin
i = i + 1;
insert into t2(id, t1_id) values(:i, mod(:i, 10));
merge into t1 using (
select mod(:i, 10) as f from rdb$database
) src on t1.id = src.f
when not matched then
insert (id) values(src.f);
end -- while (i < 1000) do begin
end^
set term ;^
commit;
alter table t1 add constraint pk_t1 primary key (id);
alter table t2 add constraint pk_t2 primary key (id);
alter table t2 add constraint fk_t2_ref_t1 foreign key (t1_id) references t1(id);
commit;
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
set explain on;
set planonly;
set term ^;
execute block
returns (
s integer
)
as
declare variable v integer = 1;
begin
with t as (
select t1_id as t1_id, sum(id) as s
from t2
group by 1
)
select s
from t
where t1_id = :v
into :s;
suspend;
end
^
set term ;^
-- In 3.0.0.30837 plan was:
-- Select Expression
-- -> Singularity Check
-- -> Filter
-- -> Aggregate
-- -> Table "T T2" Access By ID
-- -> Index "FK_T2_REF_T1" Scan
-- (i.e. there was NO "Filter" between "Aggregate" and "Table "T T2" Access By ID")
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Select Expression
-> Singularity Check
-> Filter
-> Aggregate
-> Filter
-> Table "T2" as "T T2" Access By ID
-> Index "FK_T2_REF_T1" Range Scan (full match)
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout | en | 0.696103 | #coding:utf-8 # # id: bugs.core_4318 # title: Regression: Predicates involving PSQL variables/parameters are not pushed inside the aggregation # decription: # tracker_id: CORE-4318 # min_versions: ['3.0'] # versions: 3.0 # qmid: None # version: 3.0 # resources: None recreate table t2 ( id integer not null, t1_id integer ); commit; recreate table t1 ( id integer not null ); commit; set term ^; execute block as declare variable i integer = 0; begin while (i < 1000) do begin i = i + 1; insert into t2(id, t1_id) values(:i, mod(:i, 10)); merge into t1 using ( select mod(:i, 10) as f from rdb$database ) src on t1.id = src.f when not matched then insert (id) values(src.f); end -- while (i < 1000) do begin end^ set term ;^ commit; alter table t1 add constraint pk_t1 primary key (id); alter table t2 add constraint pk_t2 primary key (id); alter table t2 add constraint fk_t2_ref_t1 foreign key (t1_id) references t1(id); commit; set explain on; set planonly; set term ^; execute block returns ( s integer ) as declare variable v integer = 1; begin with t as ( select t1_id as t1_id, sum(id) as s from t2 group by 1 ) select s from t where t1_id = :v into :s; suspend; end ^ set term ;^ -- In 3.0.0.30837 plan was: -- Select Expression -- -> Singularity Check -- -> Filter -- -> Aggregate -- -> Table "T T2" Access By ID -- -> Index "FK_T2_REF_T1" Scan -- (i.e. there was NO "Filter" between "Aggregate" and "Table "T T2" Access By ID") Select Expression -> Singularity Check -> Filter -> Aggregate -> Filter -> Table "T2" as "T T2" Access By ID -> Index "FK_T2_REF_T1" Range Scan (full match) | 1.259938 | 1 |
dictionary.py | WilliamHackspeare/profanity-percentage | 0 | 7445 | <gh_stars>0
#Import the json library to parse JSON file to Python
import json
#Import list of punctuation characters from the string library
from string import punctuation as p
#This method checks if the given word is a profanity
def is_profanity(word):
#Open the JSON file
words_file = open('data.json')
#Parse the JSON file as a dictionary and extract the values
bad_words = json.load(words_file).values()
#Check and return if the word is a bad work
return word in bad_words
#This method calculates the degree of profanity for a list of strings
def calculate_profanity(sentence):
#Initialise the count of bad words
count_bad = 0
#Initialise the total count of words
count = 0
#Loop through the list of words
for word in sentence:
#Check if the word, stripped of any leading or trailing punctuations or spaces, is a bad word and update count
if is_profanity(word.strip(p+" ")):
count_bad += 1
count += 1
#Calculate the degree of the list
deg = (count_bad/count)*100
#Return the degree
return deg | #Import the json library to parse JSON file to Python
import json
#Import list of punctuation characters from the string library
from string import punctuation as p
#This method checks if the given word is a profanity
def is_profanity(word):
#Open the JSON file
words_file = open('data.json')
#Parse the JSON file as a dictionary and extract the values
bad_words = json.load(words_file).values()
#Check and return if the word is a bad work
return word in bad_words
#This method calculates the degree of profanity for a list of strings
def calculate_profanity(sentence):
#Initialise the count of bad words
count_bad = 0
#Initialise the total count of words
count = 0
#Loop through the list of words
for word in sentence:
#Check if the word, stripped of any leading or trailing punctuations or spaces, is a bad word and update count
if is_profanity(word.strip(p+" ")):
count_bad += 1
count += 1
#Calculate the degree of the list
deg = (count_bad/count)*100
#Return the degree
return deg | en | 0.7816 | #Import the json library to parse JSON file to Python #Import list of punctuation characters from the string library #This method checks if the given word is a profanity #Open the JSON file #Parse the JSON file as a dictionary and extract the values #Check and return if the word is a bad work #This method calculates the degree of profanity for a list of strings #Initialise the count of bad words #Initialise the total count of words #Loop through the list of words #Check if the word, stripped of any leading or trailing punctuations or spaces, is a bad word and update count #Calculate the degree of the list #Return the degree | 4.065308 | 4 |
setup.py | cyfrmedia/cerridwen | 25 | 7446 | from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
#NEWS = open(os.path.join(here, 'NEWS.txt')).read()
rootdir = os.path.dirname(os.path.abspath(__file__))
exec(open(rootdir + '/cerridwen/version.py').read())
version = __VERSION__
setup(name='cerridwen',
version=version,
description='Accurate solar system data for everyone',
long_description=README,
author='<NAME>',
author_email='<EMAIL>',
url='http://cerridwen.bluemagician.vc/',
license='MIT',
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta"
, "Environment :: Console"
, "Intended Audience :: Science/Research"
, "Intended Audience :: Developers"
, "License :: OSI Approved :: MIT License"
, "Operating System :: OS Independent"
, "Programming Language :: Python :: 3"
, "Topic :: Scientific/Engineering :: Astronomy"
, "Topic :: Other/Nonlisted Topic"
, "Topic :: Software Development :: Libraries :: Python Modules"
, "Topic :: Utilities"
],
maintainer='<NAME>',
maintainer_email='<EMAIL>',
packages=['cerridwen'],
requires=['pyswisseph', 'numpy', 'astropy(>=0.4)'],
extras_require={'Flask':['flask']},
entry_points={
'console_scripts':
['cerridwen = cerridwen.cli:main',
'cerridwen-server = cerridwen.api_server:main [Flask]']
})
| from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
#NEWS = open(os.path.join(here, 'NEWS.txt')).read()
rootdir = os.path.dirname(os.path.abspath(__file__))
exec(open(rootdir + '/cerridwen/version.py').read())
version = __VERSION__
setup(name='cerridwen',
version=version,
description='Accurate solar system data for everyone',
long_description=README,
author='<NAME>',
author_email='<EMAIL>',
url='http://cerridwen.bluemagician.vc/',
license='MIT',
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta"
, "Environment :: Console"
, "Intended Audience :: Science/Research"
, "Intended Audience :: Developers"
, "License :: OSI Approved :: MIT License"
, "Operating System :: OS Independent"
, "Programming Language :: Python :: 3"
, "Topic :: Scientific/Engineering :: Astronomy"
, "Topic :: Other/Nonlisted Topic"
, "Topic :: Software Development :: Libraries :: Python Modules"
, "Topic :: Utilities"
],
maintainer='<NAME>',
maintainer_email='<EMAIL>',
packages=['cerridwen'],
requires=['pyswisseph', 'numpy', 'astropy(>=0.4)'],
extras_require={'Flask':['flask']},
entry_points={
'console_scripts':
['cerridwen = cerridwen.cli:main',
'cerridwen-server = cerridwen.api_server:main [Flask]']
})
| en | 0.480315 | #NEWS = open(os.path.join(here, 'NEWS.txt')).read() # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers | 1.418959 | 1 |
pajbot/apiwrappers/authentication/access_token.py | JoachimFlottorp/pajbot | 128 | 7447 | <gh_stars>100-1000
import datetime
from abc import ABC, abstractmethod
import pajbot
class AccessToken(ABC):
SHOULD_REFRESH_THRESHOLD = 0.9
"""Fraction between 0 and 1 indicating what fraction/percentage of the specified full validity period
should actually be utilized. E.g. if this is set to 0.9, the implementation will refresh the token
once at least 90% of the full validity period (expires_in) is over."""
def __init__(self, access_token, created_at, expires_in, token_type, refresh_token, scope):
self.access_token = access_token
self.created_at = created_at
# can both be None
self.expires_in = expires_in
if self.expires_in is not None:
self.expires_at = self.created_at + self.expires_in
else:
self.expires_at = None
self.token_type = token_type
# can be None
self.refresh_token = refresh_token
# always a list, can be empty list
self.scope = scope
@abstractmethod
def can_refresh(self):
pass
def should_refresh(self):
"""Returns True if less than 10% of the token's lifetime remains, False otherwise"""
if not self.can_refresh():
return False
# intended lifetime of the token
if self.expires_at is not None:
expires_after = self.expires_at - self.created_at
else:
# this is a token that never expires
# because we don't want any issues, refresh it anyways
expires_after = datetime.timedelta(hours=1)
# how much time has passed since token creation
token_age = pajbot.utils.now() - self.created_at
# maximum token age before token should be refreshed (90% of the total token lifetime)
max_token_age = expires_after * self.SHOULD_REFRESH_THRESHOLD
# expired?
return token_age >= max_token_age
def jsonify(self):
"""serialize for storage"""
if self.expires_in is None:
expires_in_milliseconds = None
else:
expires_in_milliseconds = self.expires_in.total_seconds() * 1000
return {
"access_token": self.access_token,
"created_at": self.created_at.timestamp() * 1000,
"expires_in": expires_in_milliseconds,
"token_type": self.token_type,
"refresh_token": self.refresh_token,
"scope": self.scope,
}
@classmethod
def from_json(cls, json_data):
"""deserialize json produced by jsonify()"""
if json_data["expires_in"] is None:
expires_in = None
else:
expires_in = datetime.timedelta(milliseconds=json_data["expires_in"])
return cls(
access_token=json_data["access_token"],
created_at=pajbot.utils.datetime_from_utc_milliseconds(json_data["created_at"]),
expires_in=expires_in,
token_type=json_data["token_type"],
refresh_token=json_data["refresh_token"],
scope=json_data["scope"],
)
@classmethod
def from_api_response(cls, response):
"""Construct new object from twitch response json data"""
# expires_in is only missing for old Client-IDs to which twitch will respond with
# infinitely-lived tokens (the "expires_in" field is absent in that case).
expires_in_seconds = response.get("expires_in", None)
if expires_in_seconds is None:
expires_in = None
else:
expires_in = datetime.timedelta(seconds=expires_in_seconds)
return cls(
access_token=response["access_token"],
created_at=pajbot.utils.now(),
expires_in=expires_in,
token_type=response["token_type"],
refresh_token=response.get("refresh_token", None),
scope=response.get("scope", []),
)
@abstractmethod
def refresh(self, api):
pass
class UserAccessToken(AccessToken):
def can_refresh(self):
return self.refresh_token is not None
def refresh(self, api):
if not self.can_refresh():
raise ValueError("This user access token cannot be refreshed, it has no refresh token")
return api.refresh_user_access_token(self.refresh_token)
@staticmethod
def from_implicit_auth_flow_token(access_token):
return UserAccessToken(
access_token=access_token,
created_at=None,
expires_in=None,
token_type="bearer",
refresh_token=None,
scope=[],
)
class AppAccessToken(AccessToken):
def can_refresh(self):
return True
def refresh(self, api):
return api.get_app_access_token(self.scope)
| import datetime
from abc import ABC, abstractmethod
import pajbot
class AccessToken(ABC):
SHOULD_REFRESH_THRESHOLD = 0.9
"""Fraction between 0 and 1 indicating what fraction/percentage of the specified full validity period
should actually be utilized. E.g. if this is set to 0.9, the implementation will refresh the token
once at least 90% of the full validity period (expires_in) is over."""
def __init__(self, access_token, created_at, expires_in, token_type, refresh_token, scope):
self.access_token = access_token
self.created_at = created_at
# can both be None
self.expires_in = expires_in
if self.expires_in is not None:
self.expires_at = self.created_at + self.expires_in
else:
self.expires_at = None
self.token_type = token_type
# can be None
self.refresh_token = refresh_token
# always a list, can be empty list
self.scope = scope
@abstractmethod
def can_refresh(self):
pass
def should_refresh(self):
"""Returns True if less than 10% of the token's lifetime remains, False otherwise"""
if not self.can_refresh():
return False
# intended lifetime of the token
if self.expires_at is not None:
expires_after = self.expires_at - self.created_at
else:
# this is a token that never expires
# because we don't want any issues, refresh it anyways
expires_after = datetime.timedelta(hours=1)
# how much time has passed since token creation
token_age = pajbot.utils.now() - self.created_at
# maximum token age before token should be refreshed (90% of the total token lifetime)
max_token_age = expires_after * self.SHOULD_REFRESH_THRESHOLD
# expired?
return token_age >= max_token_age
def jsonify(self):
"""serialize for storage"""
if self.expires_in is None:
expires_in_milliseconds = None
else:
expires_in_milliseconds = self.expires_in.total_seconds() * 1000
return {
"access_token": self.access_token,
"created_at": self.created_at.timestamp() * 1000,
"expires_in": expires_in_milliseconds,
"token_type": self.token_type,
"refresh_token": self.refresh_token,
"scope": self.scope,
}
@classmethod
def from_json(cls, json_data):
"""deserialize json produced by jsonify()"""
if json_data["expires_in"] is None:
expires_in = None
else:
expires_in = datetime.timedelta(milliseconds=json_data["expires_in"])
return cls(
access_token=json_data["access_token"],
created_at=pajbot.utils.datetime_from_utc_milliseconds(json_data["created_at"]),
expires_in=expires_in,
token_type=json_data["token_type"],
refresh_token=json_data["refresh_token"],
scope=json_data["scope"],
)
@classmethod
def from_api_response(cls, response):
"""Construct new object from twitch response json data"""
# expires_in is only missing for old Client-IDs to which twitch will respond with
# infinitely-lived tokens (the "expires_in" field is absent in that case).
expires_in_seconds = response.get("expires_in", None)
if expires_in_seconds is None:
expires_in = None
else:
expires_in = datetime.timedelta(seconds=expires_in_seconds)
return cls(
access_token=response["access_token"],
created_at=pajbot.utils.now(),
expires_in=expires_in,
token_type=response["token_type"],
refresh_token=response.get("refresh_token", None),
scope=response.get("scope", []),
)
@abstractmethod
def refresh(self, api):
pass
class UserAccessToken(AccessToken):
def can_refresh(self):
return self.refresh_token is not None
def refresh(self, api):
if not self.can_refresh():
raise ValueError("This user access token cannot be refreshed, it has no refresh token")
return api.refresh_user_access_token(self.refresh_token)
@staticmethod
def from_implicit_auth_flow_token(access_token):
return UserAccessToken(
access_token=access_token,
created_at=None,
expires_in=None,
token_type="bearer",
refresh_token=None,
scope=[],
)
class AppAccessToken(AccessToken):
def can_refresh(self):
return True
def refresh(self, api):
return api.get_app_access_token(self.scope) | en | 0.898983 | Fraction between 0 and 1 indicating what fraction/percentage of the specified full validity period should actually be utilized. E.g. if this is set to 0.9, the implementation will refresh the token once at least 90% of the full validity period (expires_in) is over. # can both be None # can be None # always a list, can be empty list Returns True if less than 10% of the token's lifetime remains, False otherwise # intended lifetime of the token # this is a token that never expires # because we don't want any issues, refresh it anyways # how much time has passed since token creation # maximum token age before token should be refreshed (90% of the total token lifetime) # expired? serialize for storage deserialize json produced by jsonify() Construct new object from twitch response json data # expires_in is only missing for old Client-IDs to which twitch will respond with # infinitely-lived tokens (the "expires_in" field is absent in that case). | 3.268646 | 3 |
GHOST.py | RadicalAjay/Ghost_data | 1 | 7448 | #! /usr/bin/python3
# Description: Data_Ghost, concealing data into spaces and tabs making it imperceptable to human eyes.
# Author: <NAME>
# Github: Radical Ajay
class Ghost():
def __init__(self, file_name, output_format='txt'):
''' Converts ascii text to spaces and tabs '''
self.file_name = file_name
self.output_format = output_format
def ascii2bin(self, asc):
''' Converting ascii to bianry '''
return ''.join('{:08b}'.format(ord(i)) for i in asc)
def bin2ascii(self, bid):
''' Converting binary to ascii '''
return ''.join(chr(int(bid[i:i + 8], 2)) for i in range(0, len(bid), 8))
def ghost(self, filename):
''' Ghosting data converting it to spaces and tabs '''
with open(filename, 'w') as out_f:
with open(self.file_name, 'r') as in_f:
for in_data in in_f.readlines():
bin_data = self.ascii2bin(in_data)
out_data = bin_data.replace('1', '\t')
out_data = out_data.replace('0', ' ')
out_f.write(out_data)
def unghost(self, in_filename, out_filename):
''' Unghosting data converting back from spaces and tabs to human-readable text '''
with open(out_filename, 'w') as out_f:
with open(in_filename, 'r') as in_f:
for line in in_f.readlines():
line = line.replace('\t', '1')
line = line.replace(' ', '0')
out_f.write(self.bin2ascii(line))
# USAGE:
# ghoster = Ghost('data.txt')
# ghoster.ghost('ghosted.txt')
# ghoster.unghost('ghosted.txt', 'unghosted.txt')
| #! /usr/bin/python3
# Description: Data_Ghost, concealing data into spaces and tabs making it imperceptable to human eyes.
# Author: <NAME>
# Github: Radical Ajay
class Ghost():
def __init__(self, file_name, output_format='txt'):
''' Converts ascii text to spaces and tabs '''
self.file_name = file_name
self.output_format = output_format
def ascii2bin(self, asc):
''' Converting ascii to bianry '''
return ''.join('{:08b}'.format(ord(i)) for i in asc)
def bin2ascii(self, bid):
''' Converting binary to ascii '''
return ''.join(chr(int(bid[i:i + 8], 2)) for i in range(0, len(bid), 8))
def ghost(self, filename):
''' Ghosting data converting it to spaces and tabs '''
with open(filename, 'w') as out_f:
with open(self.file_name, 'r') as in_f:
for in_data in in_f.readlines():
bin_data = self.ascii2bin(in_data)
out_data = bin_data.replace('1', '\t')
out_data = out_data.replace('0', ' ')
out_f.write(out_data)
def unghost(self, in_filename, out_filename):
''' Unghosting data converting back from spaces and tabs to human-readable text '''
with open(out_filename, 'w') as out_f:
with open(in_filename, 'r') as in_f:
for line in in_f.readlines():
line = line.replace('\t', '1')
line = line.replace(' ', '0')
out_f.write(self.bin2ascii(line))
# USAGE:
# ghoster = Ghost('data.txt')
# ghoster.ghost('ghosted.txt')
# ghoster.unghost('ghosted.txt', 'unghosted.txt')
| en | 0.57006 | #! /usr/bin/python3 # Description: Data_Ghost, concealing data into spaces and tabs making it imperceptable to human eyes. # Author: <NAME> # Github: Radical Ajay Converts ascii text to spaces and tabs Converting ascii to bianry Converting binary to ascii Ghosting data converting it to spaces and tabs Unghosting data converting back from spaces and tabs to human-readable text # USAGE: # ghoster = Ghost('data.txt') # ghoster.ghost('ghosted.txt') # ghoster.unghost('ghosted.txt', 'unghosted.txt') | 3.476714 | 3 |
scan_predict.py | ychu196/chicago_scan | 0 | 7449 | <reponame>ychu196/chicago_scan
# Image classification using AWS Sagemaker and Linear Learner
# Program set up and import libraries
import numpy as np
import pandas as pd
import os
from sagemaker import get_execution_role
role = get_execution_role()
bucket = 'chi-hackathon-skin-images'
# Import Data
import boto3
from sagemaker import get_execution_role
role = get_execution_role()
bucket='chi-hackathon-skin-images'
data_key = 'ISIC_0000000.json' # need a way to go through entire library
data_location = 's3://{}/{}'.format(bucket, data_key)
metadata_set = pd.read_json(data_location)
image_set = np.asarray(data_location)
# TBD - transform json data to array
# TBD - transform image data to dataframe
train_set = zip(image_set, metadata_set)
# Split Data into Train and Validate
import random
random.seed(9001)
split = np.random.rand(len(df)) < 0.8
valid_set = train_set[split]
train_set = train_set[~split]
# Train Model
import boto
import sagemaker
data_location = 's3://{}/linearlearner_highlevel_example/data'.format(bucket)
output_location = 's3://{}/linearlearner_highlevel_example/output'.format(bucket)
print('training data will be uploaded to: {}'.format(data_location))
print('training artifacts will be uploaded to: {}'.format(output_location))
sess = sagemaker.Session()
linear = sagemaker.estimator.Estimator(container, role, train_instance_count=1, rain_instance_type='ml.c4.xlarge',
output_path=output_location, sagemaker_session=sess)
linear.set_hyperparameters(feature_dim=784, predictor_type='binary_classifier', mini_batch_size=200)
linear.fit({'train': train_set})
# Deploy Model
linear_predictor = linear.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
# Validate
from sagemaker.predictor import csv_serializer, json_deserializer
linear_predictor.content_type = 'text/csv'
linear_predictor.serializer = csv_serializer
linear_predictor.deserializer = json_deserializer
result = linear_predictor.predict(train_set[0][30:31])
print(result)
| # Image classification using AWS Sagemaker and Linear Learner
# Program set up and import libraries
import numpy as np
import pandas as pd
import os
from sagemaker import get_execution_role
role = get_execution_role()
bucket = 'chi-hackathon-skin-images'
# Import Data
import boto3
from sagemaker import get_execution_role
role = get_execution_role()
bucket='chi-hackathon-skin-images'
data_key = 'ISIC_0000000.json' # need a way to go through entire library
data_location = 's3://{}/{}'.format(bucket, data_key)
metadata_set = pd.read_json(data_location)
image_set = np.asarray(data_location)
# TBD - transform json data to array
# TBD - transform image data to dataframe
train_set = zip(image_set, metadata_set)
# Split Data into Train and Validate
import random
random.seed(9001)
split = np.random.rand(len(df)) < 0.8
valid_set = train_set[split]
train_set = train_set[~split]
# Train Model
import boto
import sagemaker
data_location = 's3://{}/linearlearner_highlevel_example/data'.format(bucket)
output_location = 's3://{}/linearlearner_highlevel_example/output'.format(bucket)
print('training data will be uploaded to: {}'.format(data_location))
print('training artifacts will be uploaded to: {}'.format(output_location))
sess = sagemaker.Session()
linear = sagemaker.estimator.Estimator(container, role, train_instance_count=1, rain_instance_type='ml.c4.xlarge',
output_path=output_location, sagemaker_session=sess)
linear.set_hyperparameters(feature_dim=784, predictor_type='binary_classifier', mini_batch_size=200)
linear.fit({'train': train_set})
# Deploy Model
linear_predictor = linear.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
# Validate
from sagemaker.predictor import csv_serializer, json_deserializer
linear_predictor.content_type = 'text/csv'
linear_predictor.serializer = csv_serializer
linear_predictor.deserializer = json_deserializer
result = linear_predictor.predict(train_set[0][30:31])
print(result) | en | 0.745929 | # Image classification using AWS Sagemaker and Linear Learner # Program set up and import libraries # Import Data # need a way to go through entire library # TBD - transform json data to array # TBD - transform image data to dataframe # Split Data into Train and Validate # Train Model # Deploy Model # Validate | 2.50494 | 3 |
gerber/am_statements.py | FixturFab/pcb-tools | 0 | 7450 | <reponame>FixturFab/pcb-tools<filename>gerber/am_statements.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# copyright 2015 <NAME> <<EMAIL>> and <NAME>
# <<EMAIL>>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import asin
import math
from .primitives import *
from .utils import validate_coordinates, inch, metric, rotate_point
# TODO: Add support for aperture macro variables
__all__ = ['AMPrimitive', 'AMCommentPrimitive', 'AMCirclePrimitive',
'AMVectorLinePrimitive', 'AMOutlinePrimitive', 'AMPolygonPrimitive',
'AMMoirePrimitive', 'AMThermalPrimitive', 'AMCenterLinePrimitive',
'AMLowerLeftLinePrimitive', 'AMUnsupportPrimitive']
class AMPrimitive(object):
""" Aperture Macro Primitive Base Class
Parameters
----------
code : int
primitive shape code
exposure : str
        'on' or 'off'. Primitives with exposure 'on' create a solid part of
        the macro aperture, and primitives with exposure 'off' erase the
        solid part created previously in the aperture macro definition.
.. note::
The erasing effect is limited to the aperture definition in
which it occurs.
Returns
-------
    primitive : :class:`gerber.am_statements.AMPrimitive`
Raises
------
TypeError, ValueError
"""
def __init__(self, code, exposure=None):
VALID_CODES = (0, 1, 2, 4, 5, 6, 7, 20, 21, 22, 9999)
if not isinstance(code, int):
raise TypeError('Aperture Macro Primitive code must be an integer')
elif code not in VALID_CODES:
raise ValueError('Invalid Code. Valid codes are %s.' %
', '.join(map(str, VALID_CODES)))
if exposure is not None and exposure.lower() not in ('on', 'off'):
raise ValueError('Exposure must be either on or off')
self.code = code
self.exposure = exposure.lower() if exposure is not None else None
def to_inch(self):
raise NotImplementedError('Subclass must implement `to-inch`')
def to_metric(self):
raise NotImplementedError('Subclass must implement `to-metric`')
@property
def _level_polarity(self):
if self.exposure == 'off':
return 'clear'
return 'dark'
def to_primitive(self, units):
""" Return a Primitive instance based on the specified macro params.
"""
print('Rendering {}s is not supported yet.'.format(str(self.__class__)))
def __eq__(self, other):
return self.__dict__ == other.__dict__
class AMCommentPrimitive(AMPrimitive):
""" Aperture Macro Comment primitive. Code 0
The comment primitive has no image meaning. It is used to include human-
readable comments into the AM command.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.1:** Comment, primitive code 0
Parameters
----------
code : int
Aperture Macro primitive code. 0 Indicates an AMCommentPrimitive
comment : str
The comment as a string.
Returns
-------
    CommentPrimitive : :class:`gerber.am_statements.AMCommentPrimitive`
An Initialized AMCommentPrimitive
Raises
------
ValueError
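
    Examples
    --------
    A minimal round-trip sketch; the comment text is illustrative, not
    taken from a real Gerber file:

    >>> AMCommentPrimitive.from_gerber('0 draws the pad outline *').to_gerber()
    '0 draws the pad outline *'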
"""
@classmethod
def from_gerber(cls, primitive):
primitive = primitive.strip()
code = int(primitive[0])
comment = primitive[1:]
return cls(code, comment)
def __init__(self, code, comment):
if code != 0:
raise ValueError('Not a valid Aperture Macro Comment statement')
super(AMCommentPrimitive, self).__init__(code)
self.comment = comment.strip(' *')
def to_inch(self):
pass
def to_metric(self):
pass
def to_gerber(self, settings=None):
return '0 %s *' % self.comment
def to_primitive(self, units):
"""
Returns None - has not primitive representation
"""
return None
def __str__(self):
return '<Aperture Macro Comment: %s>' % self.comment
class AMCirclePrimitive(AMPrimitive):
""" Aperture macro Circle primitive. Code 1
A circle primitive is defined by its center point and diameter.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.2:** Circle, primitive code 1
Parameters
----------
code : int
Circle Primitive code. Must be 1
exposure : string
'on' or 'off'
diameter : float
Circle diameter
position : tuple (<float>, <float>)
Position of the circle relative to the macro origin
Returns
-------
    CirclePrimitive : :class:`gerber.am_statements.AMCirclePrimitive`
An initialized AMCirclePrimitive
Raises
------
ValueError, TypeError
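
    Examples
    --------
    Sketch of parsing and re-emitting a circle primitive (the modifier
    values are illustrative):

    >>> AMCirclePrimitive.from_gerber('1,1,1.5,0,0*').to_gerber()
    '1,1,1.5,0.0,0.0*'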
"""
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(',')
code = int(modifiers[0])
exposure = 'on' if float(modifiers[1]) == 1 else 'off'
diameter = float(modifiers[2])
position = (float(modifiers[3]), float(modifiers[4]))
return cls(code, exposure, diameter, position)
@classmethod
def from_primitive(cls, primitive):
return cls(1, 'on', primitive.diameter, primitive.position)
def __init__(self, code, exposure, diameter, position):
validate_coordinates(position)
if code != 1:
raise ValueError('CirclePrimitive code is 1')
super(AMCirclePrimitive, self).__init__(code, exposure)
self.diameter = diameter
self.position = position
def to_inch(self):
self.diameter = inch(self.diameter)
self.position = tuple([inch(x) for x in self.position])
def to_metric(self):
self.diameter = metric(self.diameter)
self.position = tuple([metric(x) for x in self.position])
def to_gerber(self, settings=None):
data = dict(code=self.code,
                    exposure='1' if self.exposure == 'on' else '0',
diameter=self.diameter,
x=self.position[0],
y=self.position[1])
return '{code},{exposure},{diameter},{x},{y}*'.format(**data)
def to_primitive(self, units):
return Circle((self.position), self.diameter, units=units, level_polarity=self._level_polarity)
class AMVectorLinePrimitive(AMPrimitive):
""" Aperture Macro Vector Line primitive. Code 2 or 20.
A vector line is a rectangle defined by its line width, start, and end
points. The line ends are rectangular.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.3:** Vector Line, primitive code 2 or 20.
Parameters
----------
code : int
Vector Line Primitive code. Must be either 2 or 20.
exposure : string
'on' or 'off'
width : float
Line width
start : tuple (<float>, <float>)
coordinate of line start point
end : tuple (<float>, <float>)
coordinate of line end point
rotation : float
Line rotation about the origin.
Returns
-------
    LinePrimitive : :class:`gerber.am_statements.AMVectorLinePrimitive`
An initialized AMVectorLinePrimitive
Raises
------
ValueError, TypeError
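
    Examples
    --------
    Sketch of parsing a horizontal line of width 0.5 (the modifier values
    are illustrative):

    >>> line = AMVectorLinePrimitive.from_gerber('20,1,0.5,0,0,5,0,0*')
    >>> line.width, line.start, line.end, line.rotation
    (0.5, (0.0, 0.0), (5.0, 0.0), 0.0)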
"""
@classmethod
def from_primitive(cls, primitive):
return cls(2, 'on', primitive.aperture.width, primitive.start, primitive.end, 0)
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(',')
code = int(modifiers[0])
exposure = 'on' if float(modifiers[1]) == 1 else 'off'
width = float(modifiers[2])
start = (float(modifiers[3]), float(modifiers[4]))
end = (float(modifiers[5]), float(modifiers[6]))
rotation = float(modifiers[7])
return cls(code, exposure, width, start, end, rotation)
def __init__(self, code, exposure, width, start, end, rotation):
validate_coordinates(start)
validate_coordinates(end)
if code not in (2, 20):
raise ValueError('VectorLinePrimitive codes are 2 or 20')
super(AMVectorLinePrimitive, self).__init__(code, exposure)
self.width = width
self.start = start
self.end = end
self.rotation = rotation
def to_inch(self):
self.width = inch(self.width)
self.start = tuple([inch(x) for x in self.start])
self.end = tuple([inch(x) for x in self.end])
def to_metric(self):
self.width = metric(self.width)
self.start = tuple([metric(x) for x in self.start])
self.end = tuple([metric(x) for x in self.end])
def to_gerber(self, settings=None):
fmtstr = '{code},{exp},{width},{startx},{starty},{endx},{endy},{rotation}*'
data = dict(code=self.code,
exp=1 if self.exposure == 'on' else 0,
width=self.width,
startx=self.start[0],
starty=self.start[1],
endx=self.end[0],
endy=self.end[1],
rotation=self.rotation)
return fmtstr.format(**data)
def to_primitive(self, units):
"""
Convert this to a primitive. We use the Outline to represent this (instead of Line)
because the behaviour of the end caps is different for aperture macros compared to Lines
when rotated.
"""
# Use a line to generate our vertices easily
line = Line(self.start, self.end, Rectangle(None, self.width, self.width))
vertices = line.vertices
aperture = Circle((0, 0), 0)
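        # zero-size circular aperture: the outline vertices alone define the
        # rendered shape, so the stroking aperture contributes no width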
lines = []
prev_point = rotate_point(vertices[-1], self.rotation, (0, 0))
        for point in vertices:
            cur_point = rotate_point(point, self.rotation, (0, 0))
            lines.append(Line(prev_point, cur_point, aperture))
            # chain consecutive vertices so the result is a closed polygon
            # rather than a fan out of the last vertex
            prev_point = cur_point
return Outline(lines, units=units, level_polarity=self._level_polarity)
class AMOutlinePrimitive(AMPrimitive):
""" Aperture Macro Outline primitive. Code 4.
An outline primitive is an area enclosed by an n-point polygon defined by
its start point and n subsequent points. The outline must be closed, i.e.
the last point must be equal to the start point. Self intersecting
outlines are not allowed.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.6:** Outline, primitive code 4.
Parameters
----------
code : int
OutlinePrimitive code. Must be 4.
exposure : string
'on' or 'off'
start_point : tuple (<float>, <float>)
coordinate of outline start point
points : list of tuples (<float>, <float>)
coordinates of subsequent points
rotation : float
outline rotation about the origin.
Returns
-------
OutlinePrimitive : :class:`gerber.am_statements.AMOutlinePrimitive`
An initialized AMOutlinePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_primitive(cls, primitive):
start_point = (round(primitive.primitives[0].start[0], 6), round(primitive.primitives[0].start[1], 6))
points = []
for prim in primitive.primitives:
points.append((round(prim.end[0], 6), round(prim.end[1], 6)))
rotation = 0.0
return cls(4, 'on', start_point, points, rotation)
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
exposure = "on" if float(modifiers[1]) == 1 else "off"
n = int(float(modifiers[2]))
start_point = (float(modifiers[3]), float(modifiers[4]))
points = []
for i in range(n):
points.append((float(modifiers[5 + i * 2]),
float(modifiers[5 + i * 2 + 1])))
rotation = float(modifiers[-1])
return cls(code, exposure, start_point, points, rotation)
def __init__(self, code, exposure, start_point, points, rotation):
""" Initialize AMOutlinePrimitive
"""
validate_coordinates(start_point)
for point in points:
validate_coordinates(point)
if code != 4:
raise ValueError('OutlinePrimitive code is 4')
super(AMOutlinePrimitive, self).__init__(code, exposure)
self.start_point = start_point
if points[-1] != start_point:
raise ValueError('OutlinePrimitive must be closed')
self.points = points
self.rotation = rotation
def to_inch(self):
self.start_point = tuple([inch(x) for x in self.start_point])
self.points = tuple([(inch(x), inch(y)) for x, y in self.points])
def to_metric(self):
self.start_point = tuple([metric(x) for x in self.start_point])
self.points = tuple([(metric(x), metric(y)) for x, y in self.points])
def to_gerber(self, settings=None):
data = dict(
code=self.code,
exposure="1" if self.exposure == "on" else "0",
n_points=len(self.points),
start_point="%.6g,%.6g" % self.start_point,
points=",\n".join(["%.6g,%.6g" % point for point in self.points]),
rotation=str(self.rotation)
)
return "{code},{exposure},{n_points},{start_point},{points},{rotation}*".format(**data)
def to_primitive(self, units):
"""
Convert this to a drawable primitive. This uses the Outline instead of Line
primitive to handle differences in end caps when rotated.
"""
lines = []
prev_point = rotate_point(self.start_point, self.rotation)
for point in self.points:
cur_point = rotate_point(point, self.rotation)
lines.append(Line(prev_point, cur_point, Circle((0,0), 0)))
prev_point = cur_point
if lines[0].start != lines[-1].end:
raise ValueError('Outline must be closed')
return Outline(lines, units=units, level_polarity=self._level_polarity)
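# A minimal parsing sketch (hypothetical closed square outline). The modifier list is
# code, exposure, point count, the start point, the n points, then rotation; the last
# point must repeat the start point or the constructor raises ValueError:
# ol = AMOutlinePrimitive.from_gerber(
#     '4,1,4,0.1,0.1,0.5,0.1,0.5,0.5,0.1,0.5,0.1,0.1,0*')
# len(ol.points)    # -> 4, ending on the start point, so the outline is closed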
class AMPolygonPrimitive(AMPrimitive):
""" Aperture Macro Polygon primitive. Code 5.
A polygon primitive is a regular polygon defined by the number of
vertices, the center point, and the diameter of the circumscribed circle.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.8:** Polygon, primitive code 5.
Parameters
----------
code : int
PolygonPrimitive code. Must be 5.
exposure : string
'on' or 'off'
vertices : int, 3 <= vertices <= 12
Number of vertices
position : tuple (<float>, <float>)
X and Y coordinates of polygon center
diameter : float
diameter of circumscribed circle.
rotation : float
polygon rotation about the origin.
Returns
-------
PolygonPrimitive : :class:`gerber.am_statements.AMPolygonPrimitive`
An initialized AMPolygonPrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_primitive(cls, primitive):
return cls(5, 'on', primitive.sides, primitive.position, primitive.diameter, primitive.rotation)
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
exposure = "on" if float(modifiers[1]) == 1 else "off"
vertices = int(float(modifiers[2]))
position = (float(modifiers[3]), float(modifiers[4]))
try:
diameter = float(modifiers[5])
except (ValueError, IndexError):
# Some writers omit the diameter modifier; fall back to zero
diameter = 0
rotation = float(modifiers[6])
return cls(code, exposure, vertices, position, diameter, rotation)
def __init__(self, code, exposure, vertices, position, diameter, rotation):
""" Initialize AMPolygonPrimitive
"""
if code != 5:
raise ValueError('PolygonPrimitive code is 5')
super(AMPolygonPrimitive, self).__init__(code, exposure)
if vertices < 3 or vertices > 12:
raise ValueError('Number of vertices must be between 3 and 12')
self.vertices = vertices
validate_coordinates(position)
self.position = position
self.diameter = diameter
self.rotation = rotation
def to_inch(self):
self.position = tuple([inch(x) for x in self.position])
self.diameter = inch(self.diameter)
def to_metric(self):
self.position = tuple([metric(x) for x in self.position])
self.diameter = metric(self.diameter)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
exposure="1" if self.exposure == "on" else "0",
vertices=self.vertices,
position="%.4g,%.4g" % self.position,
diameter='%.4g' % self.diameter,
rotation=str(self.rotation)
)
fmt = "{code},{exposure},{vertices},{position},{diameter},{rotation}*"
return fmt.format(**data)
def to_primitive(self, units):
return Polygon(self.position, self.vertices, self.diameter / 2.0, 0, rotation=math.radians(self.rotation), units=units, level_polarity=self._level_polarity)
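# A minimal sketch (hypothetical regular hexagon):
# poly = AMPolygonPrimitive.from_gerber('5,1,6,0,0,8,0*')
# poly.to_gerber()    # -> '5,1,6,0,0,8,0.0*'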
class AMMoirePrimitive(AMPrimitive):
""" Aperture Macro Moire primitive. Code 6.
The moire primitive is a cross hair centered on concentric rings (annuli).
Exposure is always on.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.9:** Moire, primitive code 6.
Parameters
----------
code : int
Moire Primitive code. Must be 6.
position : tuple (<float>, <float>)
X and Y coordinates of moire center
diameter : float
outer diameter of outer ring.
ring_thickness : float
thickness of concentric rings.
gap : float
gap between concentric rings.
max_rings : float
maximum number of rings
crosshair_thickness : float
thickness of crosshairs
crosshair_length : float
length of crosshairs
rotation : float
moire rotation about the origin.
Returns
-------
MoirePrimitive : :class:`gerber.am_statements.AMMoirePrimitive`
An initialized AMMoirePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
position = (float(modifiers[1]), float(modifiers[2]))
diameter = float(modifiers[3])
ring_thickness = float(modifiers[4])
gap = float(modifiers[5])
max_rings = int(float(modifiers[6]))
crosshair_thickness = float(modifiers[7])
crosshair_length = float(modifiers[8])
rotation = float(modifiers[9])
return cls(code, position, diameter, ring_thickness, gap, max_rings, crosshair_thickness, crosshair_length, rotation)
def __init__(self, code, position, diameter, ring_thickness, gap, max_rings, crosshair_thickness, crosshair_length, rotation):
""" Initialize AMoirePrimitive
"""
if code != 6:
raise ValueError('MoirePrimitive code is 6')
super(AMMoirePrimitive, self).__init__(code, 'on')
validate_coordinates(position)
self.position = position
self.diameter = diameter
self.ring_thickness = ring_thickness
self.gap = gap
self.max_rings = max_rings
self.crosshair_thickness = crosshair_thickness
self.crosshair_length = crosshair_length
self.rotation = rotation
def to_inch(self):
self.position = tuple([inch(x) for x in self.position])
self.diameter = inch(self.diameter)
self.ring_thickness = inch(self.ring_thickness)
self.gap = inch(self.gap)
self.crosshair_thickness = inch(self.crosshair_thickness)
self.crosshair_length = inch(self.crosshair_length)
def to_metric(self):
self.position = tuple([metric(x) for x in self.position])
self.diameter = metric(self.diameter)
self.ring_thickness = metric(self.ring_thickness)
self.gap = metric(self.gap)
self.crosshair_thickness = metric(self.crosshair_thickness)
self.crosshair_length = metric(self.crosshair_length)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
position="%.4g,%.4g" % self.position,
diameter=self.diameter,
ring_thickness=self.ring_thickness,
gap=self.gap,
max_rings=self.max_rings,
crosshair_thickness=self.crosshair_thickness,
crosshair_length=self.crosshair_length,
rotation=self.rotation
)
fmt = "{code},{position},{diameter},{ring_thickness},{gap},{max_rings},{crosshair_thickness},{crosshair_length},{rotation}*"
return fmt.format(**data)
def to_primitive(self, units):
# Rendering moire primitives is not implemented yet; return None instead of
# raising so that callers can skip them gracefully.
return None
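# A minimal parsing sketch (hypothetical target pattern); exposure is always
# forced to 'on' and to_primitive() currently yields no renderable shape:
# m = AMMoirePrimitive.from_gerber('6,0,0,5,0.5,0.5,2,0.1,6,0*')
# m.max_rings    # -> 2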
class AMThermalPrimitive(AMPrimitive):
""" Aperture Macro Thermal primitive. Code 7.
The thermal primitive is a ring (annulus) interrupted by four gaps.
Exposure is always on.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.10:** Thermal, primitive code 7.
Parameters
----------
code : int
Thermal Primitive code. Must be 7.
position : tuple (<float>, <float>)
X and Y coordinates of thermal center
outer_diameter : float
outer diameter of thermal.
inner_diameter : float
inner diameter of thermal.
gap : float
gap thickness
rotation : float
thermal rotation about the origin.
Returns
-------
ThermalPrimitive : :class:`gerber.am_statements.AMThermalPrimitive`
An initialized AMThermalPrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
position = (float(modifiers[1]), float(modifiers[2]))
outer_diameter = float(modifiers[3])
inner_diameter = float(modifiers[4])
gap = float(modifiers[5])
rotation = float(modifiers[6])
return cls(code, position, outer_diameter, inner_diameter, gap, rotation)
def __init__(self, code, position, outer_diameter, inner_diameter, gap, rotation):
if code != 7:
raise ValueError('ThermalPrimitive code is 7')
super(AMThermalPrimitive, self).__init__(code, 'on')
validate_coordinates(position)
self.position = position
self.outer_diameter = outer_diameter
self.inner_diameter = inner_diameter
self.gap = gap
self.rotation = rotation
def to_inch(self):
self.position = tuple([inch(x) for x in self.position])
self.outer_diameter = inch(self.outer_diameter)
self.inner_diameter = inch(self.inner_diameter)
self.gap = inch(self.gap)
def to_metric(self):
self.position = tuple([metric(x) for x in self.position])
self.outer_diameter = metric(self.outer_diameter)
self.inner_diameter = metric(self.inner_diameter)
self.gap = metric(self.gap)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
position="%.4g,%.4g" % self.position,
outer_diameter=self.outer_diameter,
inner_diameter=self.inner_diameter,
gap=self.gap,
rotation=self.rotation
)
fmt = "{code},{position},{outer_diameter},{inner_diameter},{gap},{rotation}*"
return fmt.format(**data)
def _approximate_arc_cw(self, start_angle, end_angle, radius, center):
"""
Get an arc as a series of points
Parameters
----------
start_angle : The start angle in radians
end_angle : The end angle in radians
radius : Radius of the arc
center : The center point of the arc as an (x, y) tuple
Returns
-------
array of point tuples
"""
# The total sweep
sweep_angle = end_angle - start_angle
num_steps = 10
angle_step = sweep_angle / num_steps
points = []
for i in range(num_steps + 1):
current_angle = start_angle + (angle_step * i)
nextx = (center[0] + math.cos(current_angle) * radius)
nexty = (center[1] + math.sin(current_angle) * radius)
points.append((nextx, nexty))
return points
def to_primitive(self, units):
# We start with calculating the top right section, then duplicate it
inner_radius = self.inner_diameter / 2.0
outer_radius = self.outer_diameter / 2.0
# Calculate the start angle relative to the horizontal axis
inner_offset_angle = asin(self.gap / 2.0 / inner_radius)
outer_offset_angle = asin(self.gap / 2.0 / outer_radius)
rotation_rad = math.radians(self.rotation)
inner_start_angle = inner_offset_angle + rotation_rad
inner_end_angle = math.pi / 2 - inner_offset_angle + rotation_rad
outer_start_angle = outer_offset_angle + rotation_rad
outer_end_angle = math.pi / 2 - outer_offset_angle + rotation_rad
outlines = []
aperture = Circle((0, 0), 0)
points = (self._approximate_arc_cw(inner_start_angle, inner_end_angle, inner_radius, self.position)
+ list(reversed(self._approximate_arc_cw(outer_start_angle, outer_end_angle, outer_radius, self.position))))
# Add in the last point since outlines should be closed
points.append(points[0])
# There are four outlines at rotated sections
for rotation in [0, 90.0, 180.0, 270.0]:
lines = []
prev_point = rotate_point(points[0], rotation, self.position)
for point in points[1:]:
cur_point = rotate_point(point, rotation, self.position)
lines.append(Line(prev_point, cur_point, aperture))
prev_point = cur_point
outlines.append(Outline(lines, units=units, level_polarity=self._level_polarity))
return outlines
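# A minimal rendering sketch (hypothetical thermal relief pad). The gap must be
# narrower than the inner diameter, otherwise asin() raises a math domain error:
# th = AMThermalPrimitive.from_gerber('7,0,0,0.95,0.75,0.175,0*')
# quadrants = th.to_primitive(units='metric')    # -> list of four Outline objects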
class AMCenterLinePrimitive(AMPrimitive):
""" Aperture Macro Center Line primitive. Code 21.
The center line primitive is a rectangle defined by its width, height, and center point.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.4:** Center Line, primitive code 21.
Parameters
----------
code : int
Center Line Primitive code. Must be 21.
exposure : str
'on' or 'off'
width : float
Width of rectangle
height : float
Height of rectangle
center : tuple (<float>, <float>)
X and Y coordinates of line center
rotation : float
rectangle rotation about its center.
Returns
-------
CenterLinePrimitive : :class:`gerber.am_statements.AMCenterLinePrimitive`
An initialized AMCenterLinePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_primitive(cls, primitive):
width = primitive.width
height = primitive.height
center = primitive.position
rotation = math.degrees(primitive.rotation)
return cls(21, 'on', width, height, center, rotation)
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
exposure = 'on' if float(modifiers[1]) == 1 else 'off'
width = float(modifiers[2])
height = float(modifiers[3])
center = (float(modifiers[4]), float(modifiers[5]))
rotation = float(modifiers[6])
return cls(code, exposure, width, height, center, rotation)
def __init__(self, code, exposure, width, height, center, rotation):
if code != 21:
raise ValueError('CenterLinePrimitive code is 21')
super(AMCenterLinePrimitive, self).__init__(code, exposure)
self.width = width
self.height = height
validate_coordinates(center)
self.center = center
self.rotation = rotation
def to_inch(self):
self.center = tuple([inch(x) for x in self.center])
self.width = inch(self.width)
self.height = inch(self.height)
def to_metric(self):
self.center = tuple([metric(x) for x in self.center])
self.width = metric(self.width)
self.height = metric(self.height)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
exposure='1' if self.exposure == 'on' else '0',
width=self.width,
height=self.height,
center="%.4g,%.4g" % self.center,
rotation=self.rotation
)
fmt = "{code},{exposure},{width},{height},{center},{rotation}*"
return fmt.format(**data)
def to_primitive(self, units):
x = self.center[0]
y = self.center[1]
half_width = self.width / 2.0
half_height = self.height / 2.0
points = []
points.append((x - half_width, y + half_height))
points.append((x - half_width, y - half_height))
points.append((x + half_width, y - half_height))
points.append((x + half_width, y + half_height))
aperture = Circle((0, 0), 0)
lines = []
prev_point = rotate_point(points[3], self.rotation, self.center)
for point in points:
cur_point = rotate_point(point, self.rotation, self.center)
lines.append(Line(prev_point, cur_point, aperture))
# Advance the segment start so the four sides connect end-to-end
prev_point = cur_point
return Outline(lines, units=units, level_polarity=self._level_polarity)
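# A minimal parsing sketch (hypothetical rectangle centered at (3.4, 0.6)):
# cl = AMCenterLinePrimitive.from_gerber('21,1,6.8,1.2,3.4,0.6,0*')
# cl.to_primitive(units='inch')    # -> a four-sided Outline built from the corners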
class AMLowerLeftLinePrimitive(AMPrimitive):
""" Aperture Macro Lower Left Line primitive. Code 22.
The lower left line primitive is a rectangle defined by its width, height, and the lower left point.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.5:** Lower Left Line, primitive code 22.
Parameters
----------
code : int
Center Line Primitive code. Must be 22.
exposure : str
'on' or 'off'
width : float
Width of rectangle
height : float
Height of rectangle
lower_left : tuple (<float>, <float>)
X and Y coordinates of lower left corner
rotation : float
rectangle rotation about its origin.
Returns
-------
LowerLeftLinePrimitive : :class:`gerber.am_statements.AMLowerLeftLinePrimitive`
An initialized AMLowerLeftLinePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
exposure = 'on' if float(modifiers[1]) == 1 else 'off'
width = float(modifiers[2])
height = float(modifiers[3])
lower_left = (float(modifiers[4]), float(modifiers[5]))
rotation = float(modifiers[6])
return cls(code, exposure, width, height, lower_left, rotation)
def __init__(self, code, exposure, width, height, lower_left, rotation):
if code != 22:
raise ValueError('LowerLeftLinePrimitive code is 22')
super(AMLowerLeftLinePrimitive, self).__init__(code, exposure)
self.width = width
self.height = height
validate_coordinates(lower_left)
self.lower_left = lower_left
self.rotation = rotation
def to_inch(self):
self.lower_left = tuple([inch(x) for x in self.lower_left])
self.width = inch(self.width)
self.height = inch(self.height)
def to_metric(self):
self.lower_left = tuple([metric(x) for x in self.lower_left])
self.width = metric(self.width)
self.height = metric(self.height)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
exposure='1' if self.exposure == 'on' else '0',
width=self.width,
height=self.height,
lower_left="%.4g,%.4g" % self.lower_left,
rotation=self.rotation
)
fmt = "{code},{exposure},{width},{height},{lower_left},{rotation}*"
return fmt.format(**data)
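# A minimal round-trip sketch (hypothetical values). This class defines no
# to_primitive() override, so rendering falls back to the base-class stub:
# ll = AMLowerLeftLinePrimitive.from_gerber('22,1,6.8,1.2,0,0,0*')
# ll.to_gerber()    # -> '22,1,6.8,1.2,0,0,0.0*'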
class AMUnsupportPrimitive(AMPrimitive):
@classmethod
def from_gerber(cls, primitive):
return cls(primitive)
def __init__(self, primitive):
super(AMUnsupportPrimitive, self).__init__(9999)
self.primitive = primitive
def to_inch(self):
pass
def to_metric(self):
pass
def to_gerber(self, settings=None):
return self.primitive
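# A minimal pass-through sketch: unrecognized primitive text is preserved verbatim,
# so files containing it can still be read and re-written losslessly.
# raw = AMUnsupportPrimitive.from_gerber('8,1,2,3*')
# assert raw.to_gerber() == '8,1,2,3*'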
| en | 0.599759 | 2.432302 | 2
heroquest/migrations/0002_auto_20160819_1747.py | DeividVM/heroquest | 0 | 7451 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-19 17:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('heroquest', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='player',
name='armor',
),
migrations.AlterField(
model_name='player',
name='spell',
field=models.ManyToManyField(related_name='spells', to='armery.Spell', verbose_name='Hechizos'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-19 17:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('heroquest', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='player',
name='armor',
),
migrations.AlterField(
model_name='player',
name='spell',
field=models.ManyToManyField(related_name='spells', to='armery.Spell', verbose_name='Hechizos'),
),
]
| en | 0.829543 | # -*- coding: utf-8 -*- # Generated by Django 1.9.9 on 2016-08-19 17:47 | 1.606497 | 2 |
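The migration above drops Player.armor and re-points Player.spell at armery.Spell. A hedged way to apply and inspect it programmatically — call_command is standard Django, the app label comes from the file itself, and a configured settings module is assumed:

from django.core.management import call_command

# Apply this app's migrations up to 0002, then print the generated SQL for review.
call_command("migrate", "heroquest", "0002")
call_command("sqlmigrate", "heroquest", "0002_auto_20160819_1747")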
dask/array/utils.py | epervago/dask | 0 | 7452 | <filename>dask/array/utils.py
from distutils.version import LooseVersion
import difflib
import os
import numpy as np
from .core import Array
from ..async import get_sync
if LooseVersion(np.__version__) >= '1.10.0':
allclose = np.allclose
else:
def allclose(a, b, **kwargs):
if kwargs.pop('equal_nan', False):
a_nans = np.isnan(a)
b_nans = np.isnan(b)
if not (a_nans == b_nans).all():
return False
a = a[~a_nans]
b = b[~b_nans]
return np.allclose(a, b, **kwargs)
def _not_empty(x):
return x.shape and 0 not in x.shape
def _maybe_check_dtype(a, dtype=None):
# Only check dtype matches for non-empty
if _not_empty(a):
assert a.dtype == dtype
def assert_eq(a, b, **kwargs):
if isinstance(a, Array):
adt = a.dtype
a = a.compute(get=get_sync)
_maybe_check_dtype(a, adt)
else:
adt = getattr(a, 'dtype', None)
if isinstance(b, Array):
bdt = b.dtype
assert bdt is not None
b = b.compute(get=get_sync)
_maybe_check_dtype(b, bdt)
else:
bdt = getattr(b, 'dtype', None)
if str(adt) != str(bdt):
diff = difflib.ndiff(str(adt).splitlines(), str(bdt).splitlines())
raise AssertionError('string repr are different' + os.linesep +
os.linesep.join(diff))
try:
if _not_empty(a) and _not_empty(b):
# Treat all empty arrays as equivalent
assert a.shape == b.shape
assert allclose(a, b, **kwargs)
return
except TypeError:
pass
c = a == b
if isinstance(c, np.ndarray):
assert c.all()
else:
assert c
return True
| <filename>dask/array/utils.py
from distutils.version import LooseVersion
import difflib
import os
import numpy as np
from .core import Array
from ..async import get_sync
if LooseVersion(np.__version__) >= '1.10.0':
allclose = np.allclose
else:
def allclose(a, b, **kwargs):
if kwargs.pop('equal_nan', False):
a_nans = np.isnan(a)
b_nans = np.isnan(b)
if not (a_nans == b_nans).all():
return False
a = a[~a_nans]
b = b[~b_nans]
return np.allclose(a, b, **kwargs)
def _not_empty(x):
return x.shape and 0 not in x.shape
def _maybe_check_dtype(a, dtype=None):
# Only check dtype matches for non-empty
if _not_empty(a):
assert a.dtype == dtype
def assert_eq(a, b, **kwargs):
if isinstance(a, Array):
adt = a.dtype
a = a.compute(get=get_sync)
_maybe_check_dtype(a, adt)
else:
adt = getattr(a, 'dtype', None)
if isinstance(b, Array):
bdt = b.dtype
assert bdt is not None
b = b.compute(get=get_sync)
_maybe_check_dtype(b, bdt)
else:
bdt = getattr(b, 'dtype', None)
if str(adt) != str(bdt):
diff = difflib.ndiff(str(adt).splitlines(), str(bdt).splitlines())
raise AssertionError('string repr are different' + os.linesep +
os.linesep.join(diff))
try:
if _not_empty(a) and _not_empty(b):
# Treat all empty arrays as equivalent
assert a.shape == b.shape
assert allclose(a, b, **kwargs)
return
except TypeError:
pass
c = a == b
if isinstance(c, np.ndarray):
assert c.all()
else:
assert c
return True
| en | 0.666705 | # Only check dtype matches for non-empty # Treat all empty arrays as equivalent | 2.149642 | 2 |
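assert_eq above computes the dask side with the synchronous scheduler, compares dtype strings, and treats empty arrays as equivalent. A minimal usage sketch (assumes the dask-era API this file belongs to, with dask.array importable alongside it):

import numpy as np
import dask.array as da
from dask.array.utils import assert_eq

# Compare a lazy dask expression against its eager numpy counterpart.
x = da.ones((4, 4), chunks=(2, 2))
assert_eq(x + 1, np.ones((4, 4)) + 1)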
launchpad_py/__init__.py | inniyah/launchpad-py | 1 | 7453 | <reponame>inniyah/launchpad-py<gh_stars>1-10
# more specific selections for Python 3 (ASkr, 2/2018)
from launchpad_py.launchpad import Launchpad
from launchpad_py.launchpad import LaunchpadMk2
from launchpad_py.launchpad import LaunchpadPro
from launchpad_py.launchpad import LaunchControlXL
from launchpad_py.launchpad import LaunchKeyMini
from launchpad_py.launchpad import Dicer
from launchpad_py import charset
| # more specific selections for Python 3 (ASkr, 2/2018)
from launchpad_py.launchpad import Launchpad
from launchpad_py.launchpad import LaunchpadMk2
from launchpad_py.launchpad import LaunchpadPro
from launchpad_py.launchpad import LaunchControlXL
from launchpad_py.launchpad import LaunchKeyMini
from launchpad_py.launchpad import Dicer
from launchpad_py import charset | en | 0.65638 | # more specific selections for Python 3 (ASkr, 2/2018) | 1.251906 | 1 |
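Since the package re-exports the device classes, a consumer can drive a pad grid directly. A hedged sketch — Open/LedCtrlXY/Close follow launchpad_py's documented method names, but treat the exact signatures as assumptions:

import launchpad_py

lp = launchpad_py.Launchpad()
if lp.Open():                 # returns True if a device was found
    lp.LedCtrlXY(0, 0, 3, 0)  # x, y, red, green intensities
    lp.Close()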
networks/adabins/utils.py | EvoCargo/mono_depth | 0 | 7454 | <reponame>EvoCargo/mono_depth
import base64
import math
import re
from io import BytesIO
import matplotlib.cm
import numpy as np
import torch
import torch.nn
from PIL import Image
# Compute edge magnitudes
from scipy import ndimage
class RunningAverage:
def __init__(self):
self.avg = 0
self.count = 0
def append(self, value):
self.avg = (value + self.count * self.avg) / (self.count + 1)
self.count += 1
def get_value(self):
return self.avg
def denormalize(x, device='cpu'):
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
return x * std + mean
class RunningAverageDict:
def __init__(self):
self._dict = None
def update(self, new_dict):
if self._dict is None:
self._dict = dict()
for key, _ in new_dict.items():
self._dict[key] = RunningAverage()
for key, value in new_dict.items():
self._dict[key].append(value)
def get_value(self):
return {key: value.get_value() for key, value in self._dict.items()}
def colorize(value, vmin=10, vmax=1000, cmap='magma_r'):
value = value.cpu().numpy()[0, :, :]
invalid_mask = value == -1
# normalize
vmin = value.min() if vmin is None else vmin
vmax = value.max() if vmax is None else vmax
if vmin != vmax:
value = (value - vmin) / (vmax - vmin) # vmin..vmax
else:
# Avoid 0-division
value = value * 0.0
# squeeze last dim if it exists
# value = value.squeeze(axis=0)
cmapper = matplotlib.cm.get_cmap(cmap)
value = cmapper(value, bytes=True) # (nxmx4)
value[invalid_mask] = 255
img = value[:, :, :3]
# return img.transpose((2, 0, 1))
return img
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()
return dict(
a1=a1,
a2=a2,
a3=a3,
abs_rel=abs_rel,
rmse=rmse,
log_10=log_10,
rmse_log=rmse_log,
silog=silog,
sq_rel=sq_rel,
)
# Demo Utilities
def b64_to_pil(b64string):
image_data = re.sub('^data:image/.+;base64,', '', b64string)
# image = Image.open(cStringIO.StringIO(image_data))
return Image.open(BytesIO(base64.b64decode(image_data)))
def edges(d):
dx = ndimage.sobel(d, 0) # horizontal derivative
dy = ndimage.sobel(d, 1) # vertical derivative
return np.abs(dx) + np.abs(dy)
class PointCloudHelper:
def __init__(self, width=640, height=480):
self.xx, self.yy = self.worldCoords(width, height)
def worldCoords(self, width=640, height=480):
hfov_degrees, vfov_degrees = 57, 43
hFov = math.radians(hfov_degrees)
vFov = math.radians(vfov_degrees)
cx, cy = width / 2, height / 2
fx = width / (2 * math.tan(hFov / 2))
fy = height / (2 * math.tan(vFov / 2))
xx, yy = np.tile(range(width), height), np.repeat(range(height), width)
xx = (xx - cx) / fx
yy = (yy - cy) / fy
return xx, yy
def depth_to_points(self, depth):
depth[edges(depth) > 0.3] = np.nan # Hide depth edges
length = depth.shape[0] * depth.shape[1]
# depth[edges(depth) > 0.3] = 1e6 # Hide depth edges
z = depth.reshape(length)
return np.dstack((self.xx * z, self.yy * z, z)).reshape((length, 3))
| import base64
import math
import re
from io import BytesIO
import matplotlib.cm
import numpy as np
import torch
import torch.nn
from PIL import Image
# Compute edge magnitudes
from scipy import ndimage
class RunningAverage:
def __init__(self):
self.avg = 0
self.count = 0
def append(self, value):
self.avg = (value + self.count * self.avg) / (self.count + 1)
self.count += 1
def get_value(self):
return self.avg
def denormalize(x, device='cpu'):
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
return x * std + mean
class RunningAverageDict:
def __init__(self):
self._dict = None
def update(self, new_dict):
if self._dict is None:
self._dict = dict()
for key, _ in new_dict.items():
self._dict[key] = RunningAverage()
for key, value in new_dict.items():
self._dict[key].append(value)
def get_value(self):
return {key: value.get_value() for key, value in self._dict.items()}
def colorize(value, vmin=10, vmax=1000, cmap='magma_r'):
value = value.cpu().numpy()[0, :, :]
invalid_mask = value == -1
# normalize
vmin = value.min() if vmin is None else vmin
vmax = value.max() if vmax is None else vmax
if vmin != vmax:
value = (value - vmin) / (vmax - vmin) # vmin..vmax
else:
# Avoid 0-division
value = value * 0.0
# squeeze last dim if it exists
# value = value.squeeze(axis=0)
cmapper = matplotlib.cm.get_cmap(cmap)
value = cmapper(value, bytes=True) # (nxmx4)
value[invalid_mask] = 255
img = value[:, :, :3]
# return img.transpose((2, 0, 1))
return img
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()
return dict(
a1=a1,
a2=a2,
a3=a3,
abs_rel=abs_rel,
rmse=rmse,
log_10=log_10,
rmse_log=rmse_log,
silog=silog,
sq_rel=sq_rel,
)
# Demo Utilities
def b64_to_pil(b64string):
image_data = re.sub('^data:image/.+;base64,', '', b64string)
# image = Image.open(cStringIO.StringIO(image_data))
return Image.open(BytesIO(base64.b64decode(image_data)))
def edges(d):
dx = ndimage.sobel(d, 0) # horizontal derivative
dy = ndimage.sobel(d, 1) # vertical derivative
return np.abs(dx) + np.abs(dy)
class PointCloudHelper:
def __init__(self, width=640, height=480):
self.xx, self.yy = self.worldCoords(width, height)
def worldCoords(self, width=640, height=480):
hfov_degrees, vfov_degrees = 57, 43
hFov = math.radians(hfov_degrees)
vFov = math.radians(vfov_degrees)
cx, cy = width / 2, height / 2
fx = width / (2 * math.tan(hFov / 2))
fy = height / (2 * math.tan(vFov / 2))
xx, yy = np.tile(range(width), height), np.repeat(range(height), width)
xx = (xx - cx) / fx
yy = (yy - cy) / fy
return xx, yy
def depth_to_points(self, depth):
depth[edges(depth) > 0.3] = np.nan # Hide depth edges
length = depth.shape[0] * depth.shape[1]
# depth[edges(depth) > 0.3] = 1e6 # Hide depth edges
z = depth.reshape(length)
return np.dstack((self.xx * z, self.yy * z, z)).reshape((length, 3)) | en | 0.328984 | # Compute edge magnitudes # normalize # vmin..vmax # Avoid 0-division # squeeze last dim if it exists # value = value.squeeze(axis=0) # (nxmx4) # return img.transpose((2, 0, 1)) # Demo Utilities # image = Image.open(cStringIO.StringIO(image_data)) # horizontal derivative # vertical derivative # Hide depth edges # depth[edges(depth) > 0.3] = 1e6 # Hide depth edges | 2.101586 | 2 |
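compute_errors and RunningAverageDict above are meant to be combined across an evaluation loop. A sketch under the assumption that gt/pred are positive, same-shape depth maps and that this module is importable as networks.adabins.utils:

import numpy as np
from networks.adabins.utils import RunningAverageDict, compute_errors  # assumed import path

metrics = RunningAverageDict()
for _ in range(3):
    gt = np.random.uniform(1.0, 10.0, size=(4, 4))
    pred = gt * np.random.uniform(0.9, 1.1, size=gt.shape)
    metrics.update(compute_errors(gt, pred))
print(metrics.get_value())  # running means of a1/a2/a3/rmse/silog/...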
gdsfactory/types.py | simbilod/gdsfactory | 0 | 7455 | <gh_stars>0
"""In programming, a factory is a function that returns an object.
Functions are easy to understand because they have clear inputs and outputs.
Most gdsfactory functions take some inputs and return a Component object.
Some of these input parameters are also functions.
- Component: Object with.
- name.
- references: to other components (x, y, rotation).
- polygons in different layers.
- ports dict.
- Route: dataclass with 3 attributes.
- references: list of references (straights, bends and tapers).
- ports: dict(input=PortIn, output=PortOut).
- length: how long is this route?
Factories:
- ComponentFactory: function that returns a Component.
- RouteFactory: function that returns a Route.
Specs:
- ComponentSpec: Component, ComponentFactory or dict(component=mzi, settings=dict(delta_length=20)).
- LayerSpec: (3, 0), 3 (assumes 0 as datatype) or string.
"""
import json
import pathlib
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
import numpy as np
from omegaconf import OmegaConf
from phidl.device_layout import Label as LabelPhidl
from phidl.device_layout import Path
from pydantic import BaseModel, Extra
from typing_extensions import Literal
from gdsfactory.component import Component, ComponentReference
from gdsfactory.cross_section import CrossSection
from gdsfactory.port import Port
Anchor = Literal[
"ce",
"cw",
"nc",
"ne",
"nw",
"sc",
"se",
"sw",
"center",
"cc",
]
Axis = Literal["x", "y"]
NSEW = Literal["N", "S", "E", "W"]
class Label(LabelPhidl):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
"""check with pydantic Label valid type"""
assert isinstance(v, LabelPhidl), f"TypeError, Got {type(v)}, expecting Label"
return v
Float2 = Tuple[float, float]
Float3 = Tuple[float, float, float]
Floats = Tuple[float, ...]
Strs = Tuple[str, ...]
Int2 = Tuple[int, int]
Int3 = Tuple[int, int, int]
Ints = Tuple[int, ...]
Layer = Tuple[int, int]
Layers = Tuple[Layer, ...]
LayerSpec = NewType("LayerSpec", Union[Layer, int, str, None])
LayerSpecs = Tuple[LayerSpec, ...]
ComponentFactory = Callable[..., Component]
ComponentFactoryDict = Dict[str, ComponentFactory]
PathFactory = Callable[..., Path]
PathType = Union[str, pathlib.Path]
PathTypes = Tuple[PathType, ...]
ComponentOrPath = Union[PathType, Component]
ComponentOrReference = Union[Component, ComponentReference]
NameToFunctionDict = Dict[str, ComponentFactory]
Number = Union[float, int]
Coordinate = Tuple[float, float]
Coordinates = Tuple[Coordinate, ...]
ComponentOrPath = Union[Component, PathType]
CrossSectionFactory = Callable[..., CrossSection]
CrossSectionOrFactory = Union[CrossSection, Callable[..., CrossSection]]
PortSymmetries = Dict[str, Dict[str, List[str]]]
PortsDict = Dict[str, Port]
PortsList = Dict[str, Port]
ComponentSpec = NewType(
"ComponentSpec", Union[str, ComponentFactory, Component, Dict[str, Any]]
)
ComponentSpecOrList = Union[ComponentSpec, List[ComponentSpec]]
CellSpec = Union[str, ComponentFactory, Dict[str, Any]]
ComponentSpecDict = Dict[str, ComponentSpec]
CrossSectionSpec = NewType(
"CrossSectionSpec", Union[str, CrossSectionFactory, CrossSection, Dict[str, Any]]
)
MultiCrossSectionAngleSpec = List[Tuple[CrossSectionSpec, Tuple[int, ...]]]
class Route(BaseModel):
references: List[ComponentReference]
labels: Optional[List[Label]] = None
ports: Tuple[Port, Port]
length: float
class Config:
extra = Extra.forbid
class Routes(BaseModel):
references: List[ComponentReference]
lengths: List[float]
ports: Optional[List[Port]] = None
bend_radius: Optional[List[float]] = None
class Config:
extra = Extra.forbid
class ComponentModel(BaseModel):
component: Union[str, Dict[str, Any]]
settings: Optional[Dict[str, Any]]
class Config:
extra = Extra.forbid
class PlacementModel(BaseModel):
x: Union[str, float] = 0
y: Union[str, float] = 0
xmin: Optional[Union[str, float]] = None
ymin: Optional[Union[str, float]] = None
xmax: Optional[Union[str, float]] = None
ymax: Optional[Union[str, float]] = None
dx: float = 0
dy: float = 0
port: Optional[Union[str, Anchor]] = None
rotation: int = 0
mirror: bool = False
class Config:
extra = Extra.forbid
class RouteModel(BaseModel):
links: Dict[str, str]
settings: Optional[Dict[str, Any]] = None
routing_strategy: Optional[str] = None
class Config:
extra = Extra.forbid
class NetlistModel(BaseModel):
"""Netlist defined component.
Attributes:
instances: dict of instances (name, settings, component).
placements: dict of placements.
connections: dict of connections.
routes: dict of routes.
name: component name.
info: information (polarization, wavelength ...).
settings: input variables.
pdk: pdk module name.
ports: exposed component ports.
"""
instances: Dict[str, ComponentModel]
placements: Optional[Dict[str, PlacementModel]] = None
connections: Optional[List[Dict[str, str]]] = None
routes: Optional[Dict[str, RouteModel]] = None
name: Optional[str] = None
info: Optional[Dict[str, Any]] = None
settings: Optional[Dict[str, Any]] = None
pdk: Optional[str] = None
ports: Optional[Dict[str, str]] = None
class Config:
extra = Extra.forbid
# factory: Dict[str, ComponentFactory] = {}
# def add_instance(self, name: str, component: str, **settings) -> None:
# assert component in self.factory.keys()
# component_model = ComponentModel(component=component, settings=settings)
# self.instances[name] = component_model
# def add_route(self, port1: Port, port2: Port, **settings) -> None:
# self.routes = component_model
RouteFactory = Callable[..., Route]
class TypedArray(np.ndarray):
"""based on https://github.com/samuelcolvin/pydantic/issues/380"""
@classmethod
def __get_validators__(cls):
yield cls.validate_type
@classmethod
def validate_type(cls, val):
return np.array(val, dtype=cls.inner_type)
class ArrayMeta(type):
def __getitem__(self, t):
return type("Array", (TypedArray,), {"inner_type": t})
class Array(np.ndarray, metaclass=ArrayMeta):
pass
__all__ = (
"ComponentFactory",
"ComponentFactoryDict",
"ComponentSpec",
"ComponentOrPath",
"ComponentOrReference",
"Coordinate",
"Coordinates",
"CrossSectionFactory",
"CrossSectionOrFactory",
"MultiCrossSectionAngleSpec",
"Float2",
"Float3",
"Floats",
"Int2",
"Int3",
"Ints",
"Layer",
"Layers",
"NameToFunctionDict",
"Number",
"PathType",
"PathTypes",
"Route",
"RouteFactory",
"Routes",
"Strs",
)
def write_schema(model: BaseModel = NetlistModel) -> None:
s = model.schema_json()
d = OmegaConf.create(s)
dirpath = pathlib.Path(__file__).parent / "schemas"
f1 = dirpath / "netlist.yaml"
f1.write_text(OmegaConf.to_yaml(d))
f2 = dirpath / "netlist.json"
f2.write_text(json.dumps(OmegaConf.to_container(d)))
if __name__ == "__main__":
write_schema()
import jsonschema
import yaml
from gdsfactory.config import CONFIG
schema_path = CONFIG["schema_netlist"]
schema_dict = json.loads(schema_path.read_text())
yaml_text = """
name: mzi
pdk: ubcpdk
settings:
dy: -90
info:
polarization: te
wavelength: 1.55
description: mzi for ubcpdk
instances:
yr:
component: y_splitter
yl:
component: y_splitter
placements:
yr:
rotation: 180
x: 100
y: 0
routes:
route_top:
links:
yl,opt2: yr,opt3
settings:
cross_section: strip
route_bot:
links:
yl,opt3: yr,opt2
routing_strategy: get_bundle_from_steps
settings:
steps: [dx: 30, dy: '${settings.dy}', dx: 20]
cross_section: strip
ports:
o1: yl,opt1
o2: yr,opt1
"""
yaml_dict = yaml.safe_load(yaml_text)
jsonschema.validate(yaml_dict, schema_dict)
# from gdsfactory.components import factory
# c = NetlistModel(factory=factory)
# c.add_instance("mmi1", "mmi1x2", length=13.3)
| """In programming, a factory is a function that returns an object.
Functions are easy to understand because they have clear inputs and outputs.
Most gdsfactory functions take some inputs and return a Component object.
Some of these input parameters are also functions.
- Component: Object with.
- name.
- references: to other components (x, y, rotation).
- polygons in different layers.
- ports dict.
- Route: dataclass with 3 attributes.
- references: list of references (straights, bends and tapers).
- ports: dict(input=PortIn, output=PortOut).
- length: how long is this route?
Factories:
- ComponentFactory: function that returns a Component.
- RouteFactory: function that returns a Route.
Specs:
- ComponentSpec: Component, ComponentFactory or dict(component=mzi, settings=dict(delta_length=20)).
- LayerSpec: (3, 0), 3 (assumes 0 as datatype) or string.
"""
import json
import pathlib
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
import numpy as np
from omegaconf import OmegaConf
from phidl.device_layout import Label as LabelPhidl
from phidl.device_layout import Path
from pydantic import BaseModel, Extra
from typing_extensions import Literal
from gdsfactory.component import Component, ComponentReference
from gdsfactory.cross_section import CrossSection
from gdsfactory.port import Port
Anchor = Literal[
"ce",
"cw",
"nc",
"ne",
"nw",
"sc",
"se",
"sw",
"center",
"cc",
]
Axis = Literal["x", "y"]
NSEW = Literal["N", "S", "E", "W"]
class Label(LabelPhidl):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
"""check with pydantic Label valid type"""
assert isinstance(v, LabelPhidl), f"TypeError, Got {type(v)}, expecting Label"
return v
Float2 = Tuple[float, float]
Float3 = Tuple[float, float, float]
Floats = Tuple[float, ...]
Strs = Tuple[str, ...]
Int2 = Tuple[int, int]
Int3 = Tuple[int, int, int]
Ints = Tuple[int, ...]
Layer = Tuple[int, int]
Layers = Tuple[Layer, ...]
LayerSpec = NewType("LayerSpec", Union[Layer, int, str, None])
LayerSpecs = Tuple[LayerSpec, ...]
ComponentFactory = Callable[..., Component]
ComponentFactoryDict = Dict[str, ComponentFactory]
PathFactory = Callable[..., Path]
PathType = Union[str, pathlib.Path]
PathTypes = Tuple[PathType, ...]
ComponentOrPath = Union[PathType, Component]
ComponentOrReference = Union[Component, ComponentReference]
NameToFunctionDict = Dict[str, ComponentFactory]
Number = Union[float, int]
Coordinate = Tuple[float, float]
Coordinates = Tuple[Coordinate, ...]
ComponentOrPath = Union[Component, PathType]
CrossSectionFactory = Callable[..., CrossSection]
CrossSectionOrFactory = Union[CrossSection, Callable[..., CrossSection]]
PortSymmetries = Dict[str, Dict[str, List[str]]]
PortsDict = Dict[str, Port]
PortsList = Dict[str, Port]
ComponentSpec = NewType(
"ComponentSpec", Union[str, ComponentFactory, Component, Dict[str, Any]]
)
ComponentSpecOrList = Union[ComponentSpec, List[ComponentSpec]]
CellSpec = Union[str, ComponentFactory, Dict[str, Any]]
ComponentSpecDict = Dict[str, ComponentSpec]
CrossSectionSpec = NewType(
"CrossSectionSpec", Union[str, CrossSectionFactory, CrossSection, Dict[str, Any]]
)
MultiCrossSectionAngleSpec = List[Tuple[CrossSectionSpec, Tuple[int, ...]]]
class Route(BaseModel):
references: List[ComponentReference]
labels: Optional[List[Label]] = None
ports: Tuple[Port, Port]
length: float
class Config:
extra = Extra.forbid
class Routes(BaseModel):
references: List[ComponentReference]
lengths: List[float]
ports: Optional[List[Port]] = None
bend_radius: Optional[List[float]] = None
class Config:
extra = Extra.forbid
class ComponentModel(BaseModel):
component: Union[str, Dict[str, Any]]
settings: Optional[Dict[str, Any]]
class Config:
extra = Extra.forbid
class PlacementModel(BaseModel):
x: Union[str, float] = 0
y: Union[str, float] = 0
xmin: Optional[Union[str, float]] = None
ymin: Optional[Union[str, float]] = None
xmax: Optional[Union[str, float]] = None
ymax: Optional[Union[str, float]] = None
dx: float = 0
dy: float = 0
port: Optional[Union[str, Anchor]] = None
rotation: int = 0
mirror: bool = False
class Config:
extra = Extra.forbid
class RouteModel(BaseModel):
links: Dict[str, str]
settings: Optional[Dict[str, Any]] = None
routing_strategy: Optional[str] = None
class Config:
extra = Extra.forbid
class NetlistModel(BaseModel):
"""Netlist defined component.
Attributes:
instances: dict of instances (name, settings, component).
placements: dict of placements.
connections: dict of connections.
routes: dict of routes.
name: component name.
info: information (polarization, wavelength ...).
settings: input variables.
pdk: pdk module name.
ports: exposed component ports.
"""
instances: Dict[str, ComponentModel]
placements: Optional[Dict[str, PlacementModel]] = None
connections: Optional[List[Dict[str, str]]] = None
routes: Optional[Dict[str, RouteModel]] = None
name: Optional[str] = None
info: Optional[Dict[str, Any]] = None
settings: Optional[Dict[str, Any]] = None
pdk: Optional[str] = None
ports: Optional[Dict[str, str]] = None
class Config:
extra = Extra.forbid
# factory: Dict[str, ComponentFactory] = {}
# def add_instance(self, name: str, component: str, **settings) -> None:
# assert component in self.factory.keys()
# component_model = ComponentModel(component=component, settings=settings)
# self.instances[name] = component_model
# def add_route(self, port1: Port, port2: Port, **settings) -> None:
# self.routes = component_model
RouteFactory = Callable[..., Route]
class TypedArray(np.ndarray):
"""based on https://github.com/samuelcolvin/pydantic/issues/380"""
@classmethod
def __get_validators__(cls):
yield cls.validate_type
@classmethod
def validate_type(cls, val):
return np.array(val, dtype=cls.inner_type)
class ArrayMeta(type):
def __getitem__(self, t):
return type("Array", (TypedArray,), {"inner_type": t})
class Array(np.ndarray, metaclass=ArrayMeta):
pass
__all__ = (
"ComponentFactory",
"ComponentFactoryDict",
"ComponentSpec",
"ComponentOrPath",
"ComponentOrReference",
"Coordinate",
"Coordinates",
"CrossSectionFactory",
"CrossSectionOrFactory",
"MultiCrossSectionAngleSpec",
"Float2",
"Float3",
"Floats",
"Int2",
"Int3",
"Ints",
"Layer",
"Layers",
"NameToFunctionDict",
"Number",
"PathType",
"PathTypes",
"Route",
"RouteFactory",
"Routes",
"Strs",
)
def write_schema(model: BaseModel = NetlistModel) -> None:
s = model.schema_json()
d = OmegaConf.create(s)
dirpath = pathlib.Path(__file__).parent / "schemas"
f1 = dirpath / "netlist.yaml"
f1.write_text(OmegaConf.to_yaml(d))
f2 = dirpath / "netlist.json"
f2.write_text(json.dumps(OmegaConf.to_container(d)))
if __name__ == "__main__":
write_schema()
import jsonschema
import yaml
from gdsfactory.config import CONFIG
schema_path = CONFIG["schema_netlist"]
schema_dict = json.loads(schema_path.read_text())
yaml_text = """
name: mzi
pdk: ubcpdk
settings:
dy: -90
info:
polarization: te
wavelength: 1.55
description: mzi for ubcpdk
instances:
yr:
component: y_splitter
yl:
component: y_splitter
placements:
yr:
rotation: 180
x: 100
y: 0
routes:
route_top:
links:
yl,opt2: yr,opt3
settings:
cross_section: strip
route_bot:
links:
yl,opt3: yr,opt2
routing_strategy: get_bundle_from_steps
settings:
steps: [dx: 30, dy: '${settings.dy}', dx: 20]
cross_section: strip
ports:
o1: yl,opt1
o2: yr,opt1
"""
yaml_dict = yaml.safe_load(yaml_text)
jsonschema.validate(yaml_dict, schema_dict)
# from gdsfactory.components import factory
# c = NetlistModel(factory=factory)
# c.add_instance("mmi1", "mmi1x2", length=13.3) | en | 0.67182 | In programming, a factory is a function that returns an object. Functions are easy to understand because they have clear inputs and outputs. Most gdsfactory functions take some inputs and return a Component object. Some of these inputs parameters are also functions. - Component: Object with. - name. - references: to other components (x, y, rotation). - polygons in different layers. - ports dict. - Route: dataclass with 3 attributes. - references: list of references (straights, bends and tapers). - ports: dict(input=PortIn, output=PortOut). - length: how long is this route? Factories: - ComponentFactory: function that returns a Component. - RouteFactory: function that returns a Route. Specs: - ComponentSpec: Component, ComponentFactory or dict(component=mzi, settings=dict(delta_length=20)). - LayerSpec: (3, 0), 3 (asumes 0 as datatype) or string. check with pydantic Label valid type Netlist defined component. Attributes: instances: dict of instances (name, settings, component). placements: dict of placements. connections: dict of connections. routes: dict of routes. name: component name. info: information (polarization, wavelength ...). settings: input variables. pdk: pdk module name. ports: exposed component ports. # factory: Dict[str, ComponentFactory] = {} # def add_instance(self, name: str, component: str, **settings) -> None: # assert component in self.factory.keys() # component_model = ComponentModel(component=component, settings=settings) # self.instances[name] = component_model # def add_route(self, port1: Port, port2: Port, **settings) -> None: # self.routes = component_model based on https://github.com/samuelcolvin/pydantic/issues/380 name: mzi pdk: ubcpdk settings: dy: -90 info: polarization: te wavelength: 1.55 description: mzi for ubcpdk instances: yr: component: y_splitter yl: component: y_splitter placements: yr: rotation: 180 x: 100 y: 0 routes: route_top: links: yl,opt2: yr,opt3 settings: cross_section: strip route_bot: links: yl,opt3: yr,opt2 routing_strategy: get_bundle_from_steps settings: steps: [dx: 30, dy: '${settings.dy}', dx: 20] cross_section: strip ports: o1: yl,opt1 o2: yr,opt1 # from gdsfactory.components import factory # c = NetlistModel(factory=factory) # c.add_instance("mmi1", "mmi1x2", length=13.3) | 3.805895 | 4 |
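The Array[...] metaclass defined above lets a pydantic model coerce inputs into typed numpy arrays. A small sketch of how that plugs in (assumes pydantic v1 semantics, matching the imports in the file):

import numpy as np
from pydantic import BaseModel
from gdsfactory.types import Array

class Waypoints(BaseModel):
    points: Array[float]  # coerced via np.array(val, dtype=float)

w = Waypoints(points=[[0, 0], [10.5, 2]])
assert isinstance(w.points, np.ndarray)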
tests/_site/myauth/models.py | ahmetdaglarbas/e-commerce | 2 | 7456 | # -*- coding: utf-8 -*-
# Code will only work with Django >= 1.5. See tests/config.py
import re
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.core import validators
from django.contrib.auth.models import BaseUserManager
from oscar.apps.customer.abstract_models import AbstractUser
class CustomUserManager(BaseUserManager):
def create_user(self, username, email, password):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=CustomUserManager.normalize_email(email),
username=username,
is_active=True,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password=password)
u.is_admin = True
u.is_staff = True
u.save(using=self._db)
return u
class User(AbstractUser):
"""
Custom user based on Oscar's AbstractUser
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
            validators.RegexValidator(re.compile(r'^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
extra_field = models.CharField(
_('Nobody needs me'), max_length=5, blank=True)
objects = CustomUserManager()
class Meta:
app_label = 'myauth'
| # -*- coding: utf-8 -*-
# Code will only work with Django >= 1.5. See tests/config.py
import re
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.core import validators
from django.contrib.auth.models import BaseUserManager
from oscar.apps.customer.abstract_models import AbstractUser
class CustomUserManager(BaseUserManager):
def create_user(self, username, email, password):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=CustomUserManager.normalize_email(email),
username=username,
is_active=True,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password=password)
u.is_admin = True
u.is_staff = True
u.save(using=self._db)
return u
class User(AbstractUser):
"""
Custom user based on Oscar's AbstractUser
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
            validators.RegexValidator(re.compile(r'^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
extra_field = models.CharField(
_('Nobody needs me'), max_length=5, blank=True)
objects = CustomUserManager()
class Meta:
app_label = 'myauth'
| en | 0.859163 | # -*- coding: utf-8 -*- # Code will only work with Django >= 1.5. See tests/config.py Creates and saves a User with the given email and password. Custom user based on Oscar's AbstractUser | 2.390666 | 2 |
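With CustomUserManager wired in as objects, account creation goes through create_user, which enforces a non-empty email and hashes the password. A hedged usage sketch (assumes a test database and the myauth app installed):

from myauth.models import User

u = User.objects.create_user("alice", "alice@example.com", "s3cret")
assert u.is_active and u.check_password("s3cret")
admin = User.objects.create_superuser("root", "root@example.com", "pw")
assert admin.is_staff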
5 - FC layers retraining/4 - FC weights to C++ code/weights_pck_to_cpp_unrolled_loop.py | brouwa/CNNs-on-FPSPs | 1 | 7457 | import pickle
import numpy as np
INPUT_FILENAME = 'NP_WEIGHTS.pck'
PRECISION = 100
# Open weights
fc1_k, fc1_b, fc2_k, fc2_b = pickle.load(
open(INPUT_FILENAME, 'rb'))
# Round them
fc1_k, fc1_b, fc2_k, fc2_b = fc1_k*PRECISION//1, fc1_b*PRECISION//1, fc2_k*PRECISION//1, fc2_b*PRECISION*PRECISION//1
fc1_k, fc1_b, fc2_k, fc2_b = fc1_k.astype(np.int), fc1_b.astype(np.int), fc2_k.astype(np.int), fc2_b.astype(np.int)
"""
0: GENERATE C++ ARRAYS, TO BE USED IN A STANDARD LOOP
"""
OUTPUT_FILENAME = 'fc_weights_arrays.cpp'
def to_cpp_1_dim(array):
txt = '{\t'
for coeff in array[:-1]:
txt += str(coeff) + ',\t'
txt += str(array[-1]) + '}'
return txt
def to_cpp_2_dims(array):
txt = '{'
for line in array[:-1]:
txt += to_cpp_1_dim(line) + ',\n'
txt += to_cpp_1_dim(array[-1]) + '}'
return txt
# Generate .cpp text
out = 'int fc1_k[' + str(fc1_k.shape[0]) + '][' + str(fc1_k.shape[1]) + '] = '
out += to_cpp_2_dims(fc1_k) + ';\n\n'
out += 'int fc1_b[' + str(fc1_b.shape[0]) + '] = '
out += to_cpp_1_dim(fc1_b) + ';\n\n'
out += 'int fc2_k[' + str(fc2_k.shape[0]) + '][' + str(fc2_k.shape[1]) + '] = '
out += to_cpp_2_dims(fc2_k) + ';\n\n'
out += 'int fc2_b[' + str(fc2_b.shape[0]) + '] = '
out += to_cpp_1_dim(fc2_b) + ';\n\n'
# Output it
with open(OUTPUT_FILENAME, 'w+', encoding='utf-8') as f:
f.write(out)
"""
1: GENERATE C++ LOOP, USING THE ABOVE ARRAY
"""
OUTPUT_FILENAME = 'fc_loop_unrolled.cpp'
def to_cpp_function(k, b, function_name, in_dim, out_dim):
"""
Generates C++ code for computing a fully connected layer of int values,
    referencing the weight array k and bias array b by their C++ names
    in the generated source. The function is named after function_name.
"""
out = ""
out += "inline void "+function_name+"(int in["+str(in_dim)+"], int out["+str(out_dim)+"]){\n"
for j in range(out_dim):
out += "\tout["+str(j)+"] = \n"
for i in range(in_dim):
out += "\t\tin["+str(i)+"]*("+k+"["+str(i)+"]["+str(j)+"]) +\n"
out += "\t\t("+b+"["+str(j)+"]);\n"
out += "}\n\n"
return out
## Generate .cpp text
out = ""
# First layer
out += to_cpp_function('fc1_k', 'fc1_b', 'fc_1', 27, 50)
# Second layer
out += to_cpp_function('fc2_k', 'fc2_b', 'fc_2', 50, 10)
# Output it
with open(OUTPUT_FILENAME, 'w+', encoding='utf-8') as f:
f.write(out)
"""
3: GENERATE C++ LOOP, WITH HARDCODED WEIGHTS
"""
OUTPUT_FILENAME = 'fc_loop_unrolled_hardcoded_weights.cpp'
def to_cpp_function(k, b, function_name):
"""
Generates C++ code for computing a fully connected layer of int values,
applying weights k and bias b, with hardcoded values in the source code.
    The function is named after function_name.
"""
out = ""
(in_dim, out_dim) = k.shape
out += "inline void "+function_name+"(int in["+str(in_dim)+"], int out["+str(out_dim)+"]){\n"
for j in range(out_dim):
out += "\tout["+str(j)+"] = \n"
for i in range(in_dim):
out += "\t\tin["+str(i)+"]*("+str(k[i][j])+") +\n"
out += "\t\t("+str(b[j])+");\n"
out += "}\n\n"
return out
## Generate .cpp text
out = ""
# First layer
out += to_cpp_function(fc1_k, fc1_b, 'fc_1')
# Second layer
out += to_cpp_function(fc2_k, fc2_b, 'fc_2')
# Output it
with open(OUTPUT_FILENAME, 'w+', encoding='utf-8') as f:
f.write(out)
| import pickle
import numpy as np
INPUT_FILENAME = 'NP_WEIGHTS.pck'
PRECISION = 100
# Open weights
fc1_k, fc1_b, fc2_k, fc2_b = pickle.load(
open(INPUT_FILENAME, 'rb'))
# Round them
fc1_k, fc1_b, fc2_k, fc2_b = fc1_k*PRECISION//1, fc1_b*PRECISION//1, fc2_k*PRECISION//1, fc2_b*PRECISION*PRECISION//1
fc1_k, fc1_b, fc2_k, fc2_b = fc1_k.astype(np.int), fc1_b.astype(np.int), fc2_k.astype(np.int), fc2_b.astype(np.int)
"""
0: GENERATE C++ ARRAYS, TO BE USED IN A STANDARD LOOP
"""
OUTPUT_FILENAME = 'fc_weights_arrays.cpp'
def to_cpp_1_dim(array):
txt = '{\t'
for coeff in array[:-1]:
txt += str(coeff) + ',\t'
txt += str(array[-1]) + '}'
return txt
def to_cpp_2_dims(array):
txt = '{'
for line in array[:-1]:
txt += to_cpp_1_dim(line) + ',\n'
txt += to_cpp_1_dim(array[-1]) + '}'
return txt
# Generate .cpp text
out = 'int fc1_k[' + str(fc1_k.shape[0]) + '][' + str(fc1_k.shape[1]) + '] = '
out += to_cpp_2_dims(fc1_k) + ';\n\n'
out += 'int fc1_b[' + str(fc1_b.shape[0]) + '] = '
out += to_cpp_1_dim(fc1_b) + ';\n\n'
out += 'int fc2_k[' + str(fc2_k.shape[0]) + '][' + str(fc2_k.shape[1]) + '] = '
out += to_cpp_2_dims(fc2_k) + ';\n\n'
out += 'int fc2_b[' + str(fc2_b.shape[0]) + '] = '
out += to_cpp_1_dim(fc2_b) + ';\n\n'
# Output it
with open(OUTPUT_FILENAME, 'w+', encoding='utf-8') as f:
f.write(out)
"""
1: GENERATE C++ LOOP, USING THE ABOVE ARRAY
"""
OUTPUT_FILENAME = 'fc_loop_unrolled.cpp'
def to_cpp_function(k, b, function_name, in_dim, out_dim):
"""
Generates C++ code for computing a fully connected layer of int values,
    referencing the weight array k and bias array b by their C++ names
    in the generated source. The function is named after function_name.
"""
out = ""
out += "inline void "+function_name+"(int in["+str(in_dim)+"], int out["+str(out_dim)+"]){\n"
for j in range(out_dim):
out += "\tout["+str(j)+"] = \n"
for i in range(in_dim):
out += "\t\tin["+str(i)+"]*("+k+"["+str(i)+"]["+str(j)+"]) +\n"
out += "\t\t("+b+"["+str(j)+"]);\n"
out += "}\n\n"
return out
## Generate .cpp text
out = ""
# First layer
out += to_cpp_function('fc1_k', 'fc1_b', 'fc_1', 27, 50)
# Second layer
out += to_cpp_function('fc2_k', 'fc2_b', 'fc_2', 50, 10)
# Output it
with open(OUTPUT_FILENAME, 'w+', encoding='utf-8') as f:
f.write(out)
"""
3: GENERATE C++ LOOP, WITH HARDCODED WEIGHTS
"""
OUTPUT_FILENAME = 'fc_loop_unrolled_hardcoded_weights.cpp'
def to_cpp_function(k, b, function_name):
"""
Generates C++ code for computing a fully connected layer of int values,
applying weights k and bias b, with hardcoded values in the source code.
    The function is named after function_name.
"""
out = ""
(in_dim, out_dim) = k.shape
out += "inline void "+function_name+"(int in["+str(in_dim)+"], int out["+str(out_dim)+"]){\n"
for j in range(out_dim):
out += "\tout["+str(j)+"] = \n"
for i in range(in_dim):
out += "\t\tin["+str(i)+"]*("+str(k[i][j])+") +\n"
out += "\t\t("+str(b[j])+");\n"
out += "}\n\n"
return out
## Generate .cpp text
out = ""
# First layer
out += to_cpp_function(fc1_k, fc1_b, 'fc_1')
# Second layer
out += to_cpp_function(fc2_k, fc2_b, 'fc_2')
# Output it
with open(OUTPUT_FILENAME, 'w+', encoding='utf-8') as f:
f.write(out)
| en | 0.625473 | # Open weights # Round them 0: GENERATE C++ ARRAYS, TO BE USED IN A STANDARD LOOP # Generate .cpp text # Output it 1: GENERATE C++ LOOP, USING THE ABOVE ARRAY Generates C++ code for computing a fully connected layer of int values, applying weights k and bias b, with hardcoded values in the source code. The function is names after function_name. ## Generate .cpp text # First layer # Second layer # Output it 3: GENERATE C++ LOOP, WITH HARDCODED WEIGHTS Generates C++ code for computing a fully connected layer of int values, applying weights k and bias b, with hardcoded values in the source code. The function is names after function_name. ## Generate .cpp text # First layer # Second layer # Output it | 2.48808 | 2 |
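The helpers above are pure string builders, so they can be sanity-checked in isolation. For a 2x3 integer kernel, to_cpp_2_dims (as defined above; run in the same session) emits a brace-initialized C++ array — output shown approximately, with tab separators rendered as spaces:

import numpy as np

k = np.arange(6).reshape(2, 3)
print('int k[2][3] = ' + to_cpp_2_dims(k) + ';')
# int k[2][3] = {{0, 1, 2},
# {3, 4, 5}};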
python/Canny_EdgeDetection.py | yubaoliu/Computer-Vision | 0 | 7458 | <gh_stars>0
import cv2
import numpy as np
import random
img = cv2.imread('../../Assets/Images/flower-white.jpeg', 1)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
cv2.imshow('img', img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgG = cv2.GaussianBlur(gray, (3, 3), 0)
dst = cv2.Canny(imgG, 50, 50)  # run Canny on the blurred grayscale, not the raw BGR image
cv2.imshow('dst', dst)
cv2.waitKey(0) | import cv2
import numpy as np
import random
img = cv2.imread('../../Assets/Images/flower-white.jpeg', 1)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
cv2.imshow('img', img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgG = cv2.GaussianBlur(gray, (3, 3), 0)
dst = cv2.Canny(imgG, 50, 50)  # run Canny on the blurred grayscale, not the raw BGR image
cv2.imshow('dst', dst)
cv2.waitKey(0) | none | 1 | 2.652159 | 3 |
|
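The script above pins both Canny thresholds at 50; the effect of the hysteresis pair is easier to judge by sweeping it. A small variation on the same OpenCV calls (window handling kept minimal):

import cv2

img = cv2.imread('../../Assets/Images/flower-white.jpeg', 1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
for low, high in [(50, 100), (100, 200)]:
    edges = cv2.Canny(blurred, low, high)
    cv2.imshow('canny %d/%d' % (low, high), edges)
cv2.waitKey(0)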
avod/core/trainer_stride.py | Guoxs/DODT | 1 | 7459 | <reponame>Guoxs/DODT<filename>avod/core/trainer_stride.py
"""Detection model trainer.
This file provides a generic training method to train a
DetectionModel.
"""
import datetime
import os
import tensorflow as tf
import time
from avod.builders import optimizer_builder
from avod.core import trainer_utils
from avod.core import summary_utils
slim = tf.contrib.slim
def train(model, train_config):
"""Training function for detection models.
Args:
model: The detection model object.
        train_config: a train_*pb2 protobuf; also configures stage-wise
            training, i.e. loading RPN weights onto the AVOD model.
"""
model = model
train_config = train_config
# Get model configurations
model_config = model.model_config
# Create a variable tensor to hold the global step
global_step_tensor = tf.Variable(
0, trainable=False, name='global_step')
#############################
# Get training configurations
#############################
max_iterations = train_config.max_iterations
summary_interval = train_config.summary_interval
checkpoint_interval = train_config.checkpoint_interval
max_checkpoints = train_config.max_checkpoints_to_keep
paths_config = model_config.paths_config
logdir = paths_config.logdir
if not os.path.exists(logdir):
os.makedirs(logdir)
checkpoint_dir = paths_config.checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_path = checkpoint_dir + '/' + \
model_config.checkpoint_name
pretrained_checkpoint_dir = checkpoint_dir + '/../../' + \
'pyramid_cars_with_aug_dt_5_tracking_corr_pretrained/checkpoints'
global_summaries = set([])
# The model should return a dictionary of predictions
prediction_dict = model.build()
summary_histograms = train_config.summary_histograms
summary_img_images = train_config.summary_img_images
summary_bev_images = train_config.summary_bev_images
# get variables to train
if not train_config.use_pretrained_model:
variable_to_train = None
else:
trainable_variables = tf.trainable_variables()
variable_to_train = trainable_variables[68:72] + \
trainable_variables[96:]
##############################
# Setup loss
##############################
losses_dict, total_loss = model.loss(prediction_dict)
# Optimizer
training_optimizer = optimizer_builder.build(
train_config.optimizer,
global_summaries,
global_step_tensor)
# Create the train op
with tf.variable_scope('train_op'):
train_op = slim.learning.create_train_op(
total_loss,
training_optimizer,
variables_to_train=variable_to_train,
clip_gradient_norm=1.0,
global_step=global_step_tensor)
# Add the result of the train_op to the summary
tf.summary.scalar("training_loss", train_op)
# Add maximum memory usage summary op
# This op can only be run on device with gpu
# so it's skipped on travis
is_travis = 'TRAVIS' in os.environ
if not is_travis:
# tf.summary.scalar('bytes_in_use',
# tf.contrib.memory_stats.BytesInUse())
tf.summary.scalar('max_bytes',
tf.contrib.memory_stats.MaxBytesInUse())
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
summary_merged = summary_utils.summaries_to_keep(
summaries,
global_summaries,
histograms=summary_histograms,
input_imgs=summary_img_images,
input_bevs=summary_bev_images
)
allow_gpu_mem_growth = train_config.allow_gpu_mem_growth
if allow_gpu_mem_growth:
# GPU memory config
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
sess = tf.Session(config=config)
else:
sess = tf.Session()
# Create unique folder name using datetime for summary writer
datetime_str = str(datetime.datetime.now())
logdir = logdir + '/train'
train_writer = tf.summary.FileWriter(logdir + '/' + datetime_str,
sess.graph)
# Save checkpoints regularly.
saver = tf.train.Saver(max_to_keep=max_checkpoints, pad_step_number=True)
# Create init op
# if train_config.use_pretrained_model:
# init = tf.initialize_variables(variable_to_train)
# else:
# init = tf.global_variables_initializer()
init = tf.global_variables_initializer()
# Continue from last saved checkpoint
if not train_config.overwrite_checkpoints:
trainer_utils.load_checkpoints(checkpoint_dir,saver)
if len(saver.last_checkpoints) > 0:
checkpoint_to_restore = saver.last_checkpoints[-1]
saver.restore(sess, checkpoint_to_restore)
else:
sess.run(init)
# load pretrained model
if train_config.use_pretrained_model:
variable_to_restore = tf.trainable_variables()
variable_to_restore = variable_to_restore[:68] + \
variable_to_restore[72:96]
variable_to_restore = {var.op.name: var for var in variable_to_restore}
saver2 = tf.train.Saver(var_list=variable_to_restore)
print('Loading pretrained model...')
trainer_utils.load_checkpoints(pretrained_checkpoint_dir, saver2)
checkpoint_to_restore = saver2.last_checkpoints[11]
saver2.restore(sess, checkpoint_to_restore)
else:
sess.run(init)
# load pretrained model
if train_config.use_pretrained_model:
variable_to_restore = tf.trainable_variables()
variable_to_restore = variable_to_restore[:68] + \
variable_to_restore[72:96]
variable_to_restore = {var.op.name: var for var in variable_to_restore}
saver2 = tf.train.Saver(var_list=variable_to_restore)
print('Loading pretrained model...')
trainer_utils.load_checkpoints(pretrained_checkpoint_dir, saver2)
checkpoint_to_restore = saver2.last_checkpoints[11]
saver2.restore(sess, checkpoint_to_restore)
# Read the global step if restored
global_step = tf.train.global_step(sess, global_step_tensor)
print('Starting from step {} / {}'.format(
global_step, max_iterations))
# Main Training Loop
last_time = time.time()
for step in range(global_step, max_iterations + 1):
# Save checkpoint
if step % checkpoint_interval == 0:
global_step = tf.train.global_step(sess,
global_step_tensor)
saver.save(sess,
save_path=checkpoint_path,
global_step=global_step)
print('Step {} / {}, Checkpoint saved to {}-{:08d}'.format(
step, max_iterations,
checkpoint_path, global_step))
feed_dict = model.create_feed_dict()
# Write summaries and train op
if step % summary_interval == 0:
current_time = time.time()
time_elapsed = current_time - last_time
last_time = current_time
train_op_loss, summary_out = sess.run(
[train_op, summary_merged], feed_dict=feed_dict)
print('Step {}, Total Loss {:0.3f}, Time Elapsed {:0.3f} s'.format(
step, train_op_loss, time_elapsed))
train_writer.add_summary(summary_out, step)
else:
# Run the train op only
sess.run(train_op, feed_dict)
# Close the summary writers
train_writer.close() | """Detection model trainer.
This file provides a generic training method to train a
DetectionModel.
"""
import datetime
import os
import tensorflow as tf
import time
from avod.builders import optimizer_builder
from avod.core import trainer_utils
from avod.core import summary_utils
slim = tf.contrib.slim
def train(model, train_config):
"""Training function for detection models.
Args:
model: The detection model object.
        train_config: a train_*pb2 protobuf; also configures stage-wise
            training, i.e. loading RPN weights onto the AVOD model.
"""
model = model
train_config = train_config
# Get model configurations
model_config = model.model_config
# Create a variable tensor to hold the global step
global_step_tensor = tf.Variable(
0, trainable=False, name='global_step')
#############################
# Get training configurations
#############################
max_iterations = train_config.max_iterations
summary_interval = train_config.summary_interval
checkpoint_interval = train_config.checkpoint_interval
max_checkpoints = train_config.max_checkpoints_to_keep
paths_config = model_config.paths_config
logdir = paths_config.logdir
if not os.path.exists(logdir):
os.makedirs(logdir)
checkpoint_dir = paths_config.checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_path = checkpoint_dir + '/' + \
model_config.checkpoint_name
pretrained_checkpoint_dir = checkpoint_dir + '/../../' + \
'pyramid_cars_with_aug_dt_5_tracking_corr_pretrained/checkpoints'
global_summaries = set([])
# The model should return a dictionary of predictions
prediction_dict = model.build()
summary_histograms = train_config.summary_histograms
summary_img_images = train_config.summary_img_images
summary_bev_images = train_config.summary_bev_images
# get variables to train
if not train_config.use_pretrained_model:
variable_to_train = None
else:
trainable_variables = tf.trainable_variables()
variable_to_train = trainable_variables[68:72] + \
trainable_variables[96:]
##############################
# Setup loss
##############################
losses_dict, total_loss = model.loss(prediction_dict)
# Optimizer
training_optimizer = optimizer_builder.build(
train_config.optimizer,
global_summaries,
global_step_tensor)
# Create the train op
with tf.variable_scope('train_op'):
train_op = slim.learning.create_train_op(
total_loss,
training_optimizer,
variables_to_train=variable_to_train,
clip_gradient_norm=1.0,
global_step=global_step_tensor)
# Add the result of the train_op to the summary
tf.summary.scalar("training_loss", train_op)
# Add maximum memory usage summary op
# This op can only be run on device with gpu
# so it's skipped on travis
is_travis = 'TRAVIS' in os.environ
if not is_travis:
# tf.summary.scalar('bytes_in_use',
# tf.contrib.memory_stats.BytesInUse())
tf.summary.scalar('max_bytes',
tf.contrib.memory_stats.MaxBytesInUse())
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
summary_merged = summary_utils.summaries_to_keep(
summaries,
global_summaries,
histograms=summary_histograms,
input_imgs=summary_img_images,
input_bevs=summary_bev_images
)
allow_gpu_mem_growth = train_config.allow_gpu_mem_growth
if allow_gpu_mem_growth:
# GPU memory config
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
sess = tf.Session(config=config)
else:
sess = tf.Session()
# Create unique folder name using datetime for summary writer
datetime_str = str(datetime.datetime.now())
logdir = logdir + '/train'
train_writer = tf.summary.FileWriter(logdir + '/' + datetime_str,
sess.graph)
# Save checkpoints regularly.
saver = tf.train.Saver(max_to_keep=max_checkpoints, pad_step_number=True)
# Create init op
# if train_config.use_pretrained_model:
# init = tf.initialize_variables(variable_to_train)
# else:
# init = tf.global_variables_initializer()
init = tf.global_variables_initializer()
# Continue from last saved checkpoint
if not train_config.overwrite_checkpoints:
trainer_utils.load_checkpoints(checkpoint_dir,saver)
if len(saver.last_checkpoints) > 0:
checkpoint_to_restore = saver.last_checkpoints[-1]
saver.restore(sess, checkpoint_to_restore)
else:
sess.run(init)
# load pretrained model
if train_config.use_pretrained_model:
variable_to_restore = tf.trainable_variables()
variable_to_restore = variable_to_restore[:68] + \
variable_to_restore[72:96]
variable_to_restore = {var.op.name: var for var in variable_to_restore}
saver2 = tf.train.Saver(var_list=variable_to_restore)
print('Loading pretrained model...')
trainer_utils.load_checkpoints(pretrained_checkpoint_dir, saver2)
checkpoint_to_restore = saver2.last_checkpoints[11]
saver2.restore(sess, checkpoint_to_restore)
else:
sess.run(init)
# load pretrained model
if train_config.use_pretrained_model:
variable_to_restore = tf.trainable_variables()
variable_to_restore = variable_to_restore[:68] + \
variable_to_restore[72:96]
variable_to_restore = {var.op.name: var for var in variable_to_restore}
saver2 = tf.train.Saver(var_list=variable_to_restore)
print('Loading pretrained model...')
trainer_utils.load_checkpoints(pretrained_checkpoint_dir, saver2)
checkpoint_to_restore = saver2.last_checkpoints[11]
saver2.restore(sess, checkpoint_to_restore)
# Read the global step if restored
global_step = tf.train.global_step(sess, global_step_tensor)
print('Starting from step {} / {}'.format(
global_step, max_iterations))
# Main Training Loop
last_time = time.time()
for step in range(global_step, max_iterations + 1):
# Save checkpoint
if step % checkpoint_interval == 0:
global_step = tf.train.global_step(sess,
global_step_tensor)
saver.save(sess,
save_path=checkpoint_path,
global_step=global_step)
print('Step {} / {}, Checkpoint saved to {}-{:08d}'.format(
step, max_iterations,
checkpoint_path, global_step))
feed_dict = model.create_feed_dict()
# Write summaries and train op
if step % summary_interval == 0:
current_time = time.time()
time_elapsed = current_time - last_time
last_time = current_time
train_op_loss, summary_out = sess.run(
[train_op, summary_merged], feed_dict=feed_dict)
print('Step {}, Total Loss {:0.3f}, Time Elapsed {:0.3f} s'.format(
step, train_op_loss, time_elapsed))
train_writer.add_summary(summary_out, step)
else:
# Run the train op only
sess.run(train_op, feed_dict)
# Close the summary writers
train_writer.close() | en | 0.563569 | Detection model trainer. This file provides a generic training method to train a DetectionModel. Training function for detection models. Args: model: The detection model object. train_config: a train_*pb2 protobuf. training i.e. loading RPN weights onto AVOD model. # Get model configurations # Create a variable tensor to hold the global step ############################# # Get training configurations ############################# # The model should return a dictionary of predictions # get variables to train ############################## # Setup loss ############################## # Optimizer # Create the train op # Add the result of the train_op to the summary # Add maximum memory usage summary op # This op can only be run on device with gpu # so it's skipped on travis # tf.summary.scalar('bytes_in_use', # tf.contrib.memory_stats.BytesInUse()) # GPU memory config # Create unique folder name using datetime for summary writer # Save checkpoints regularly. # Create init op # if train_config.use_pretrained_model: # init = tf.initialize_variables(variable_to_train) # else: # init = tf.global_variables_initializer() # Continue from last saved checkpoint # load pretrained model # load pretrained model # Read the global step if restored # Main Training Loop # Save checkpoint # Write summaries and train op # Run the train op only # Close the summary writers | 2.583208 | 3 |
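train() above only needs a built detection model plus the train_config protobuf. A driver sketch — the builder and model module paths follow AVOD's layout but should be treated as assumptions, and dataset construction is omitted for brevity:

# Hypothetical wiring; verify module paths against the AVOD checkout.
from avod.builders import config_builder_util
from avod.core.models.avod_model import AvodModel
from avod.core import trainer_stride

model_config, train_config, _, dataset_config = \
    config_builder_util.get_configs_from_pipeline_file(
        'pipeline.config', is_training=True)
model = AvodModel(model_config, train_val_test='train',
                  dataset=None)  # build the dataset from dataset_config here
trainer_stride.train(model, train_config)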
rest_framework_hmac/hmac_key/models.py | nickc92/django-rest-framework-hmac | 0 | 7460 | <gh_stars>0
import binascii
import os
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
class HMACKey(models.Model):
"""
The default HMACKey model that can auto generate a
key/secret for HMAC Auth via a signal
"""
def generate_key():
"""
Returns a 40 character hex string based on binary random data
"""
return binascii.hexlify(os.urandom(20)).decode()
key = models.CharField(
_("Key"), primary_key=True, max_length=40, default=generate_key)
secret = models.CharField(
_("Secret"), max_length=40, default=generate_key)
user = models.OneToOneField(
settings.AUTH_USER_MODEL, related_name='hmac_key',
on_delete=models.CASCADE, verbose_name=_("User")
)
nonce = models.BigIntegerField(default=1)
created = models.DateTimeField(_("Created"), auto_now_add=True)
class Meta:
# Only create a DB table for this Model if this app is registered
abstract = 'rest_framework_hmac.hmac_key' \
not in settings.INSTALLED_APPS
verbose_name = _("HMACKey")
verbose_name_plural = _("HMACKey")
def __str__(self):
return self.key
| import binascii
import os
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
class HMACKey(models.Model):
"""
The default HMACKey model that can auto generate a
key/secret for HMAC Auth via a signal
"""
def generate_key():
"""
Returns a 40 character hex string based on binary random data
"""
return binascii.hexlify(os.urandom(20)).decode()
key = models.CharField(
_("Key"), primary_key=True, max_length=40, default=generate_key)
secret = models.CharField(
_("Secret"), max_length=40, default=generate_key)
user = models.OneToOneField(
settings.AUTH_USER_MODEL, related_name='hmac_key',
on_delete=models.CASCADE, verbose_name=_("User")
)
nonce = models.BigIntegerField(default=1)
created = models.DateTimeField(_("Created"), auto_now_add=True)
class Meta:
# Only create a DB table for this Model if this app is registered
abstract = 'rest_framework_hmac.hmac_key' \
not in settings.INSTALLED_APPS
verbose_name = _("HMACKey")
verbose_name_plural = _("HMACKey")
def __str__(self):
return self.key | en | 0.735595 | The default HMACKey model that can auto generate a key/secret for HMAC Auth via a signal Returns a 40 character hex string based on binary random data # Only create a DB table for this Model if this app is registered | 2.263792 | 2 |
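HMACKey above stores a per-user key/secret pair; the other half of the scheme is the client signing its requests with the secret. A minimal client-side sketch using only the standard library (the header name is illustrative, not part of this package):

import hashlib
import hmac

def sign(secret: str, body: bytes) -> str:
    # HMAC-SHA256 over the raw request body, hex-encoded.
    return hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()

signature = sign("40-char-secret-from-HMACKey", b'{"id": 1}')
# send alongside the user's key, e.g. in an X-Signature header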
test/conftest.py | Geoiv/river | 0 | 7461 | <gh_stars>0
import os
from tempfile import NamedTemporaryFile
import boto3
from moto import mock_s3
import pandas as pd
import pandavro as pdx
import pickle
import pytest
@pytest.fixture(autouse=True, scope='session')
def aws_credentials():
"""
Sets AWS credentials to invalid values. Applied to all test functions and
scoped to the entire testing session, so there's no chance of interfering
with production buckets.
"""
os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
os.environ['AWS_SECURITY_TOKEN'] = 'testing'
os.environ['AWS_SESSION_TOKEN'] = 'testing'
@pytest.fixture
def test_bucket():
"""Universal bucket name for use throughout testing"""
return 'test_bucket'
@pytest.fixture
def test_keys():
"""List of keys to be used for populating a bucket with empty objects"""
return sorted([
'test_key_0.csv',
'folder0/test_key_1.pq',
'folder1/test_key_2.pkl',
'folder1/subfolder0/test_key_3.pkl',
'folder2/'
])
@pytest.fixture
def test_df_keys():
"""List of keys to be used for populating a bucket with DataFrames"""
return {
'avro': ['df.avro'],
'csv': ['df.csv'],
'csv.gz': ['df.csv.gz'],
'csv.zip': ['df.csv.zip'],
'csv.bz2': ['df.csv.bz2'],
'csv.xz': ['df.csv.xz'],
'psv': ['df.psv'],
'psv.gz': ['df.psv.gz'],
'psv.zip': ['df.psv.zip'],
'psv.bz2': ['df.psv.bz2'],
'psv.xz': ['df.psv.xz'],
'feather': ['df.feather'],
'json': ['df.json'],
'pkl': ['df.pkl', 'df.pickle'],
'pq': ['df.pq', 'df.parquet']
}
@pytest.fixture
def test_df():
"""
Universal dataframe for use throughout testing. Multiple data types
used to test for proper encoding/decoding.
"""
return pd.DataFrame({
'intcol': [1, 2, 3],
'strcol': ['four', 'five', 'six'],
'floatcol': [7.0, 8.5, 9.0]
})
@pytest.fixture
def mock_s3_client():
"""Mocks all s3 connections in any test or fixture that includes it"""
with mock_s3():
yield
@pytest.fixture
def setup_bucket_w_contents(mock_s3_client, test_bucket, test_keys):
"""
Sets up a bucket with objects containing the empty string, based off
keys in 'test_keys'
"""
s3 = boto3.client('s3')
s3.create_bucket(Bucket=test_bucket)
for key in test_keys:
s3.put_object(Bucket=test_bucket, Key=key, Body='')
yield
@pytest.fixture
def setup_bucket_wo_contents(mock_s3_client, test_bucket):
"""Sets up a bucket with no contents."""
s3 = boto3.client('s3')
s3.create_bucket(Bucket=test_bucket)
yield
@pytest.fixture
def setup_bucket_w_dfs(mock_s3_client, test_bucket, test_df, test_df_keys):
"""
Sets up a bucket populated with dataframes that contain the data as
defined in 'test_df', at the keys and storage formats defined in
'test_df_keys'
"""
s3 = boto3.client('s3')
s3.create_bucket(Bucket=test_bucket)
for key in test_df_keys['avro']:
with NamedTemporaryFile() as tmpfile:
pdx.to_avro(tmpfile, test_df)
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['csv']:
with NamedTemporaryFile() as tmpfile:
test_df.to_csv(tmpfile.name, index=False)
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['csv.gz']:
with NamedTemporaryFile(suffix='.csv.gz') as tmpfile:
test_df.to_csv(tmpfile.name, index=False)
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['csv.zip']:
with NamedTemporaryFile(suffix='.csv.zip') as tmpfile:
test_df.to_csv(tmpfile.name, index=False)
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['csv.bz2']:
with NamedTemporaryFile(suffix='.csv.bz2') as tmpfile:
test_df.to_csv(tmpfile.name, index=False)
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['csv.xz']:
with NamedTemporaryFile(suffix='.csv.xz') as tmpfile:
test_df.to_csv(tmpfile.name, index=False)
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['psv']:
with NamedTemporaryFile() as tmpfile:
test_df.to_csv(tmpfile.name, index=False, sep='|')
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['psv.gz']:
with NamedTemporaryFile(suffix='.psv.gz') as tmpfile:
test_df.to_csv(tmpfile.name, index=False, sep='|')
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['psv.zip']:
with NamedTemporaryFile(suffix='.psv.zip') as tmpfile:
test_df.to_csv(tmpfile.name, index=False, sep='|')
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['psv.bz2']:
with NamedTemporaryFile(suffix='.psv.bz2') as tmpfile:
test_df.to_csv(tmpfile.name, index=False, sep='|')
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['psv.xz']:
with NamedTemporaryFile(suffix='.psv.xz') as tmpfile:
test_df.to_csv(tmpfile.name, index=False, sep='|')
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['feather']:
with NamedTemporaryFile() as tmpfile:
test_df.to_feather(tmpfile.name)
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['json']:
with NamedTemporaryFile() as tmpfile:
test_df.to_json(tmpfile.name)
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['pkl']:
with NamedTemporaryFile() as tmpfile:
pickle.dump(test_df, tmpfile, protocol=pickle.HIGHEST_PROTOCOL)
tmpfile.flush()
s3.upload_file(tmpfile.name, test_bucket, key)
for key in test_df_keys['pq']:
with NamedTemporaryFile() as tmpfile:
test_df.to_parquet(tmpfile.name, index=False)
s3.upload_file(tmpfile.name, test_bucket, key)
yield
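
# --- Editor's illustrative helper (not in the original conftest) ---
# Shows how a test module could consume the fixtures above. It relies only on
# boto3 and pandas directly, so it makes no assumptions about the library
# under test; the helper name itself is hypothetical.
def assert_csv_roundtrip(bucket_name, expected_df):
    """Fetch 'df.csv' from the mocked bucket and compare it to expected_df."""
    import io
    s3 = boto3.client('s3')
    body = s3.get_object(Bucket=bucket_name, Key='df.csv')['Body'].read()
    pd.testing.assert_frame_equal(pd.read_csv(io.BytesIO(body)), expected_df)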
| en | 0.891997 | Sets AWS credentials to invalid values. Applied to all test functions and scoped to the entire testing session, so there's no chance of interfering with production buckets. Universal bucket name for use throughout testing List of keys to be used for populating a bucket with empty objects List of keys to be used for populating a bucket with DataFrames Universal dataframe for use throughout testing. Multiple data types used to test for proper encoding/decoding. Mocks all s3 connections in any test or fixture that includes it Sets up a bucket with objects containing the empty string, based off keys in 'test_keys' Sets up a bucket with no contents. Sets up a bucket populated with dataframes that contain the data as defined in 'test_df', at the keys and storage formats defined in 'test_df_keys' | 2.109292 | 2
company/migrations/0021_auto_20161208_1113.py | uktrade/directory-api | 2 | 7462 | <filename>company/migrations/0021_auto_20161208_1113.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-12-08 11:13
from __future__ import unicode_literals
from django.db import migrations
from company import helpers
def ensure_verification_code(apps, schema_editor):
Company = apps.get_model("company", "Company")
for company in Company.objects.filter(verification_code=''):
company.verification_code = helpers.generate_verification_code()
company.save()
class Migration(migrations.Migration):
dependencies = [
('company', '0020_auto_20161208_1056'),
]
operations = [
migrations.RunPython(ensure_verification_code),
]
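
# Editor's note (illustrative, not in the original migration): as written,
# RunPython has no reverse function, so this migration cannot be unapplied.
# Django's documented way to make it a reversible no-op is:
#
#     migrations.RunPython(ensure_verification_code, migrations.RunPython.noop)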
| en | 0.821157 | # -*- coding: utf-8 -*- # Generated by Django 1.9.10 on 2016-12-08 11:13 | 1.731173 | 2 |
system/indy-node-tests/TestAuthMapSuite.py | Toktar/indy-test-automation | 0 | 7463 | import pytest
import asyncio
from system.utils import *
from random import randrange as rr
import hashlib
import time
from datetime import datetime, timedelta, timezone
from indy import payment
import logging
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures('docker_setup_and_teardown')
class TestAuthMapSuite:
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_nym(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
new_did, new_vk = await did.create_and_store_my_did(wallet_handler, '{}')
# add adder to add nym
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to edit nym
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
req = await ledger.build_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', '',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
req = await ledger.build_auth_rule_request(trustee_did, '1', 'EDIT', 'verkey', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add nym with verkey by adder
res4 = await send_nym(pool_handler, wallet_handler, adder_did, new_did, adder_vk) # push adder vk
print(res4)
assert res4['op'] == 'REPLY'
# edit verkey by editor
res5 = await send_nym(pool_handler, wallet_handler, editor_did, new_did, editor_vk) # push editor vk
print(res5)
assert res5['op'] == 'REPLY'
# negative cases
if adder_role != editor_role:
# try to add another nym with editor did - should be rejected
res6 = await send_nym(pool_handler, wallet_handler, editor_did, random_did_and_json()[0])
print(res6)
assert res6['op'] == 'REJECT'
# try to edit initial nym one more time with adder did - should be rejected
res7 = await send_nym(pool_handler, wallet_handler, adder_did, new_did, adder_vk)
print(res7)
assert res7['op'] == 'REJECT'
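
    # --- Editor's sketch (hypothetical helper, not in the original suite) ---
    # Every test below builds the same ROLE constraint payload by hand; a
    # small static helper like this could remove that repetition.
    @staticmethod
    def role_constraint(role_num, sig_count=1, need_to_be_owner=False):
        # JSON constraint string in the shape passed to build_auth_rule_request
        return json.dumps({
            'constraint_id': 'ROLE',
            'role': role_num,
            'sig_count': sig_count,
            'need_to_be_owner': need_to_be_owner,
            'metadata': {}
        })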
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_attrib(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add target nym
target_did, target_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, target_did, target_vk)
assert res['op'] == 'REPLY'
# add adder to add attrib
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to edit attrib
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '100', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '100', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add attrib for target did by non-owner adder
res4 = await send_attrib(pool_handler, wallet_handler, adder_did, target_did,
None, json.dumps({'key1': 'value1'}), None)
print(res4)
assert res4['op'] == 'REPLY'
# edit attrib for target did by non-owner editor
res5 = await send_attrib(pool_handler, wallet_handler, editor_did, target_did,
None, json.dumps({'key1': 'value2'}), None)
print(res5)
assert res5['op'] == 'REPLY'
# negative cases
if adder_role != editor_role:
# try to add another attrib with editor did - should be rejected
res6 = await send_attrib(pool_handler, wallet_handler, editor_did, target_did,
None, json.dumps({'key2': 'value1'}), None)
print(res6)
assert res6['op'] == 'REJECT'
# try to edit initial attrib one more time with adder did - should be rejected
res7 = await send_attrib(pool_handler, wallet_handler, adder_did, target_did,
None, json.dumps({'key1': 'value3'}), None)
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.asyncio
async def test_case_schema(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num): # we can add schema only
trustee_did, _ = get_default_trustee
# add adder to add schema
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '101', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# add schema
res4 = await send_schema(pool_handler, wallet_handler, adder_did, 'schema1', '1.0', json.dumps(['attr1']))
print(res4)
assert res4[1]['op'] == 'REPLY'
# edit schema - nobody can edit schemas - should be rejected
res5 = await send_schema(pool_handler, wallet_handler, adder_did, 'schema1', '1.0',
json.dumps(['attr1', 'attr2']))
print(res5)
assert res5[1]['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
# use the same did with different roles to ADD and EDIT since adder did is a part of unique cred def id
async def test_case_cred_def(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add cred def
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
schema_id, _ = await send_schema(pool_handler, wallet_handler, trustee_did,
'schema1', '1.0', json.dumps(["age", "sex", "height", "name"]))
await asyncio.sleep(1)
res = await get_schema(pool_handler, wallet_handler, trustee_did, schema_id)
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '102', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '102', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add cred def
cred_def_id, cred_def_json = \
await anoncreds.issuer_create_and_store_credential_def(wallet_handler, adder_did, schema_json, 'TAG1',
None, json.dumps({'support_revocation': False}))
request = await ledger.build_cred_def_request(adder_did, cred_def_json)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res4)
assert res4['op'] == 'REPLY'
if adder_role != editor_role:
# try to edit cred def as adder - should be rejected
_request = json.loads(request)
_request['operation']['data']['primary']['n'] = '123456789'
_request['reqId'] += _request['reqId']
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(_request)))
print(res5)
assert res5['op'] == 'REJECT'
# change adder role to edit cred def
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, None, None, editor_role)
print(res)
assert res['op'] == 'REPLY'
# edit cred def
request = json.loads(request)
request['operation']['data']['primary']['n'] = '123456'
request['reqId'] += request['reqId']
res6 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(request)))
print(res6)
assert res6['op'] == 'REPLY'
if adder_role != editor_role:
# try to add another cred def as editor - should be rejected
cred_def_id, cred_def_json = \
await anoncreds.issuer_create_and_store_credential_def(wallet_handler, adder_did, schema_json, 'TAG2',
None, json.dumps({'support_revocation': True}))
request = await ledger.build_cred_def_request(adder_did, cred_def_json)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res7)
assert res7['op'] == 'REJECT'
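
    # --- Editor's sketch (hypothetical helper, not in the original suite) ---
    # Building, signing, submitting and asserting an auth rule is repeated in
    # nearly every test; a coroutine like this could fold those steps together.
    @staticmethod
    async def set_auth_rule(pool_handler, wallet_handler, trustee_did,
                            txn_type, action, field, old_value, new_value, constraint):
        req = await ledger.build_auth_rule_request(trustee_did, txn_type, action,
                                                   field, old_value, new_value, constraint)
        res = json.loads(await ledger.sign_and_submit_request(
            pool_handler, wallet_handler, trustee_did, req))
        assert res['op'] == 'REPLY'
        return res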
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
# use the same did with different roles to ADD and EDIT since adder did is a part of unique revoc reg def id
async def test_case_revoc_reg_def(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add revoc reg def
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
schema_id, _ = await send_schema(pool_handler, wallet_handler, trustee_did,
'schema1', '1.0', json.dumps(['age', 'sex', 'height', 'name']))
await asyncio.sleep(1)
res = await get_schema(pool_handler, wallet_handler, trustee_did, schema_id)
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
cred_def_id, _, res = await send_cred_def(pool_handler, wallet_handler, trustee_did, schema_json,
'cred_def_tag', None, json.dumps({'support_revocation': True}))
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '113', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '113', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add revoc reg def
tails_writer_config = json.dumps({'base_dir': 'tails', 'uri_pattern': ''})
tails_writer_handle = await blob_storage.open_writer('default', tails_writer_config)
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG1',
cred_def_id, json.dumps({
'max_cred_num': 1,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
request = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res4)
assert res4['op'] == 'REPLY'
if adder_role != editor_role:
# try to edit revoc reg def as adder - should be rejected
_request = json.loads(request)
_request['operation']['value']['tailsHash'] = random_string(30)
_request['reqId'] += _request['reqId']
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(_request)))
print(res5)
assert res5['op'] == 'REJECT'
# change adder role to edit revoc reg def
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, None, None, editor_role)
print(res)
assert res['op'] == 'REPLY'
# edit revoc reg def
request = json.loads(request)
request['operation']['value']['tailsHash'] = random_string(20)
request['reqId'] += request['reqId']
res6 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(request)))
print(res6)
assert res6['op'] == 'REPLY'
if adder_role != editor_role:
# try to add another revoc reg def as editor - should be rejected
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG2',
cred_def_id, json.dumps({
'max_cred_num': 2,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
request = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_revoc_reg_entry(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add revoc reg entry
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
schema_id, _ = await send_schema(pool_handler, wallet_handler, trustee_did,
'schema1', '1.0', json.dumps(['age', 'sex', 'height', 'name']))
await asyncio.sleep(1)
res = await get_schema(pool_handler, wallet_handler, trustee_did, schema_id)
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
cred_def_id, _, res = await send_cred_def(pool_handler, wallet_handler, trustee_did, schema_json,
'cred_def_tag', None, json.dumps({'support_revocation': True}))
# set rule for revoc reg def adding - network monitor case
req = await ledger.build_auth_rule_request(trustee_did, '113', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res21 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res21)
assert res21['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '114', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res22 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res22)
assert res22['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '114', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add revoc reg entry
tails_writer_config = json.dumps({'base_dir': 'tails', 'uri_pattern': ''})
tails_writer_handle = await blob_storage.open_writer('default', tails_writer_config)
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG1',
cred_def_id, json.dumps({
'max_cred_num': 10,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
req = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
assert res['op'] == 'REPLY'
request = await ledger.build_revoc_reg_entry_request(adder_did, revoc_reg_def_id, 'CL_ACCUM',
revoc_reg_entry_json)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res4)
assert res4['op'] == 'REPLY'
if adder_role != editor_role:
# try to edit revoc reg entry as adder - should be rejected
_request = json.loads(request)
_request['operation']['value']['prevAccum'] = _request['operation']['value']['accum']
_request['operation']['value']['accum'] = random_string(20)
_request['operation']['value']['revoked'] = [7, 8, 9]
_request['reqId'] += _request['reqId']
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(_request)))
print(res5)
assert res5['op'] == 'REJECT'
# change adder role to edit revoc reg def
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, None, None, editor_role)
print(res)
assert res['op'] == 'REPLY'
# edit revoc reg entry
request = json.loads(request)
request['operation']['value']['prevAccum'] = request['operation']['value']['accum']
request['operation']['value']['accum'] = random_string(10)
request['operation']['value']['revoked'] = [1, 2, 3]
request['reqId'] += request['reqId']
res6 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(request)))
print(res6)
assert res6['op'] == 'REPLY'
if adder_role != editor_role:
# try to add another revoc reg entry as editor - should be rejected
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG2',
cred_def_id, json.dumps({
'max_cred_num': 20,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
req = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
assert res['op'] == 'REPLY'
request = await ledger.build_revoc_reg_entry_request(adder_did, revoc_reg_def_id, 'CL_ACCUM',
revoc_reg_entry_json)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.skip('INDY-2024')
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_node(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add node
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to edit node
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '0', 'ADD', 'services', '*', str(['VALIDATOR']),
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '0', 'EDIT', 'services', str(['VALIDATOR']), str([]),
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add node
alias = random_string(5)
client_ip = '{}.{}.{}.{}'.format(rr(1, 255), 0, 0, rr(1, 255))
client_port = rr(1, 32767)
node_ip = '{}.{}.{}.{}'.format(rr(1, 255), 0, 0, rr(1, 255))
node_port = rr(1, 32767)
req = await ledger.build_node_request(adder_did, adder_vk, # adder_vk is used as node target did here
json.dumps(
{
'alias': alias,
'client_ip': client_ip,
'client_port': client_port,
'node_ip': node_ip,
'node_port': node_port,
'services': ['VALIDATOR']
}))
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res4)
assert res4['op'] == 'REPLY'
# edit node
req = await ledger.build_node_request(editor_did, adder_vk, # adder_vk is used as node target did here
json.dumps(
{
'alias': alias,
'services': []
}))
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res5)
assert res5['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_pool_upgrade(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
        # add adder to start pool upgrade
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to cancel pool upgrade
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '109', 'ADD', 'action', '*', 'start',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '109', 'EDIT', 'action', 'start', 'cancel',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# start pool upgrade
init_time = 30
version = '1.9.999'
name = 'upgrade' + '_' + version + '_' + datetime.now(tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%S%z')
_sha256 = hashlib.sha256().hexdigest()
_timeout = 5
reinstall = False
force = False
package = 'indy-node'
dests = ['Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv', '<KEY>',
'DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya', '4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA',
'4SWokCJWJc69Tn74VvLS6t2G2ucvXqM9FDMsWJjmsUxe', 'Cv1Ehj43DDM5ttNBmC6VPpEfwXWwfGktHwjDJsTV5Fz8',
'BM8dTooz5uykCbYSAAFwKNkYfT4koomBHsSWHTDtkjhW']
docker_7_schedule = json.dumps(dict(
{dest: datetime.strftime(datetime.now(tz=timezone.utc) + timedelta(minutes=init_time + i * 5),
'%Y-%m-%dT%H:%M:%S%z')
for dest, i in zip(dests, range(len(dests)))}
))
req = await ledger.build_pool_upgrade_request(adder_did, name, version, 'start', _sha256, _timeout,
docker_7_schedule, None, reinstall, force, package)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res4)
assert res4['op'] == 'REPLY'
# cancel pool upgrade
req = await ledger.build_pool_upgrade_request(editor_did, name, version, 'cancel', _sha256, _timeout,
docker_7_schedule, None, reinstall, force, package)
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res5)
assert res5['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.asyncio
async def test_case_pool_restart(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num): # we can add pool restart only
trustee_did, _ = get_default_trustee
# add adder to restart pool
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
await asyncio.sleep(15)
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '118', 'ADD', 'action', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# restart pool
        req = await ledger.build_pool_restart_request(
            adder_did, 'start', datetime.strftime(datetime.now(tz=timezone.utc) + timedelta(minutes=60),
                                                  '%Y-%m-%dT%H:%M:%S%z'))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
res3 = [json.loads(v) for k, v in res3.items()]
print(res3)
assert all([res['op'] == 'REPLY' for res in res3])
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.asyncio
async def test_case_validator_info(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num): # we can add validator info only
trustee_did, _ = get_default_trustee
# add adder to get validator info
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
await asyncio.sleep(15)
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '119', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
req = await ledger.build_get_validator_info_request(adder_did)
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
res3 = [json.loads(v) for k, v in res3.items()]
print(res3)
assert all([res['op'] == 'REPLY' for res in res3])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_pool_config(self, pool_handler, wallet_handler, get_default_trustee,
editor_role, editor_role_num): # we can edit pool config only
trustee_did, _ = get_default_trustee
# add editor to edit pool config
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '111', 'EDIT', 'action', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
req = await ledger.build_pool_config_request(editor_did, False, False)
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res3)
assert res3['op'] == 'REPLY'
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_auth_rule(self, pool_handler, wallet_handler, get_default_trustee,
editor_role, editor_role_num): # we can edit auth rule only
trustee_did, _ = get_default_trustee
# add editor to edit auth rule
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '120', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
await asyncio.sleep(15)
req = await ledger.build_auth_rule_request(editor_did, '111', 'EDIT', 'action', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 5,
'need_to_be_owner': True,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res3)
assert res3['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('sig_count', [0, 1, 3])
@pytest.mark.asyncio
async def test_case_mint(self, payment_init, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, sig_count):
libsovtoken_payment_method = 'sov'
trustee_did, _ = get_default_trustee
address = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet0')}))
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '10000', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': sig_count,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
if sig_count == 0:
# add identity owner adder to mint tokens
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, None)
assert res['op'] == 'REPLY'
req, _ = await payment.build_mint_req(wallet_handler, adder_did,
json.dumps([{"recipient": address, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
elif sig_count == 1:
# add adder to mint tokens
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_mint_req(wallet_handler, adder_did,
json.dumps([{"recipient": address, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
else:
# add adders to mint tokens
adder_did1, adder_vk1 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did1, adder_vk1, None, adder_role)
assert res['op'] == 'REPLY'
adder_did2, adder_vk2 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did2, adder_vk2, None, adder_role)
assert res['op'] == 'REPLY'
adder_did3, adder_vk3 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did3, adder_vk3, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_mint_req(wallet_handler, adder_did1,
json.dumps([{"recipient": address, "amount": 100}]), None)
req = await ledger.multi_sign_request(wallet_handler, adder_did1, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did2, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did3, req)
res1 = json.loads(await ledger.submit_request(pool_handler, req))
print(res1)
assert res1['op'] == 'REPLY'
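
    # --- Editor's sketch (hypothetical helper, not in the original suite) ---
    # The three-signer flow above is repeated in the set_fees and payment
    # tests; a coroutine like this could collect the signatures and submit.
    @staticmethod
    async def multi_sign_and_submit(pool_handler, wallet_handler, dids, req):
        for _did in dids:
            req = await ledger.multi_sign_request(wallet_handler, _did, req)
        return json.loads(await ledger.submit_request(pool_handler, req))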
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.parametrize('sig_count', [0, 1, 3])
@pytest.mark.asyncio
async def test_case_set_fees(self, payment_init, pool_handler, wallet_handler, get_default_trustee,
editor_role, editor_role_num, sig_count):
libsovtoken_payment_method = 'sov'
fees = {'1': 1, '100': 1, '101': 1, '102': 1, '113': 1, '114': 1, '10001': 1}
trustee_did, _ = get_default_trustee
        # set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '20000', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': sig_count,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
if sig_count == 0:
# add identity owner editor to set fees
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, None)
assert res['op'] == 'REPLY'
req = await payment.build_set_txn_fees_req(wallet_handler, editor_did, libsovtoken_payment_method,
json.dumps(fees))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res1)
assert res1['op'] == 'REPLY'
elif sig_count == 1:
# add editor to set fees
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
req = await payment.build_set_txn_fees_req(wallet_handler, editor_did, libsovtoken_payment_method,
json.dumps(fees))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res1)
assert res1['op'] == 'REPLY'
else:
# add editors to set fees
editor_did1, editor_vk1 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did1, editor_vk1, None, editor_role)
assert res['op'] == 'REPLY'
editor_did2, editor_vk2 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did2, editor_vk2, None, editor_role)
assert res['op'] == 'REPLY'
editor_did3, editor_vk3 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did3, editor_vk3, None, editor_role)
assert res['op'] == 'REPLY'
req = await payment.build_set_txn_fees_req(wallet_handler, editor_did1, libsovtoken_payment_method,
json.dumps(fees))
req = await ledger.multi_sign_request(wallet_handler, editor_did1, req)
req = await ledger.multi_sign_request(wallet_handler, editor_did2, req)
req = await ledger.multi_sign_request(wallet_handler, editor_did3, req)
res1 = json.loads(await ledger.submit_request(pool_handler, req))
print(res1)
assert res1['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('sig_count', [0, 1, 3])
@pytest.mark.asyncio
async def test_case_payment(self, payment_init, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, sig_count):
libsovtoken_payment_method = 'sov'
trustee_did, _ = get_default_trustee
address1 = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet1')}))
address2 = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet2')}))
        # relax the rule for minting so the initial mint below succeeds
req = await ledger.build_auth_rule_request(trustee_did, '10000', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res1)
assert res1['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '10001', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': sig_count,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# initial minting
req, _ = await payment.build_mint_req(wallet_handler, trustee_did,
json.dumps([{"recipient": address1, "amount": 100}]), None)
res11 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res11)
assert res11['op'] == 'REPLY'
req, _ = await payment.build_get_payment_sources_request(wallet_handler, trustee_did, address1)
res111 = await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req)
source1 = \
json.loads(await payment.parse_get_payment_sources_response(libsovtoken_payment_method,
res111))[0]['source']
if sig_count == 0:
# add identity owner adder to send xfer
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, None)
assert res['op'] == 'REPLY'
req, _ = await payment.build_payment_req(wallet_handler, adder_did,
json.dumps([source1]),
json.dumps([{"recipient": address2, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
elif sig_count == 1:
# add adder to send xfer
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_payment_req(wallet_handler, adder_did,
json.dumps([source1]),
json.dumps([{"recipient": address2, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
else:
# add adders to send xfer
adder_did1, adder_vk1 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did1, adder_vk1, None, adder_role)
assert res['op'] == 'REPLY'
adder_did2, adder_vk2 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did2, adder_vk2, None, adder_role)
assert res['op'] == 'REPLY'
adder_did3, adder_vk3 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did3, adder_vk3, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_payment_req(wallet_handler, adder_did1,
json.dumps([source1]),
json.dumps([{"recipient": address2, "amount": 100}]), None)
req = await ledger.multi_sign_request(wallet_handler, adder_did1, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did2, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did3, req)
res1 = json.loads(await ledger.submit_request(pool_handler, req))
print(res1)
assert res1['op'] == 'REPLY'
    # TODO: might make sense to move this to a separate module since the other
    # tests here are organized per txn type
@pytest.mark.asyncio
async def test_case_forbidden(self, pool_handler, wallet_handler, get_default_trustee):
trustee_did, _ = get_default_trustee
trustee_role, trustee_role_num = 'TRUSTEE', '0'
logger.info("1 Adding new trustee to ledger")
new_trustee_did, new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, trustee_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REPLY'
logger.info("2 Setting forbidden auth rule for adding trustees")
req = await ledger.build_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', trustee_role_num,
json.dumps({
'constraint_id': 'FORBIDDEN',
}))
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
logger.info("3 Getting newly set forbidden constraint")
req = await ledger.build_get_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', trustee_role_num)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
assert res['result']['data'][0]['constraint']['constraint_id'] == 'FORBIDDEN'
logger.info("4 Trying to add one more trustee")
one_more_new_trustee_did, one_more_new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, trustee_did, one_more_new_trustee_did, one_more_new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
    # TODO: might make sense to move this to a separate module since the other
    # tests here are organized per txn type
@pytest.mark.asyncio
async def test_case_auth_rules(self, pool_handler, wallet_handler, get_default_trustee):
trustee_did, _ = get_default_trustee
trustee_role, trustee_role_num = 'TRUSTEE', '0'
steward_role, steward_role_num = 'STEWARD', '2'
logger.info("1 Creating new steward")
steward_did, steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, steward_did, steward_vk, None, steward_role)
assert res['op'] == 'REPLY'
logger.info("2 Creating some new trustee")
_new_trustee_did, _new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, _new_trustee_did, _new_trustee_vk, None, trustee_role)
assert res['op'] == 'REPLY'
logger.info("3 Trying to add new trustee using steward as submitter")
new_trustee_did, new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("4 Trying to add new steward using steward as submitter")
new_steward_did, new_steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_steward_did, new_steward_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("5 Send auth rules txn to allow stewards to add new trustees and stewrds")
one_steward_constraint = {
'constraint_id': 'ROLE',
'role': steward_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}
req = await ledger.build_auth_rules_request(trustee_did, json.dumps([
{
'auth_type': '1',
'auth_action': 'ADD',
'field': 'role',
'old_value': '*',
'new_value': trustee_role_num,
'constraint': one_steward_constraint
}, {
'auth_type': '1',
'auth_action': 'ADD',
'field': 'role',
'old_value': '*',
'new_value': steward_role_num,
'constraint': one_steward_constraint
},
]))
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
logger.info("6 Getting recently set auth rules")
for role_num in (trustee_role_num, steward_role_num):
req = await ledger.build_get_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', role_num)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
assert res['result']['data'][0]['constraint'] == one_steward_constraint
logger.info("7 Trying to add new trustee using trustee as submitter")
res = await send_nym(
pool_handler, wallet_handler, trustee_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("8 Trying to add new steward using trustee as submitter")
res = await send_nym(
pool_handler, wallet_handler, trustee_did, new_trustee_did, new_steward_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("9 Adding new trustee using steward as submitter")
new_trustee_did, new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REPLY'
logger.info("10 Adding new steward using steward as submitter")
new_steward_did, new_steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_steward_did, new_steward_vk, None, trustee_role
)
assert res['op'] == 'REPLY'
| import pytest
import asyncio
from system.utils import *
from random import randrange as rr
import hashlib
import time
from datetime import datetime, timedelta, timezone
from indy import payment
import logging
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures('docker_setup_and_teardown')
class TestAuthMapSuite:
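    # Each test case below installs an auth rule constraining who may ADD or
    # EDIT a given txn type, then verifies the ledger answers REPLY for an
    # allowed submitter and REJECT for everyone else.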
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_nym(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
new_did, new_vk = await did.create_and_store_my_did(wallet_handler, '{}')
# add adder to add nym
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to edit nym
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
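        # set rule for adding: only adder_role may create a NYM with an empty
        # (identity owner) role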
req = await ledger.build_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', '',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
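        # set rule for editing: only editor_role may change an existing NYM's verkey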
req = await ledger.build_auth_rule_request(trustee_did, '1', 'EDIT', 'verkey', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add nym with verkey by adder
res4 = await send_nym(pool_handler, wallet_handler, adder_did, new_did, adder_vk) # push adder vk
print(res4)
assert res4['op'] == 'REPLY'
# edit verkey by editor
res5 = await send_nym(pool_handler, wallet_handler, editor_did, new_did, editor_vk) # push editor vk
print(res5)
assert res5['op'] == 'REPLY'
# negative cases
if adder_role != editor_role:
# try to add another nym with editor did - should be rejected
res6 = await send_nym(pool_handler, wallet_handler, editor_did, random_did_and_json()[0])
print(res6)
assert res6['op'] == 'REJECT'
# try to edit initial nym one more time with adder did - should be rejected
res7 = await send_nym(pool_handler, wallet_handler, adder_did, new_did, adder_vk)
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_attrib(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add target nym
target_did, target_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, target_did, target_vk)
assert res['op'] == 'REPLY'
# add adder to add attrib
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to edit attrib
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '100', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '100', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add attrib for target did by non-owner adder
res4 = await send_attrib(pool_handler, wallet_handler, adder_did, target_did,
None, json.dumps({'key1': 'value1'}), None)
print(res4)
assert res4['op'] == 'REPLY'
# edit attrib for target did by non-owner editor
res5 = await send_attrib(pool_handler, wallet_handler, editor_did, target_did,
None, json.dumps({'key1': 'value2'}), None)
print(res5)
assert res5['op'] == 'REPLY'
# negative cases
if adder_role != editor_role:
# try to add another attrib with editor did - should be rejected
res6 = await send_attrib(pool_handler, wallet_handler, editor_did, target_did,
None, json.dumps({'key2': 'value1'}), None)
print(res6)
assert res6['op'] == 'REJECT'
# try to edit initial attrib one more time with adder did - should be rejected
res7 = await send_attrib(pool_handler, wallet_handler, adder_did, target_did,
None, json.dumps({'key1': 'value3'}), None)
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.asyncio
async def test_case_schema(self, pool_handler, wallet_handler, get_default_trustee,
                               adder_role, adder_role_num):  # schema txns can only be added, never edited
trustee_did, _ = get_default_trustee
# add adder to add schema
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '101', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# add schema
res4 = await send_schema(pool_handler, wallet_handler, adder_did, 'schema1', '1.0', json.dumps(['attr1']))
print(res4)
assert res4[1]['op'] == 'REPLY'
# edit schema - nobody can edit schemas - should be rejected
res5 = await send_schema(pool_handler, wallet_handler, adder_did, 'schema1', '1.0',
json.dumps(['attr1', 'attr2']))
print(res5)
assert res5[1]['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
# use the same did with different roles to ADD and EDIT since adder did is a part of unique cred def id
async def test_case_cred_def(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add cred def
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
schema_id, _ = await send_schema(pool_handler, wallet_handler, trustee_did,
'schema1', '1.0', json.dumps(["age", "sex", "height", "name"]))
await asyncio.sleep(1)
res = await get_schema(pool_handler, wallet_handler, trustee_did, schema_id)
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '102', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '102', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add cred def
cred_def_id, cred_def_json = \
await anoncreds.issuer_create_and_store_credential_def(wallet_handler, adder_did, schema_json, 'TAG1',
None, json.dumps({'support_revocation': False}))
request = await ledger.build_cred_def_request(adder_did, cred_def_json)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res4)
assert res4['op'] == 'REPLY'
if adder_role != editor_role:
# try to edit cred def as adder - should be rejected
_request = json.loads(request)
_request['operation']['data']['primary']['n'] = '123456789'
_request['reqId'] += _request['reqId']
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(_request)))
print(res5)
assert res5['op'] == 'REJECT'
# change adder role to edit cred def
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, None, None, editor_role)
print(res)
assert res['op'] == 'REPLY'
# edit cred def
request = json.loads(request)
request['operation']['data']['primary']['n'] = '123456'
request['reqId'] += request['reqId']
res6 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(request)))
print(res6)
assert res6['op'] == 'REPLY'
if adder_role != editor_role:
# try to add another cred def as editor - should be rejected
cred_def_id, cred_def_json = \
await anoncreds.issuer_create_and_store_credential_def(wallet_handler, adder_did, schema_json, 'TAG2',
None, json.dumps({'support_revocation': True}))
request = await ledger.build_cred_def_request(adder_did, cred_def_json)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
# use the same did with different roles to ADD and EDIT since adder did is a part of unique revoc reg def id
async def test_case_revoc_reg_def(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add revoc reg def
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
schema_id, _ = await send_schema(pool_handler, wallet_handler, trustee_did,
'schema1', '1.0', json.dumps(['age', 'sex', 'height', 'name']))
await asyncio.sleep(1)
res = await get_schema(pool_handler, wallet_handler, trustee_did, schema_id)
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
cred_def_id, _, res = await send_cred_def(pool_handler, wallet_handler, trustee_did, schema_json,
'cred_def_tag', None, json.dumps({'support_revocation': True}))
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '113', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '113', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add revoc reg def
tails_writer_config = json.dumps({'base_dir': 'tails', 'uri_pattern': ''})
tails_writer_handle = await blob_storage.open_writer('default', tails_writer_config)
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG1',
cred_def_id, json.dumps({
'max_cred_num': 1,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
request = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res4)
assert res4['op'] == 'REPLY'
if adder_role != editor_role:
# try to edit revoc reg def as adder - should be rejected
_request = json.loads(request)
_request['operation']['value']['tailsHash'] = random_string(30)
_request['reqId'] += _request['reqId']
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(_request)))
print(res5)
assert res5['op'] == 'REJECT'
# change adder role to edit revoc reg def
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, None, None, editor_role)
print(res)
assert res['op'] == 'REPLY'
# edit revoc reg def
request = json.loads(request)
request['operation']['value']['tailsHash'] = random_string(20)
request['reqId'] += request['reqId']
res6 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(request)))
print(res6)
assert res6['op'] == 'REPLY'
if adder_role != editor_role:
# try to add another revoc reg def as editor - should be rejected
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG2',
cred_def_id, json.dumps({
'max_cred_num': 2,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
request = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_revoc_reg_entry(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add revoc reg entry
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
schema_id, _ = await send_schema(pool_handler, wallet_handler, trustee_did,
'schema1', '1.0', json.dumps(['age', 'sex', 'height', 'name']))
await asyncio.sleep(1)
res = await get_schema(pool_handler, wallet_handler, trustee_did, schema_id)
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
cred_def_id, _, res = await send_cred_def(pool_handler, wallet_handler, trustee_did, schema_json,
'cred_def_tag', None, json.dumps({'support_revocation': True}))
        # allow any role to add revoc reg defs, so the prerequisite def can be
        # created even when the adder is a NETWORK_MONITOR
req = await ledger.build_auth_rule_request(trustee_did, '113', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res21 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res21)
assert res21['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '114', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res22 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res22)
assert res22['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '114', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add revoc reg entry
tails_writer_config = json.dumps({'base_dir': 'tails', 'uri_pattern': ''})
tails_writer_handle = await blob_storage.open_writer('default', tails_writer_config)
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG1',
cred_def_id, json.dumps({
'max_cred_num': 10,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
req = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
assert res['op'] == 'REPLY'
request = await ledger.build_revoc_reg_entry_request(adder_did, revoc_reg_def_id, 'CL_ACCUM',
revoc_reg_entry_json)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res4)
assert res4['op'] == 'REPLY'
if adder_role != editor_role:
# try to edit revoc reg entry as adder - should be rejected
_request = json.loads(request)
_request['operation']['value']['prevAccum'] = _request['operation']['value']['accum']
_request['operation']['value']['accum'] = random_string(20)
_request['operation']['value']['revoked'] = [7, 8, 9]
_request['reqId'] += _request['reqId']
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(_request)))
print(res5)
assert res5['op'] == 'REJECT'
# change adder role to edit revoc reg def
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, None, None, editor_role)
print(res)
assert res['op'] == 'REPLY'
# edit revoc reg entry
request = json.loads(request)
request['operation']['value']['prevAccum'] = request['operation']['value']['accum']
request['operation']['value']['accum'] = random_string(10)
request['operation']['value']['revoked'] = [1, 2, 3]
request['reqId'] += request['reqId']
res6 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(request)))
print(res6)
assert res6['op'] == 'REPLY'
if adder_role != editor_role:
# try to add another revoc reg entry as editor - should be rejected
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG2',
cred_def_id, json.dumps({
'max_cred_num': 20,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
req = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
assert res['op'] == 'REPLY'
request = await ledger.build_revoc_reg_entry_request(adder_did, revoc_reg_def_id, 'CL_ACCUM',
revoc_reg_entry_json)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.skip('INDY-2024')
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_node(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add node
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to edit node
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '0', 'ADD', 'services', '*', str(['VALIDATOR']),
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '0', 'EDIT', 'services', str(['VALIDATOR']), str([]),
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add node
alias = random_string(5)
client_ip = '{}.{}.{}.{}'.format(rr(1, 255), 0, 0, rr(1, 255))
client_port = rr(1, 32767)
node_ip = '{}.{}.{}.{}'.format(rr(1, 255), 0, 0, rr(1, 255))
node_port = rr(1, 32767)
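        # random alias, IPs and ports are fine here: the node is never actually
        # started, the test only checks that the txn is authorized and accepted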
req = await ledger.build_node_request(adder_did, adder_vk, # adder_vk is used as node target did here
json.dumps(
{
'alias': alias,
'client_ip': client_ip,
'client_port': client_port,
'node_ip': node_ip,
'node_port': node_port,
'services': ['VALIDATOR']
}))
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res4)
assert res4['op'] == 'REPLY'
# edit node
req = await ledger.build_node_request(editor_did, adder_vk, # adder_vk is used as node target did here
json.dumps(
{
'alias': alias,
'services': []
}))
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res5)
assert res5['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_pool_upgrade(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
        # add adder to start pool upgrade
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to cancel pool upgrade
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '109', 'ADD', 'action', '*', 'start',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '109', 'EDIT', 'action', 'start', 'cancel',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# start pool upgrade
init_time = 30
version = '1.9.999'
name = 'upgrade' + '_' + version + '_' + datetime.now(tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%S%z')
_sha256 = hashlib.sha256().hexdigest()
_timeout = 5
reinstall = False
force = False
package = 'indy-node'
dests = ['Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv', '<KEY>',
'DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya', '4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA',
'4SWokCJWJc69Tn74VvLS6t2G2ucvXqM9FDMsWJjmsUxe', 'Cv1Ehj43DDM5ttNBmC6VPpEfwXWwfGktHwjDJsTV5Fz8',
'BM8dTooz5uykCbYSAAFwKNkYfT4koomBHsSWHTDtkjhW']
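        # stagger the upgrade schedule: each of the 7 nodes gets a slot 5 minutes
        # after the previous one, starting init_time minutes from now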
docker_7_schedule = json.dumps(dict(
{dest: datetime.strftime(datetime.now(tz=timezone.utc) + timedelta(minutes=init_time + i * 5),
'%Y-%m-%dT%H:%M:%S%z')
for dest, i in zip(dests, range(len(dests)))}
))
req = await ledger.build_pool_upgrade_request(adder_did, name, version, 'start', _sha256, _timeout,
docker_7_schedule, None, reinstall, force, package)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res4)
assert res4['op'] == 'REPLY'
# cancel pool upgrade
req = await ledger.build_pool_upgrade_request(editor_did, name, version, 'cancel', _sha256, _timeout,
docker_7_schedule, None, reinstall, force, package)
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res5)
assert res5['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.asyncio
async def test_case_pool_restart(self, pool_handler, wallet_handler, get_default_trustee,
                                     adder_role, adder_role_num):  # pool restart supports ADD only
trustee_did, _ = get_default_trustee
# add adder to restart pool
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
await asyncio.sleep(15)
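        # give the pool a moment to settle before changing auth rules
        # (empirically chosen delay)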
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '118', 'ADD', 'action', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# restart pool
        restart_at = datetime.strftime(
            datetime.now(tz=timezone.utc) + timedelta(minutes=60), '%Y-%m-%dT%H:%M:%S%z')
        req = await ledger.build_pool_restart_request(adder_did, 'start', restart_at)
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
res3 = [json.loads(v) for k, v in res3.items()]
print(res3)
assert all([res['op'] == 'REPLY' for res in res3])
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.asyncio
async def test_case_validator_info(self, pool_handler, wallet_handler, get_default_trustee,
                                       adder_role, adder_role_num):  # validator info supports ADD only
trustee_did, _ = get_default_trustee
# add adder to get validator info
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
await asyncio.sleep(15)
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '119', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
req = await ledger.build_get_validator_info_request(adder_did)
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
res3 = [json.loads(v) for k, v in res3.items()]
print(res3)
assert all([res['op'] == 'REPLY' for res in res3])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_pool_config(self, pool_handler, wallet_handler, get_default_trustee,
                                    editor_role, editor_role_num):  # pool config supports EDIT only
trustee_did, _ = get_default_trustee
# add editor to edit pool config
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '111', 'EDIT', 'action', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
req = await ledger.build_pool_config_request(editor_did, False, False)
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res3)
assert res3['op'] == 'REPLY'
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_auth_rule(self, pool_handler, wallet_handler, get_default_trustee,
                                  editor_role, editor_role_num):  # auth rules support EDIT only
trustee_did, _ = get_default_trustee
# add editor to edit auth rule
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '120', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
await asyncio.sleep(15)
req = await ledger.build_auth_rule_request(editor_did, '111', 'EDIT', 'action', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 5,
'need_to_be_owner': True,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res3)
assert res3['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('sig_count', [0, 1, 3])
@pytest.mark.asyncio
async def test_case_mint(self, payment_init, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, sig_count):
libsovtoken_payment_method = 'sov'
trustee_did, _ = get_default_trustee
address = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet0')}))
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '10000', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': sig_count,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
if sig_count == 0:
# add identity owner adder to mint tokens
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, None)
assert res['op'] == 'REPLY'
req, _ = await payment.build_mint_req(wallet_handler, adder_did,
json.dumps([{"recipient": address, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
elif sig_count == 1:
# add adder to mint tokens
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_mint_req(wallet_handler, adder_did,
json.dumps([{"recipient": address, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
else:
# add adders to mint tokens
adder_did1, adder_vk1 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did1, adder_vk1, None, adder_role)
assert res['op'] == 'REPLY'
adder_did2, adder_vk2 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did2, adder_vk2, None, adder_role)
assert res['op'] == 'REPLY'
adder_did3, adder_vk3 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did3, adder_vk3, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_mint_req(wallet_handler, adder_did1,
json.dumps([{"recipient": address, "amount": 100}]), None)
req = await ledger.multi_sign_request(wallet_handler, adder_did1, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did2, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did3, req)
res1 = json.loads(await ledger.submit_request(pool_handler, req))
print(res1)
assert res1['op'] == 'REPLY'
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.parametrize('sig_count', [0, 1, 3])
@pytest.mark.asyncio
async def test_case_set_fees(self, payment_init, pool_handler, wallet_handler, get_default_trustee,
editor_role, editor_role_num, sig_count):
libsovtoken_payment_method = 'sov'
fees = {'1': 1, '100': 1, '101': 1, '102': 1, '113': 1, '114': 1, '10001': 1}
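        # keys are ledger txn type codes (e.g. '1' = NYM, '100' = ATTRIB,
        # '10001' = XFER_PUBLIC), values are the fee in tokens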
trustee_did, _ = get_default_trustee
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '20000', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': sig_count,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
if sig_count == 0:
# add identity owner editor to set fees
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, None)
assert res['op'] == 'REPLY'
req = await payment.build_set_txn_fees_req(wallet_handler, editor_did, libsovtoken_payment_method,
json.dumps(fees))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res1)
assert res1['op'] == 'REPLY'
elif sig_count == 1:
# add editor to set fees
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
req = await payment.build_set_txn_fees_req(wallet_handler, editor_did, libsovtoken_payment_method,
json.dumps(fees))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res1)
assert res1['op'] == 'REPLY'
else:
# add editors to set fees
editor_did1, editor_vk1 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did1, editor_vk1, None, editor_role)
assert res['op'] == 'REPLY'
editor_did2, editor_vk2 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did2, editor_vk2, None, editor_role)
assert res['op'] == 'REPLY'
editor_did3, editor_vk3 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did3, editor_vk3, None, editor_role)
assert res['op'] == 'REPLY'
req = await payment.build_set_txn_fees_req(wallet_handler, editor_did1, libsovtoken_payment_method,
json.dumps(fees))
req = await ledger.multi_sign_request(wallet_handler, editor_did1, req)
req = await ledger.multi_sign_request(wallet_handler, editor_did2, req)
req = await ledger.multi_sign_request(wallet_handler, editor_did3, req)
res1 = json.loads(await ledger.submit_request(pool_handler, req))
print(res1)
assert res1['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('sig_count', [0, 1, 3])
@pytest.mark.asyncio
async def test_case_payment(self, payment_init, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, sig_count):
libsovtoken_payment_method = 'sov'
trustee_did, _ = get_default_trustee
address1 = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet1')}))
address2 = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet2')}))
        # relax the MINT rule so any role can perform the initial minting below
req = await ledger.build_auth_rule_request(trustee_did, '10000', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res1)
assert res1['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '10001', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': sig_count,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# initial minting
req, _ = await payment.build_mint_req(wallet_handler, trustee_did,
json.dumps([{"recipient": address1, "amount": 100}]), None)
res11 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res11)
assert res11['op'] == 'REPLY'
req, _ = await payment.build_get_payment_sources_request(wallet_handler, trustee_did, address1)
res111 = await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req)
        source1 = json.loads(await payment.parse_get_payment_sources_response(
            libsovtoken_payment_method, res111))[0]['source']
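        # source1 is the single payment source created by the initial mint; the
        # XFER below spends it in full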
if sig_count == 0:
# add identity owner adder to send xfer
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, None)
assert res['op'] == 'REPLY'
req, _ = await payment.build_payment_req(wallet_handler, adder_did,
json.dumps([source1]),
json.dumps([{"recipient": address2, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
elif sig_count == 1:
# add adder to send xfer
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_payment_req(wallet_handler, adder_did,
json.dumps([source1]),
json.dumps([{"recipient": address2, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
else:
# add adders to send xfer
adder_did1, adder_vk1 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did1, adder_vk1, None, adder_role)
assert res['op'] == 'REPLY'
adder_did2, adder_vk2 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did2, adder_vk2, None, adder_role)
assert res['op'] == 'REPLY'
adder_did3, adder_vk3 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did3, adder_vk3, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_payment_req(wallet_handler, adder_did1,
json.dumps([source1]),
json.dumps([{"recipient": address2, "amount": 100}]), None)
req = await ledger.multi_sign_request(wallet_handler, adder_did1, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did2, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did3, req)
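            # three multi-signatures satisfy the sig_count=3 constraint, so the
            # request is submitted as-is rather than signed again by one submitter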
res1 = json.loads(await ledger.submit_request(pool_handler, req))
print(res1)
assert res1['op'] == 'REPLY'
# TODO might make sense to move to separate module since other tests here
# organized per txn type
@pytest.mark.asyncio
async def test_case_forbidden(self, pool_handler, wallet_handler, get_default_trustee):
trustee_did, _ = get_default_trustee
trustee_role, trustee_role_num = 'TRUSTEE', '0'
logger.info("1 Adding new trustee to ledger")
new_trustee_did, new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, trustee_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REPLY'
logger.info("2 Setting forbidden auth rule for adding trustees")
req = await ledger.build_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', trustee_role_num,
json.dumps({
'constraint_id': 'FORBIDDEN',
}))
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
logger.info("3 Getting newly set forbidden constraint")
req = await ledger.build_get_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', trustee_role_num)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
assert res['result']['data'][0]['constraint']['constraint_id'] == 'FORBIDDEN'
logger.info("4 Trying to add one more trustee")
one_more_new_trustee_did, one_more_new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, trustee_did, one_more_new_trustee_did, one_more_new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
# TODO might make sense to move to separate module since other tests here
# organized per txn type
@pytest.mark.asyncio
async def test_case_auth_rules(self, pool_handler, wallet_handler, get_default_trustee):
trustee_did, _ = get_default_trustee
trustee_role, trustee_role_num = 'TRUSTEE', '0'
steward_role, steward_role_num = 'STEWARD', '2'
logger.info("1 Creating new steward")
steward_did, steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, steward_did, steward_vk, None, steward_role)
assert res['op'] == 'REPLY'
logger.info("2 Creating some new trustee")
_new_trustee_did, _new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, _new_trustee_did, _new_trustee_vk, None, trustee_role)
assert res['op'] == 'REPLY'
logger.info("3 Trying to add new trustee using steward as submitter")
new_trustee_did, new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("4 Trying to add new steward using steward as submitter")
new_steward_did, new_steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_steward_did, new_steward_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("5 Send auth rules txn to allow stewards to add new trustees and stewrds")
one_steward_constraint = {
'constraint_id': 'ROLE',
'role': steward_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}
req = await ledger.build_auth_rules_request(trustee_did, json.dumps([
{
'auth_type': '1',
'auth_action': 'ADD',
'field': 'role',
'old_value': '*',
'new_value': trustee_role_num,
'constraint': one_steward_constraint
}, {
'auth_type': '1',
'auth_action': 'ADD',
'field': 'role',
'old_value': '*',
'new_value': steward_role_num,
'constraint': one_steward_constraint
},
]))
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
logger.info("6 Getting recently set auth rules")
for role_num in (trustee_role_num, steward_role_num):
req = await ledger.build_get_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', role_num)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
assert res['result']['data'][0]['constraint'] == one_steward_constraint
logger.info("7 Trying to add new trustee using trustee as submitter")
res = await send_nym(
pool_handler, wallet_handler, trustee_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("8 Trying to add new steward using trustee as submitter")
res = await send_nym(
pool_handler, wallet_handler, trustee_did, new_trustee_did, new_steward_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("9 Adding new trustee using steward as submitter")
new_trustee_did, new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REPLY'
logger.info("10 Adding new steward using steward as submitter")
new_steward_did, new_steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_steward_did, new_steward_vk, None, trustee_role
)
assert res['op'] == 'REPLY'
| en | 0.883766 | # add adder to add nym # add editor to edit nym # add nym with verkey by adder # push adder vk # edit verkey by editor # push editor vk # negative cases # try to add another nym with editor did - should be rejected # try to edit initial nym one more time with adder did - should be rejected # add target nym # add adder to add attrib # add editor to edit attrib # set rule for adding # set rule for editing # add attrib for target did by non-owner adder # edit attrib for target did by non-owner editor # negative cases # try to add another attrib with editor did - should be rejected # try to edit initial attrib one more time with adder did - should be rejected # we can add schema only # add adder to add schema # set rule for adding # add schema # edit schema - nobody can edit schemas - should be rejected # use the same did with different roles to ADD and EDIT since adder did is a part of unique cred def id # add adder to add cred def # set rule for adding # set rule for editing # add cred def # try to edit cred def as adder - should be rejected # change adder role to edit cred def # edit cred def # try to add another cred def as editor - should be rejected # use the same did with different roles to ADD and EDIT since adder did is a part of unique revoc reg def id # add adder to add revoc reg def # set rule for adding # set rule for editing # add revoc reg def # try to edit revoc reg def as adder - should be rejected # change adder role to edit revoc reg def # edit revoc reg def # try to add another revoc reg def as editor - should be rejected # add adder to add revoc reg entry # set rule for revoc reg def adding - network monitor case # set rule for adding # set rule for editing # add revoc reg entry # try to edit revoc reg entry as adder - should be rejected # change adder role to edit revoc reg def # edit revoc reg entry # try to add another revoc reg entry as editor - should be rejected # add adder to add node # add editor to edit node # set rule for adding # set rule for editing # add node # adder_vk is used as node target did here # edit node # adder_vk is used as node target did here # add adder to start pool upgrdae # add editor to cancel pool upgrade # set rule for adding # set rule for editing # start pool upgrade # cancel pool upgrade # we can add pool restart only # add adder to restart pool # set rule for adding # restart pool # we can add validator info only # add adder to get validator info # set rule for adding # we can edit pool config only # add editor to edit pool config # set rule for editing # we can edit auth rule only # add editor to edit auth rule # set rule for editing # set rule for adding # add identity owner adder to mint tokens # add adder to mint tokens # add adders to mint tokens # set rule for adding # add identity owner editor to set fees # add editor to set fees # add editors to set fees # set rule for easier mint adding # set rule for adding # initial minting # add identity owner adder to send xfer # add adder to send xfer # add adders to send xfer # TODO might make sense to move to separate module since other tests here # organized per txn type # TODO might make sense to move to separate module since other tests here # organized per txn type | 1.898396 | 2 |
test/integration/test_reindex.py | jgough/opensearch-curator | 8 | 7464 | <reponame>jgough/opensearch-curator<filename>test/integration/test_reindex.py
import opensearchpy
import curator
import os
import json
import string
import random
import tempfile
import click
from click import testing as clicktest
import time
from . import CuratorTestCase
from unittest.case import SkipTest
from . import testvars as testvars
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
rhost, rport = os.environ.get('REMOTE_ES_SERVER', 'localhost:9201').split(':')
port = int(port) if port else 9200
rport = int(rport) if rport else 9201
class TestActionFileReindex(CuratorTestCase):
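    # Every test writes a curator client config plus an action file (paths
    # provided by CuratorTestCase in self.args) and then drives the real CLI
    # through click's CliRunner, exercising the reindex path end to end.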
def test_reindex_manual(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 3
self.create_index(source)
self.add_docs(source)
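        # add_docs() is assumed to seed three documents, matching expected above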
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_selected(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 3
self.create_index(source)
self.add_docs(source)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, 'REINDEX_SELECTION', dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_empty_list(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = '.tasks'
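        # no source index exists, so after the (empty) run the only index left
        # should be .tasks, created by the cluster for task bookkeeping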
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, curator.get_indices(self.client)[0])
def test_reindex_selected_many_to_one(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
dest = 'my_dest'
expected = 6
self.create_index(source1)
self.add_docs(source1)
self.create_index(source2)
for i in ["4", "5", "6"]:
            ver = curator.get_version(self.client)
            if ver >= (7, 0, 0):
                # doc types were removed in ES/OpenSearch 7.x, so omit doc_type here
                self.client.create(
                    index=source2, id=i, body={"doc" + i: 'TEST DOCUMENT'})
            else:
                self.client.create(
                    index=source2, doc_type='doc', id=i, body={"doc" + i: 'TEST DOCUMENT'})
# Decorators make this pylint exception necessary
# pylint: disable=E1123
self.client.indices.flush(index=source2, force=True)
self.client.indices.refresh(index=source2)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(
self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, 'REINDEX_SELECTION', dest)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.client.indices.refresh(index=dest)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_selected_empty_list_fail(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
dest = 'my_dest'
expected = 6
self.create_index(source1)
self.add_docs(source1)
self.create_index(source2)
for i in ["4", "5", "6"]:
self.client.create(
index=source2, doc_type='log', id=i,
body={"doc" + i :'TEST DOCUMENT'},
)
# Decorators make this pylint exception necessary
# pylint: disable=E1123
self.client.indices.flush(index=source2, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex_empty_list.format('false', wait_interval, max_wait, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(_.exit_code, 1)
def test_reindex_selected_empty_list_pass(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
dest = 'my_dest'
expected = 6
self.create_index(source1)
self.add_docs(source1)
self.create_index(source2)
for i in ["4", "5", "6"]:
self.client.create(
index=source2, doc_type='log', id=i,
body={"doc" + i :'TEST DOCUMENT'},
)
# Decorators make this pylint exception necessary
# pylint: disable=E1123
self.client.indices.flush(index=source2, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex_empty_list.format('true', wait_interval, max_wait, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(_.exit_code, 0)
def test_reindex_from_remote(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
prefix = 'my_'
dest = 'my_dest'
expected = 6
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
except:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_migrate_from_remote(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
prefix = 'my_'
dest = 'MIGRATION'
expected = 3
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
except:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
# And now the neat trick of verifying that the reindex worked to both
# indices, and they preserved their names
self.assertEqual(expected, self.client.count(index=source1)['count'])
self.assertEqual(expected, self.client.count(index=source2)['count'])
def test_reindex_migrate_from_remote_with_pre_suf_fixes(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
prefix = 'my_'
dest = 'MIGRATION'
expected = 3
mpfx = 'pre-'
msfx = '-fix'
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
except:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.migration_reindex.format(
wait_interval,
max_wait,
mpfx,
msfx,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
# And now the neat trick of verifying that the reindex worked to both
# indices, and they preserved their names
self.assertEqual(expected, self.client.count(index='{0}{1}{2}'.format(mpfx,source1,msfx))['count'])
        self.assertEqual(expected, self.client.count(index='{0}{1}{2}'.format(mpfx,source2,msfx))['count'])
def test_reindex_from_remote_no_connection(self):
wait_interval = 1
max_wait = 3
bad_port = 70000
dest = 'my_dest'
expected = 1
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, bad_port),
'REINDEX_SELECTION',
dest,
'my_'
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, _.exit_code)
def test_reindex_from_remote_no_indices(self):
wait_interval = 1
max_wait = 3
source1 = 'wrong1'
source2 = 'wrong2'
prefix = 'my_'
dest = 'my_dest'
expected = 1
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
except:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
self.assertEqual(expected, _.exit_code)
def test_reindex_into_alias(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 3
alias_body = {'aliases' : {dest : {}}}
self.client.indices.create(index='dummy', body=alias_body)
self.add_docs(source)
self.write_config(self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(
self.args['actionfile'], testvars.reindex.format(wait_interval, max_wait, source, dest)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_manual_date_math(self):
wait_interval = 1
max_wait = 3
source = '<source-{now/d}>'
dest = '<target-{now/d}>'
expected = 3
self.create_index(source)
self.add_docs(source)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_bad_mapping(self):
# This test addresses GitHub issue #1260
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 1
ver = curator.get_version(self.client)
if ver < (7, 0, 0):
request_body = {
"settings": { "number_of_shards": 1, "number_of_replicas": 0},
"mappings": { "doc": { "properties": { "doc1": { "type": "keyword" }}}}
}
else:
request_body = {
"settings": { "number_of_shards": 1, "number_of_replicas": 0},
"mappings": { "properties": { "doc1": { "type": "keyword" }}}
}
self.client.indices.create(index=source, body=request_body)
self.add_docs(source)
# Create the dest index with a different mapping.
if ver < (7, 0, 0):
request_body['mappings']['doc']['properties']['doc1']['type'] = 'integer'
else:
request_body['mappings']['properties']['doc1']['type'] = 'integer'
self.client.indices.create(index=dest, body=request_body)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
        self.assertEqual(expected, _.exit_code)
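# Illustrative sketch (not from this repository): the testvars.reindex template these tests
# format is, in spirit, a Curator action file along the lines below; the exact fields are an
# assumption based on Curator's documented reindex action, with {0}..{3} as the placeholders:
#
# actions:
#   1:
#     action: reindex
#     options:
#       wait_interval: {0}
#       max_wait: {1}
#       request_body:
#         source:
#           index: {2}
#         dest:
#           index: {3}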
libhustpass/login.py | naivekun/libhustpass | 26 | 7465 | import libhustpass.sbDes as sbDes
import libhustpass.captcha as captcha
import requests
import re
import random
def toWideChar(data):
data_bytes = bytes(data, encoding="utf-8")
ret = []
for i in data_bytes:
ret.extend([0, i])
while len(ret) % 8 != 0:
ret.append(0)
return ret
def Enc(data, first_key, second_key, third_key):
data_bytes = toWideChar(data)
key1_bytes = toWideChar(first_key)
key2_bytes = toWideChar(second_key)
key3_bytes = toWideChar(third_key)
ret_ = []
i = 0
while i < len(data_bytes):
tmp = data_bytes[i : i + 8]
x = 0
y = 0
z = 0
while x < len(key1_bytes):
enc1_ = sbDes.des(key1_bytes[x : x + 8], sbDes.ECB)
tmp = list(enc1_.encrypt(tmp))
x += 8
while y < len(key2_bytes):
enc2_ = sbDes.des(key2_bytes[y : y + 8], sbDes.ECB)
tmp = list(enc2_.encrypt(tmp))
y += 8
while z < len(key3_bytes):
enc3_ = sbDes.des(key3_bytes[z : z + 8], sbDes.ECB)
tmp = list(enc3_.encrypt(tmp))
z += 8
ret_.extend(tmp)
i += 8
ret = ""
for i in ret_:
ret += "%02X" % i
return ret
def login(username, password, url):
r = requests.session()
login_html = r.get(url)
captcha_content = r.get("https://pass.hust.edu.cn/cas/code?"+str(random.random()), stream=True)
captcha_content.raw.decode_content = True
nonce = re.search(
'<input type="hidden" id="lt" name="lt" value="(.*)" />', login_html.text
).group(1)
action = re.search(
'<form id="loginForm" action="(.*)" method="post">', login_html.text
).group(1)
post_params = {
"code": captcha.deCaptcha(captcha_content.raw),
"rsa": Enc(username + password + nonce, "1", "2", "3"),
"ul": len(username),
"pl": len(password),
"lt": nonce,
"execution": "e1s1",
"_eventId": "submit",
}
redirect_html = r.post(
"https://pass.hust.edu.cn" + action, data=post_params, allow_redirects=False
)
try:
return redirect_html.headers["Location"]
except:
raise Exception("login failed")
| import libhustpass.sbDes as sbDes
import libhustpass.captcha as captcha
import requests
import re
import random
def toWideChar(data):
data_bytes = bytes(data, encoding="utf-8")
ret = []
for i in data_bytes:
ret.extend([0, i])
while len(ret) % 8 != 0:
ret.append(0)
return ret
def Enc(data, first_key, second_key, third_key):
data_bytes = toWideChar(data)
key1_bytes = toWideChar(first_key)
key2_bytes = toWideChar(second_key)
key3_bytes = toWideChar(third_key)
ret_ = []
i = 0
while i < len(data_bytes):
tmp = data_bytes[i : i + 8]
x = 0
y = 0
z = 0
while x < len(key1_bytes):
enc1_ = sbDes.des(key1_bytes[x : x + 8], sbDes.ECB)
tmp = list(enc1_.encrypt(tmp))
x += 8
while y < len(key2_bytes):
enc2_ = sbDes.des(key2_bytes[y : y + 8], sbDes.ECB)
tmp = list(enc2_.encrypt(tmp))
y += 8
while z < len(key3_bytes):
enc3_ = sbDes.des(key3_bytes[z : z + 8], sbDes.ECB)
tmp = list(enc3_.encrypt(tmp))
z += 8
ret_.extend(tmp)
i += 8
ret = ""
for i in ret_:
ret += "%02X" % i
return ret
def login(username, password, url):
r = requests.session()
login_html = r.get(url)
captcha_content = r.get("https://pass.hust.edu.cn/cas/code?"+str(random.random()), stream=True)
captcha_content.raw.decode_content = True
nonce = re.search(
'<input type="hidden" id="lt" name="lt" value="(.*)" />', login_html.text
).group(1)
action = re.search(
'<form id="loginForm" action="(.*)" method="post">', login_html.text
).group(1)
post_params = {
"code": captcha.deCaptcha(captcha_content.raw),
"rsa": Enc(username + password + nonce, "1", "2", "3"),
"ul": len(username),
"pl": len(password),
"lt": nonce,
"execution": "e1s1",
"_eventId": "submit",
}
redirect_html = r.post(
"https://pass.hust.edu.cn" + action, data=post_params, allow_redirects=False
)
try:
return redirect_html.headers["Location"]
except:
raise Exception("login failed")
| none | 1 | 2.755907 | 3 |
|
code/contours_sorting_by_area.py | Asadullah-Dal17/contours-detection-advance | 1 | 7466 | import cv2 as cv
import numpy as np
def areaFinder(contours):
areas = []
for c in contours:
a =cv.contourArea(c)
areas.append(a)
return areas
def sortedContoursByArea(img, larger_to_smaller=True):
edges_img = cv.Canny(img, 100, 150)
contours , h = cv.findContours(edges_img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
sorted_contours = sorted(contours, key=cv.contourArea, reverse=larger_to_smaller)
return sorted_contours
img = cv.imread('./Images/sample-image.png')
sorted_contours = sortedContoursByArea(img, larger_to_smaller=True)
# print(areaFinder(contours))
print(areaFinder(sorted_contours))
for c in sorted_contours:
cv.drawContours(img, c, -1, 244, 3)
cv.imshow('img', img)
cv.waitKey(0)
cv.destroyAllWindows() | import cv2 as cv
import numpy as np
def areaFinder(contours):
areas = []
for c in contours:
a =cv.contourArea(c)
areas.append(a)
return areas
def sortedContoursByArea(img, larger_to_smaller=True):
edges_img = cv.Canny(img, 100, 150)
contours , h = cv.findContours(edges_img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
sorted_contours = sorted(contours, key=cv.contourArea, reverse=larger_to_smaller)
return sorted_contours
img = cv.imread('./Images/sample-image.png')
sorted_contours = sortedContoursByArea(img, larger_to_smaller=True)
# print(areaFinder(contours))
print(areaFinder(sorted_contours))
for c in sorted_contours:
cv.drawContours(img, c, -1, 244, 3)
cv.imshow('img', img)
cv.waitKey(0)
cv.destroyAllWindows() | en | 0.115267 | # print(areaFinder(contours)) | 3.091959 | 3 |
matchzoo/metrics/precision.py | ChrisRBXiong/MatchZoo-py | 468 | 7467 | <filename>matchzoo/metrics/precision.py
"""Precision for ranking."""
import numpy as np
from matchzoo.engine.base_metric import (
BaseMetric, sort_and_couple, RankingMetric
)
class Precision(RankingMetric):
"""Precision metric."""
ALIAS = 'precision'
def __init__(self, k: int = 1, threshold: float = 0.):
"""
:class:`PrecisionMetric` constructor.
:param k: Number of results to consider.
:param threshold: the label threshold of relevance degree.
"""
self._k = k
self._threshold = threshold
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS}@{self._k}({self._threshold})"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate precision@k.
Example:
>>> y_true = [0, 0, 0, 1]
>>> y_pred = [0.2, 0.4, 0.3, 0.1]
>>> Precision(k=1)(y_true, y_pred)
0.0
>>> Precision(k=2)(y_true, y_pred)
0.0
>>> Precision(k=4)(y_true, y_pred)
0.25
>>> Precision(k=5)(y_true, y_pred)
0.2
:param y_true: The ground true label of each document.
:param y_pred: The predicted scores of each document.
:return: Precision @ k
:raises: ValueError: len(r) must be >= k.
"""
if self._k <= 0:
raise ValueError(f"k must be greater than 0."
f"{self._k} received.")
coupled_pair = sort_and_couple(y_true, y_pred)
precision = 0.0
for idx, (label, score) in enumerate(coupled_pair):
if idx >= self._k:
break
if label > self._threshold:
precision += 1.
return precision / self._k
| <filename>matchzoo/metrics/precision.py
"""Precision for ranking."""
import numpy as np
from matchzoo.engine.base_metric import (
BaseMetric, sort_and_couple, RankingMetric
)
class Precision(RankingMetric):
"""Precision metric."""
ALIAS = 'precision'
def __init__(self, k: int = 1, threshold: float = 0.):
"""
:class:`PrecisionMetric` constructor.
:param k: Number of results to consider.
:param threshold: the label threshold of relevance degree.
"""
self._k = k
self._threshold = threshold
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS}@{self._k}({self._threshold})"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate precision@k.
Example:
>>> y_true = [0, 0, 0, 1]
>>> y_pred = [0.2, 0.4, 0.3, 0.1]
>>> Precision(k=1)(y_true, y_pred)
0.0
>>> Precision(k=2)(y_true, y_pred)
0.0
>>> Precision(k=4)(y_true, y_pred)
0.25
>>> Precision(k=5)(y_true, y_pred)
0.2
:param y_true: The ground true label of each document.
:param y_pred: The predicted scores of each document.
:return: Precision @ k
:raises: ValueError: len(r) must be >= k.
"""
if self._k <= 0:
raise ValueError(f"k must be greater than 0."
f"{self._k} received.")
coupled_pair = sort_and_couple(y_true, y_pred)
precision = 0.0
for idx, (label, score) in enumerate(coupled_pair):
if idx >= self._k:
break
if label > self._threshold:
precision += 1.
return precision / self._k
| en | 0.605053 | Precision for ranking. Precision metric. :class:`PrecisionMetric` constructor. :param k: Number of results to consider. :param threshold: the label threshold of relevance degree. :return: Formated string representation of the metric. Calculate precision@k. Example: >>> y_true = [0, 0, 0, 1] >>> y_pred = [0.2, 0.4, 0.3, 0.1] >>> Precision(k=1)(y_true, y_pred) 0.0 >>> Precision(k=2)(y_true, y_pred) 0.0 >>> Precision(k=4)(y_true, y_pred) 0.25 >>> Precision(k=5)(y_true, y_pred) 0.2 :param y_true: The ground true label of each document. :param y_pred: The predicted scores of each document. :return: Precision @ k :raises: ValueError: len(r) must be >= k. | 2.627677 | 3 |
src/main/py/ltprg/config/seq.py | forkunited/ltprg | 11 | 7468 | from mung.torch_ext.eval import Loss
from ltprg.model.seq import DataParameter, SequenceModelNoInput, SequenceModelInputToHidden, SequenceModelAttendedInput
from ltprg.model.seq import VariableLengthNLLLoss
# Expects config of the form:
# {
# data_parameter : {
# seq : [SEQUENCE PARAMETER NAME]
# input : [INPUT PARAMETER NAME]
# }
# name : [ID FOR MODEL]
# arch_type : [SequenceModelNoInput|SequenceModelInputToHidden]
# dropout : [DROPOUT]
# rnn_layers : [RNN_LAYERS]
# rnn_size : [SIZE OF RNN HIDDEN LAYER]
# embedding_size : [EMBEDDING_SIZE]
# rnn_type : [RNN TYPE]
# (SequenceModelAttendedInput) attn_type : [EMBEDDING|OUTPUT]
# (SequenceModelInputToHidden) conv_input : [INDICATOR OF WHETHER OR NOT TO CONVOLVE THE INPUT]
# (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_kernel : [KERNEL SIZE FOR CONVOLUTION]
# (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_stride : [STRIDE LENGTH FOR CONVOLUTION]
# }
def load_seq_model(config, D, gpu=False):
data_parameter = DataParameter.make(**config["data_parameter"])
seq_field = data_parameter["seq"]
utterance_size = D[seq_field].get_matrix(0).get_feature_set().get_token_count()
dropout = float(config["dropout"])
rnn_layers = int(config["rnn_layers"])
rnn_size = int(config["rnn_size"])
embedding_size = int(config["embedding_size"])
rnn_type = config["rnn_type"]
if config["arch_type"] == "SequenceModelNoInput":
model = SequenceModelNoInput(config["name"], utterance_size, \
embedding_size, rnn_size, rnn_layers, dropout=dropout, rnn_type=rnn_type)
elif config["arch_type"] == "SequenceModelAttendedInput":
input_field = data_parameter["input"]
input_size = D[input_field].get_feature_set().get_token_count()
conv_kernel = int(config["conv_kernel"])
conv_stride = int(config["conv_stride"])
attn_type = "EMBEDDING"
if "attn_type" in config:
attn_type = config["attn_type"]
model = SequenceModelAttendedInput(config["name"], utterance_size, input_size, \
embedding_size, rnn_size, rnn_layers, dropout=dropout, rnn_type=rnn_type, \
conv_kernel=conv_kernel, conv_stride=conv_stride, attn_type=attn_type)
else:
input_field = data_parameter["input"]
input_size = D[input_field].get_feature_set().get_token_count()
conv_input = False
conv_kernel = 1
conv_stride = 1
if "conv_input" in config:
conv_input = bool(int(config["conv_input"]))
conv_kernel = int(config["conv_kernel"])
conv_stride = int(config["conv_stride"])
model = SequenceModelInputToHidden(config["name"], utterance_size, input_size, \
embedding_size, rnn_size, rnn_layers, dropout=dropout, rnn_type=rnn_type, \
conv_input=conv_input, conv_kernel=conv_kernel, conv_stride=conv_stride)
return data_parameter, model
# Expects config of the form:
# {
# data_parameter : {
# seq : [SEQUENCE PARAMETER NAME]
# input : [INPUT PARAMETER NAME]
# },
# evaluations : [
# name : [NAME FOR EVALUATION]
# type : (VariableLengthNLLLoss)
# data : [NAME OF DATA SUBSET]
# (Optional) data_size : [SIZE OF RANDOM SUBET OF DATA TO TAKE]
# ]
# }
def load_evaluations(config, D, gpu=False):
data_parameter = DataParameter.make(**config["data_parameter"])
evaluations = []
loss_criterion = VariableLengthNLLLoss(norm_dim=True)
if gpu:
loss_criterion = loss_criterion.cuda()
for eval_config in config["evaluations"]:
data = D[eval_config["data"]]
if "data_size" in eval_config:
data = data.get_random_subset(int(eval_config["data_size"]))
if eval_config["type"] == "VariableLengthNLLLoss":
loss = Loss(eval_config["name"], data, data_parameter, loss_criterion, norm_dim=True)
evaluations.append(loss)
else:
raise ValueError("Invalid seq evaluation type in config (" + str(eval_config["type"]))
return evaluations
| from mung.torch_ext.eval import Loss
from ltprg.model.seq import DataParameter, SequenceModelNoInput, SequenceModelInputToHidden, SequenceModelAttendedInput
from ltprg.model.seq import VariableLengthNLLLoss
# Expects config of the form:
# {
# data_parameter : {
# seq : [SEQUENCE PARAMETER NAME]
# input : [INPUT PARAMETER NAME]
# }
# name : [ID FOR MODEL]
# arch_type : [SequenceModelNoInput|SequenceModelInputToHidden]
# dropout : [DROPOUT]
# rnn_layers : [RNN_LAYERS]
# rnn_size : [SIZE OF RNN HIDDEN LAYER]
# embedding_size : [EMBEDDING_SIZE]
# rnn_type : [RNN TYPE]
# (SequenceModelAttendedInput) attn_type : [EMBEDDING|OUTPUT]
# (SequenceModelInputToHidden) conv_input : [INDICATOR OF WHETHER OR NOT TO CONVOLVE THE INPUT]
# (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_kernel : [KERNEL SIZE FOR CONVOLUTION]
# (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_stride : [STRIDE LENGTH FOR CONVOLUTION]
# }
def load_seq_model(config, D, gpu=False):
data_parameter = DataParameter.make(**config["data_parameter"])
seq_field = data_parameter["seq"]
utterance_size = D[seq_field].get_matrix(0).get_feature_set().get_token_count()
dropout = float(config["dropout"])
rnn_layers = int(config["rnn_layers"])
rnn_size = int(config["rnn_size"])
embedding_size = int(config["embedding_size"])
rnn_type = config["rnn_type"]
if config["arch_type"] == "SequenceModelNoInput":
model = SequenceModelNoInput(config["name"], utterance_size, \
embedding_size, rnn_size, rnn_layers, dropout=dropout, rnn_type=rnn_type)
elif config["arch_type"] == "SequenceModelAttendedInput":
input_field = data_parameter["input"]
input_size = D[input_field].get_feature_set().get_token_count()
conv_kernel = int(config["conv_kernel"])
conv_stride = int(config["conv_stride"])
attn_type = "EMBEDDING"
if "attn_type" in config:
attn_type = config["attn_type"]
model = SequenceModelAttendedInput(config["name"], utterance_size, input_size, \
embedding_size, rnn_size, rnn_layers, dropout=dropout, rnn_type=rnn_type, \
conv_kernel=conv_kernel, conv_stride=conv_stride, attn_type=attn_type)
else:
input_field = data_parameter["input"]
input_size = D[input_field].get_feature_set().get_token_count()
conv_input = False
conv_kernel = 1
conv_stride = 1
if "conv_input" in config:
conv_input = bool(int(config["conv_input"]))
conv_kernel = int(config["conv_kernel"])
conv_stride = int(config["conv_stride"])
model = SequenceModelInputToHidden(config["name"], utterance_size, input_size, \
embedding_size, rnn_size, rnn_layers, dropout=dropout, rnn_type=rnn_type, \
conv_input=conv_input, conv_kernel=conv_kernel, conv_stride=conv_stride)
return data_parameter, model
# Expects config of the form:
# {
# data_parameter : {
# seq : [SEQUENCE PARAMETER NAME]
# input : [INPUT PARAMETER NAME]
# },
# evaluations : [
# name : [NAME FOR EVALUATION]
# type : (VariableLengthNLLLoss)
# data : [NAME OF DATA SUBSET]
# (Optional) data_size : [SIZE OF RANDOM SUBET OF DATA TO TAKE]
# ]
# }
def load_evaluations(config, D, gpu=False):
data_parameter = DataParameter.make(**config["data_parameter"])
evaluations = []
loss_criterion = VariableLengthNLLLoss(norm_dim=True)
if gpu:
loss_criterion = loss_criterion.cuda()
for eval_config in config["evaluations"]:
data = D[eval_config["data"]]
if "data_size" in eval_config:
data = data.get_random_subset(int(eval_config["data_size"]))
if eval_config["type"] == "VariableLengthNLLLoss":
loss = Loss(eval_config["name"], data, data_parameter, loss_criterion, norm_dim=True)
evaluations.append(loss)
else:
raise ValueError("Invalid seq evaluation type in config (" + str(eval_config["type"]))
return evaluations
| en | 0.348839 | # Expects config of the form: # { # data_parameter : { # seq : [SEQUENCE PARAMETER NAME] # input : [INPUT PARAMETER NAME] # } # name : [ID FOR MODEL] # arch_type : [SequenceModelNoInput|SequenceModelInputToHidden] # dropout : [DROPOUT] # rnn_layers : [RNN_LAYERS] # rnn_size : [SIZE OF RNN HIDDEN LAYER] # embedding_size : [EMBEDDING_SIZE] # rnn_type : [RNN TYPE] # (SequenceModelAttendedInput) attn_type : [EMBEDDING|OUTPUT] # (SequenceModelInputToHidden) conv_input : [INDICATOR OF WHETHER OR NOT TO CONVOLVE THE INPUT] # (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_kernel : [KERNEL SIZE FOR CONVOLUTION] # (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_stride : [STRIDE LENGTH FOR CONVOLUTION] # } # Expects config of the form: # { # data_parameter : { # seq : [SEQUENCE PARAMETER NAME] # input : [INPUT PARAMETER NAME] # }, # evaluations : [ # name : [NAME FOR EVALUATION] # type : (VariableLengthNLLLoss) # data : [NAME OF DATA SUBSET] # (Optional) data_size : [SIZE OF RANDOM SUBET OF DATA TO TAKE] # ] # } | 2.213829 | 2 |
src/Utilities/metadata_worker.py | sigseg5/nometa-tg | 3 | 7469 | from shutil import move
import piexif
from PIL import Image
def delete_metadata(full_path_to_img):
"""
This function used for remove metadata only from documents, if you send image 'as image' Telegram automatically
removes all metadata at sending. This function removes all metadata via 'piexif' lib, saved image in '/app'
folder, and after that move it to 'documents' folder.
:param full_path_to_img: path to folder with documents e.g.'documents/image.jpg'
"""
piexif.remove(full_path_to_img, "clean_image.jpg")
move("clean_image.jpg", "documents/clean_image.jpg")
def delete_metadata_from_png(full_path_to_img):
"""
This function used for remove metadata only from png documents, if you send image 'as image' Telegram
automatically removes all metadata at sending. This function removes all metadata via 'PIL' lib and saved image
in 'documents' folder.
:param full_path_to_img: path to folder with documents e.g.'documents/image.png'
"""
image = Image.open(full_path_to_img)
image.save("documents/clean_image.png")
| from shutil import move
import piexif
from PIL import Image
def delete_metadata(full_path_to_img):
"""
This function used for remove metadata only from documents, if you send image 'as image' Telegram automatically
removes all metadata at sending. This function removes all metadata via 'piexif' lib, saved image in '/app'
folder, and after that move it to 'documents' folder.
:param full_path_to_img: path to folder with documents e.g.'documents/image.jpg'
"""
piexif.remove(full_path_to_img, "clean_image.jpg")
move("clean_image.jpg", "documents/clean_image.jpg")
def delete_metadata_from_png(full_path_to_img):
"""
This function used for remove metadata only from png documents, if you send image 'as image' Telegram
automatically removes all metadata at sending. This function removes all metadata via 'PIL' lib and saved image
in 'documents' folder.
:param full_path_to_img: path to folder with documents e.g.'documents/image.png'
"""
image = Image.open(full_path_to_img)
image.save("documents/clean_image.png")
| en | 0.709679 | This function used for remove metadata only from documents, if you send image 'as image' Telegram automatically removes all metadata at sending. This function removes all metadata via 'piexif' lib, saved image in '/app' folder, and after that move it to 'documents' folder. :param full_path_to_img: path to folder with documents e.g.'documents/image.jpg' This function used for remove metadata only from png documents, if you send image 'as image' Telegram automatically removes all metadata at sending. This function removes all metadata via 'PIL' lib and saved image in 'documents' folder. :param full_path_to_img: path to folder with documents e.g.'documents/image.png' | 3.128879 | 3 |
dkr-py310/docker-student-portal-310/course_files/begin_advanced/py_unit_2.py | pbarton666/virtual_classroom | 0 | 7470 | #py_unit_2.py
import unittest
class FirstTest(unittest.TestCase):
def setUp(self):
"setUp() runs before every test"
self.msg="Sorry, Charlie, but {} is not the same as {}."
def tearDown(self):
"tearDown runs after every test"
pass
def test_me(self):
"this test should pass"
first=1
second=2
self.assertEqual(first,1, msg=self.msg.format(first, second))
def test_failing(self):
"this test should fail"
first=1
second=2
self.assertEqual(second,1, msg=self.msg.format(first, second))
def test_passing(self):
"this test should pass, too"
self.assertEqual("b", "b")
def test_passing_a_failing_test(self):
"this test should pass, even though it 'fails'"
self.assertNotEqual("a", "b")
if __name__=='__main__':
    unittest.main()
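# Running the suite from a shell is equivalent to executing the file directly (the module
# name is assumed to match the file name):
#
#   python -m unittest py_unit_2 -v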
src/scheduled_task/__init__.py | Sciocatti/python_scheduler_and_clean_forced_exit | 0 | 7471 | from .scheduled_task import ScheduledTask | from .scheduled_task import ScheduledTask | none | 1 | 1.026928 | 1 |
|
scripts/game.py | davidnegrazis/PyPlayText-Workshop | 0 | 7472 | from sys import exit
# ------------------------------------------------------------------------------
global dev_name
global game_title
dev_name = "" # enter your name in the quotes!
game_title = "" # enter the game title in the quotes!
# ------------------------------------------------------------------------------
# ---------- initial values ----------
# these are used to define the starting values of your game variables
init_health = 100
init_mana = 200
init_boss_health = 50
# ---------- game variables ----------
# these will be used during the game
health = 0
mana = 0
boss_health = 0
# ---------- some useful functions ----------
# initialize game variables
def init():
global health
global mana
health = init_health
mana = init_mana
# game over
def game_over(msg):
print(msg)
print("Play again? (y / n)")
while (True):
choice = input("> ")
if (choice == "y"):
start()
break
elif (choice == "n"):
exit(0)
else:
print("Options: y / n")
# ---------- room definitions ----------
# here is where you'll create the flow of the game!
# room 0: where the game starts
def room_0():
global health
print("This is the first stage of the game. Create a custom description and get coding!")
print("Current health: " + str(health))
choice = input("> ");
if "end" in choice:
game_over("The game is over")
def start():
start_msg = "Now playing " + game_title + " by " + dev_name
print(start_msg)
init()
room_0()
# ---------- game start ----------
start()
| from sys import exit
# ------------------------------------------------------------------------------
global dev_name
global game_title
dev_name = "" # enter your name in the quotes!
game_title = "" # enter the game title in the quotes!
# ------------------------------------------------------------------------------
# ---------- initial values ----------
# these are used to define the starting values of your game variables
init_health = 100
init_mana = 200
init_boss_health = 50
# ---------- game variables ----------
# these will be used during the game
health = 0
mana = 0
boss_health = 0
# ---------- some useful functions ----------
# initialize game variables
def init():
global health
global mana
health = init_health
mana = init_mana
# game over
def game_over(msg):
print(msg)
print("Play again? (y / n)")
while (True):
choice = input("> ")
if (choice == "y"):
start()
break
elif (choice == "n"):
exit(0)
else:
print("Options: y / n")
# ---------- room definitions ----------
# here is where you'll create the flow of the game!
# room 0: where the game starts
def room_0():
global health
print("This is the first stage of the game. Create a custom description and get coding!")
print("Current health: " + str(health))
choice = input("> ");
if "end" in choice:
game_over("The game is over")
def start():
start_msg = "Now playing " + game_title + " by " + dev_name
print(start_msg)
init()
room_0()
# ---------- game start ----------
start()
| en | 0.571419 | # ------------------------------------------------------------------------------ # enter your name in the quotes! # enter the game title in the quotes! # ------------------------------------------------------------------------------ # ---------- initial values ---------- # these are used to define the starting values of your game variables # ---------- game variables ---------- # these will be used during the game # ---------- some useful functions ---------- # initialize game variables # game over # ---------- room definitions ---------- # here is where you'll create the flow of the game! # room 0: where the game starts # ---------- game start ---------- | 3.986287 | 4 |
lc1108_defangip.py | moheed/algo | 0 | 7473 | <reponame>moheed/algo<gh_stars>0
class Solution:
def defangIPaddr(self, address: str) -> str:
i=0
j=0
strlist=list(address)
defang=[]
while i< len(strlist):
if strlist[i] == '.':
defang.append('[')
defang.append('.')
defang.append(']')
else:
defang.append(address[i])
i+=1
retstr=""
# return string
return (retstr.join(defang))
| class Solution:
def defangIPaddr(self, address: str) -> str:
i=0
j=0
strlist=list(address)
defang=[]
while i< len(strlist):
if strlist[i] == '.':
defang.append('[')
defang.append('.')
defang.append(']')
else:
defang.append(address[i])
i+=1
retstr=""
# return string
return (retstr.join(defang)) | fi | 0.050455 | # return string | 3.385378 | 3 |
src/elections_address_files/commands/zip_files.py | gregbunce/assign_vista_pcts_to_sgid_addrpnts | 0 | 7474 | <filename>src/elections_address_files/commands/zip_files.py<gh_stars>0
import os, zipfile
# Zip files.
def zipfiles(directory):
# File extension to zip.
#ext = ('.gdb', '.csv')
ext = ('.gdb')
# Iterate over all files and check for desired extentions for zipping.
for file in os.listdir(directory):
if file.endswith(ext):
#: Zip it.
input_fgdb_name = file.rsplit( ".", 1)[0]
output_zipped_fgdb_name = "/" + input_fgdb_name + "_gdb.zip"
full_path_to_fgdb = directory + "/" + file
print(" Zipping " + str(full_path_to_fgdb))
outFile = f'{full_path_to_fgdb[0:-4]}_gdb.zip'
gdbName = os.path.basename(full_path_to_fgdb)
with zipfile.ZipFile(outFile,mode='w',compression=zipfile.ZIP_DEFLATED,allowZip64=True) as myzip:
for f in os.listdir(full_path_to_fgdb):
if f[-5:] != '.lock':
myzip.write(os.path.join(full_path_to_fgdb,f),gdbName+'\\'+os.path.basename(f))
else:
continue
| <filename>src/elections_address_files/commands/zip_files.py<gh_stars>0
import os, zipfile
# Zip files.
def zipfiles(directory):
# File extension to zip.
#ext = ('.gdb', '.csv')
ext = ('.gdb')
# Iterate over all files and check for desired extentions for zipping.
for file in os.listdir(directory):
if file.endswith(ext):
#: Zip it.
input_fgdb_name = file.rsplit( ".", 1)[0]
output_zipped_fgdb_name = "/" + input_fgdb_name + "_gdb.zip"
full_path_to_fgdb = directory + "/" + file
print(" Zipping " + str(full_path_to_fgdb))
outFile = f'{full_path_to_fgdb[0:-4]}_gdb.zip'
gdbName = os.path.basename(full_path_to_fgdb)
with zipfile.ZipFile(outFile,mode='w',compression=zipfile.ZIP_DEFLATED,allowZip64=True) as myzip:
for f in os.listdir(full_path_to_fgdb):
if f[-5:] != '.lock':
myzip.write(os.path.join(full_path_to_fgdb,f),gdbName+'\\'+os.path.basename(f))
else:
continue
| en | 0.869773 | # Zip files. # File extension to zip. #ext = ('.gdb', '.csv') # Iterate over all files and check for desired extentions for zipping. #: Zip it. | 3.061276 | 3 |
tartiflette/parser/nodes/node.py | erezsh/tartiflette | 0 | 7475 | <filename>tartiflette/parser/nodes/node.py<gh_stars>0
class Node:
def __init__(self, path, libgraphql_type, location, name):
self.path = path
self.parent = None
self.children = []
self.libgraphql_type = libgraphql_type
self.location = location
self.name = name
def __repr__(self):
return "%s(%s)" % (self.libgraphql_type, self.name)
| <filename>tartiflette/parser/nodes/node.py<gh_stars>0
class Node:
def __init__(self, path, libgraphql_type, location, name):
self.path = path
self.parent = None
self.children = []
self.libgraphql_type = libgraphql_type
self.location = location
self.name = name
def __repr__(self):
return "%s(%s)" % (self.libgraphql_type, self.name)
| none | 1 | 2.285786 | 2 |
|
mars/services/web/tests/test_core.py | yuyiming/mars | 1 | 7476 | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import sys
import pytest
from tornado import httpclient
from .... import oscar as mo
from ....utils import get_next_port
from .. import WebActor, web_api, MarsServiceWebAPIHandler, MarsWebAPIClientMixin
from ..api.web import MarsApiEntryHandler
class TestAPIHandler(MarsServiceWebAPIHandler):
__test__ = False
_root_pattern = "/api/test/(?P<test_id>[^/]+)"
@web_api("", method="get")
def get_method_root(self, test_id):
self.write(f"get_root_value_{test_id}")
@web_api("", method="post")
def post_method_root(self, test_id):
self.write(f"post_root_value_{test_id}")
@web_api("subtest/(?P<subtest_id>[^/]+)", method="get")
def get_method_sub_patt(self, test_id, subtest_id):
self.write(f"get_sub_value_{test_id}_{subtest_id}")
@web_api("subtest/(?P<subtest_id>[^/]+)", method="get", arg_filter={"action": "a1"})
async def get_method_sub_patt_match_arg1(self, test_id, subtest_id):
self.write(f"get_sub_value_{test_id}_{subtest_id}_action1")
@web_api("subtest/(?P<subtest_id>[^/]+)", method="get", arg_filter={"action": "a2"})
async def get_method_sub_patt_match_arg2(self, test_id, subtest_id):
self.write(f"get_sub_value_{test_id}_{subtest_id}_action2")
@web_api("subtest_error", method="get")
def get_with_error(self, test_id):
raise ValueError
@web_api("subtest_delay", method="get")
async def get_with_timeout(self, test_id):
await asyncio.sleep(100)
raise ValueError(test_id)
@pytest.fixture
async def actor_pool():
start_method = (
os.environ.get("POOL_START_METHOD", "forkserver")
if sys.platform != "win32"
else None
)
pool = await mo.create_actor_pool(
"127.0.0.1", n_process=0, subprocess_start_method=start_method
)
async with pool:
web_config = {
"host": "127.0.0.1",
"port": get_next_port(),
"web_handlers": {
"/api": MarsApiEntryHandler,
TestAPIHandler.get_root_pattern(): TestAPIHandler,
},
"extra_discovery_modules": ["mars.services.web.tests.extra_handler"],
}
await mo.create_actor(WebActor, web_config, address=pool.external_address)
yield pool, web_config["port"]
class SimpleWebClient(MarsWebAPIClientMixin):
async def fetch(self, path, method="GET", **kwargs):
return await self._request_url(method, path, **kwargs)
@pytest.mark.asyncio
async def test_web_api(actor_pool):
_pool, web_port = actor_pool
recorded_urls = []
def url_recorder(request):
recorded_urls.append(request.url)
return request
client = SimpleWebClient()
client.request_rewriter = url_recorder
res = await client.fetch(f"http://localhost:{web_port}/")
assert res.body.decode()
res = await client.fetch(f"http://localhost:{web_port}/api")
assert res.body.decode()
res = await client.fetch(f"http://localhost:{web_port}/api/test/test_id")
assert res.body.decode() == "get_root_value_test_id"
res = await client.fetch(
f"http://localhost:{web_port}/api/test/test_id", method="POST", data=b""
)
assert res.body.decode() == "post_root_value_test_id"
res = await client.fetch(
f"http://localhost:{web_port}/api/test/test_id/subtest/sub_tid"
)
assert res.body.decode() == "get_sub_value_test_id_sub_tid"
res = await client.fetch(
f"http://localhost:{web_port}/api/test/test_id/subtest/sub_tid?action=a1"
)
assert res.body.decode() == "get_sub_value_test_id_sub_tid_action1"
res = await client.fetch(
f"http://localhost:{web_port}/api/test/test_id/subtest/sub_tid?action=a2"
)
assert res.body.decode() == "get_sub_value_test_id_sub_tid_action2"
with pytest.raises(httpclient.HTTPError) as excinfo:
await client.fetch(f"http://localhost:{web_port}/api/test/test_id/non_exist")
assert excinfo.value.code == 404
with pytest.raises(ValueError):
await client.fetch(
f"http://localhost:{web_port}/api/test/test_id/subtest_error"
)
with pytest.raises(TimeoutError):
await client.fetch(
f"http://localhost:{web_port}/api/test/test_id/subtest_delay",
request_timeout=0.5,
)
res = await client.fetch(f"http://localhost:{web_port}/api/extra_test")
assert "Test" in res.body.decode()
assert len(recorded_urls) > 0
| en | 0.839836 | # Copyright 1999-2021 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.891118 | 2 |
test/test.py | caizhanjin/deepseg | 0 | 7477 | """
Example: MNIST, classifying handwritten digit images.
The "hello world" of neural networks.
"""
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Helper wrappers for the network-building APIs
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
    return tf.nn.conv2d(x,
                        W,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
def max_pool_2x2(x):
    return tf.nn.max_pool(x,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
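# With stride 1 and SAME padding, conv2d preserves the spatial size of its
# input; max_pool_2x2 then halves it (28x28 -> 14x14 -> 7x7 below).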
"""
MNIST, advanced version
"""
sess = tf.InteractiveSession()
# [batch_size, 784]
x = tf.placeholder('float', shape=[None, 784])
y_ = tf.placeholder('float', shape=[None, 10])
"""
First convolutional layer
"""
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# [batch_size, 28, 28, 1]
x_image = tf.reshape(x, [-1, 28, 28, 1])
# [batch_size, 28, 28, 32]
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# [batch_size, 14, 14, 32]
h_pool1 = max_pool_2x2(h_conv1)
"""
Second convolutional layer
"""
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
# [batch_size, 14, 14, 64]
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# [batch_size, 7, 7, 64]
h_pool2 = max_pool_2x2(h_conv2)
"""
Fully connected layer
"""
w_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
# [batch_size, 7*7*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# [batch_size, 1024]
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
"""
dropout
"""
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
"""
Output layer
"""
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
# [batch_size, 10]
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
y_sum = tf.reduce_sum(y_conv[0])
# Compute the loss and add the optimizer
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
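# Note: this sums cross-entropy over the whole batch rather than averaging it,
# so the effective learning rate scales with batch size.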
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Evaluate the model
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Initialize all variables
sess.run(tf.initialize_all_variables())
for i in range(1):
batch = mnist.train.next_batch(50)
# train_accuracy = accuracy.eval(feed_dict={x:batch[0],
# y_: batch[1],
# keep_prob: 1.0})
# print("step %d, training accuracy %g" % (i, train_accuracy))
y_conv_re = y_conv.eval(feed_dict={x: batch[0],
y_: batch[1],
keep_prob: 1.0})
# print(y_conv_re.shape)
print(y_conv_re)
y_sum_re = y_sum.eval(feed_dict={x: batch[0],
y_: batch[1],
keep_prob: 1.0})
print(y_sum_re)
train_step.run(feed_dict={x: batch[0],
y_: batch[1],
keep_prob: 0.5})
print("test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images,
y_: mnist.test.labels,
keep_prob: 1.0}))
| """
例子为MNIST,对手写图片进行分类。
神经网络hello world。
"""
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# 封装网络用到的API
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x,
W,
strides= [1, 1, 1, 1],
padding= 'SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x,
ksize= [1, 2, 2, 1],
strides= [1, 2, 2, 1],
padding='SAME')
"""
MNIST进阶
"""
sess = tf.InteractiveSession()
# [batch_size, 784]
x = tf.placeholder('float', shape=[None, 784])
y_ = tf.placeholder('float', shape=[None, 10])
"""
第一层卷积
"""
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# [batch_size, 28, 28, 1]
x_image = tf.reshape(x, [-1, 28, 28, 1])
# [batch_size, 28, 28, 32]
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# [batch_size, 14, 14, 32]
h_pool1 = max_pool_2x2(h_conv1)
"""
第二层卷积
"""
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
# [batch_size, 14, 14, 64]
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# [batch_size, 7, 7, 64]
h_pool2 = max_pool_2x2(h_conv2)
"""
全连接层
"""
w_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
# [batch_size, 7*7*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# [batch_size, 1024]
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
"""
dropout
"""
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
"""
输出层
"""
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
# [batch_size, 10]
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
y_sum = tf.reduce_sum(y_conv[0])
# 计算损失和添加优化器
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# 评估模型
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# 初始化
sess.run(tf.initialize_all_variables())
for i in range(1):
batch = mnist.train.next_batch(50)
# train_accuracy = accuracy.eval(feed_dict={x:batch[0],
# y_: batch[1],
# keep_prob: 1.0})
# print("step %d, training accuracy %g" % (i, train_accuracy))
y_conv_re = y_conv.eval(feed_dict={x: batch[0],
y_: batch[1],
keep_prob: 1.0})
# print(y_conv_re.shape)
print(y_conv_re)
y_sum_re = y_sum.eval(feed_dict={x: batch[0],
y_: batch[1],
keep_prob: 1.0})
print(y_sum_re)
train_step.run(feed_dict={x: batch[0],
y_: batch[1],
keep_prob: 0.5})
print("test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images,
y_: mnist.test.labels,
keep_prob: 1.0}))
| en | 0.35113 | Example: MNIST, classifying handwritten digit images. The "hello world" of neural networks. # Helper wrappers for the network-building APIs MNIST, advanced version # [batch_size, 784] First convolutional layer # [batch_size, 28, 28, 1] # [batch_size, 28, 28, 32] # [batch_size, 14, 14, 32] Second convolutional layer # [batch_size, 14, 14, 64] # [batch_size, 7, 7, 64] Fully connected layer # [batch_size, 7*7*64] # [batch_size, 1024] dropout Output layer # [batch_size, 10] # Compute the loss and add the optimizer # Evaluate the model # Initialize all variables # train_accuracy = accuracy.eval(feed_dict={x:batch[0], # y_: batch[1], # keep_prob: 1.0}) # print("step %d, training accuracy %g" % (i, train_accuracy)) # print(y_conv_re.shape) | 3.779173 | 4 |
game/content/ghplots/lancemates.py | jwvhewitt/gearhead-caramel | 74 | 7478 | import pbge
from game.content.plotutility import LMSkillsSelfIntro
from game.content import backstory
from pbge.plots import Plot
from pbge.dialogue import Offer, ContextTag
from game.ghdialogue import context
import gears
import game.content.gharchitecture
import game.content.ghterrain
import random
from game import memobrowser
Memo = memobrowser.Memo
# *******************
# *** UTILITIES ***
# *******************
def get_hire_cost(camp, npc):
return (npc.renown * npc.renown * (200 - npc.get_reaction_score(camp.pc, camp)))//10
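# Illustrative example (not from the original source): a pilot with renown 30
# and a reaction score of 20 toward the PC would cost
# (30 * 30 * (200 - 20)) // 10 = 16200 credits to hire.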
# **************************
# *** RANDOM_LANCEMATE ***
# **************************
class UtterlyRandomLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
def custom_init(self, nart):
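        # Taking the min of two rolls biases the starting rank toward the low end.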
npc = gears.selector.random_character(rank=min(random.randint(10, 50),random.randint(10, 50)),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
specialties = [sk for sk in gears.stats.NONCOMBAT_SKILLS if sk in npc.statline]
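        # Occasionally grant a bonus noncombat skill: with no specialties this is
        # roughly a 3-in-16 chance, and it shrinks as the NPC collects more.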
if random.randint(-12,3) > len(specialties):
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class UtterlyGenericLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mecha Pilot","Arena Pilot","Recon Pilot","Mercenary","Bounty Hunter")
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(10, 50),random.randint(10, 50)),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
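        # A 1-in-20 chance of a bonus noncombat skill keeps generic pilots varied.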
if random.randint(1,20) == 1:
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class GiftedNewbieLancemate(Plot):
# Amazing stats, amazingly crap skills.
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mecha Pilot","Arena Pilot","Citizen","Explorer","Factory Worker")
UNIQUE = True
def custom_init(self, nart):
npc = gears.selector.random_character(statline=gears.base.Being.random_stats(random.randint(100, 110)),
rank=random.randint(5, 15),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True, birth_year=nart.camp.year - random.randint(18,23))
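        # Rolled with ~100-110 stat points (well above average) but a low rank,
        # so raw talent far outstrips trained skill.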
if random.randint(1,10) == 1:
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class OlderMentorLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
def custom_init(self, nart):
npc = gears.selector.random_character(rank=random.randint(41, 85),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True, birth_year=nart.camp.year - random.randint(32,50))
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1, 4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class DeadzonerInGreenZoneLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mercenary","Bandit","Scavenger","Aristo","Tekno","Sheriff")
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return gears.personality.GreenZone in pstate.elements["METROSCENE"].attributes
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(20, 55),random.randint(20, 55)),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=(gears.personality.DeadZone,),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class GladiatorLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return gears.personality.DeadZone in pstate.elements["METROSCENE"].attributes
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(25, 65),random.randint(25, 65)),
can_cyberize=True,
job=gears.jobs.ALL_JOBS["Gladiator"],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=(gears.personality.DeadZone,),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate: gears.GearHeadScene):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class MutantLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return {gears.personality.GreenZone,gears.personality.DeadZone}.intersection(pstate.elements["METROSCENE"].attributes)
def custom_init(self, nart):
npc = gears.selector.random_character(rank=random.randint(20, 45),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
mutation = random.choice(gears.personality.MUTATIONS)
mutation.apply(npc)
npc.personality.add(mutation)
specialties = [sk for sk in gears.stats.NONCOMBAT_SKILLS if sk in npc.statline]
if random.randint(-12,3) > len(specialties):
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate, pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class FormerLancemateReturns(Plot):
LABEL = "RANDOM_LANCEMATE"
active = True
scope = "METRO"
def custom_init(self, nart):
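        # Try to recover a former lancemate carried over in the player's egg
        # (imported save data, presumably from an earlier adventure).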
npc: gears.base.Character = nart.camp.egg.seek_dramatis_person(nart.camp, self._is_good_npc, self)
if npc:
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.bs = backstory.Backstory(("LONGTIMENOSEE",),keywords=[t.name.upper() for t in npc.get_tags()])
return npc
def _is_good_npc(self,nart,candidate):
return isinstance(candidate, gears.base.Character) and candidate.relationship and gears.relationships.RT_LANCEMATE in candidate.relationship.tags
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,gears.GearHeadScene) and gears.tags.SCENE_PUBLIC in candidate.attributes
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if npc is self.elements["NPC"]:
for k in self.bs.results.keys():
mygram[k] = [self.bs.get_one(k),]
else:
mygram["[News]"] = ["{NPC} has been hanging out at {LOCALE}".format(**self.elements), ]
return mygram
def NPC_offers(self, camp):
mylist = list()
mylist.append(Offer("[INFO_PERSONAL]",
context=ContextTag([context.PERSONAL]),
no_repeats=True, effect=self.end_plot))
return mylist
def t_START(self, camp):
if self.elements["NPC"] in camp.party:
self.end_plot(camp)
# **************************
# *** RLM_Relationship ***
# **************************
# Elements:
# NPC: The NPC who needs a personality
# METROSCENE: The city or whatever that the NPC calls home
#
# These subplots contain a personality for a random (potential) lancemate.
# Also include a means for the lancemate to gain the "RT_LANCEMATE" tag.
class RLM_Beginner(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].renown < 25
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(attitude=gears.relationships.A_JUNIOR)
# This character gets fewer mecha points.
npc.relationship.data["mecha_level_bonus"] = -10
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("I can't believe you asked me... [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] Some day I want to become a cavalier like you.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
            # This is a local NPC, not the recruit. Give them some news.
mygram["[News]"] = ["{} has dreams of someday becoming a cavalier".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="As far as I know {} usually hangs out at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
        self.memo = Memo("{} dreams of becoming a cavalier.".format(mynpc),
                         mynpc.get_scene())
class RLM_Friendly(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(attitude=gears.relationships.A_FRIENDLY)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate() and npc.get_reaction_score(camp.pc, camp) > 0:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] [WAITINGFORMISSION]", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
            # This is a local NPC, not the recruit. Give them some news.
mygram["[News]"] = ["{} is looking for a lance to join".format(self.elements["NPC"]), ]
return mygram
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can usually find {} at {}, if you're planning to invite {} to join your lance.".format(mynpc,mynpc.get_scene(),mynpc.gender.object_pronoun),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
        self.memo = Memo("{} is looking for a lance to join.".format(mynpc),
                         mynpc.get_scene())
class RLM_Medic(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
VIRTUES = (gears.personality.Peace,gears.personality.Fellowship)
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].job and gears.tags.Medic in pstate.elements["NPC"].job.tags
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_GREATERGOOD)
new_virtue = random.choice(self.VIRTUES)
if new_virtue not in npc.personality:
npc.personality.add(new_virtue)
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
else:
mylist.append(Offer("You've got a full crew right now, but if you ever find yourself in need of a qualified medic come back and find me.",
context=ContextTag((context.JOIN,)),
effect=self._defer_join
))
mylist.append(Offer(
"[HELLO] Lately I've been spending too much time here, when I'd rather be out in the danger zone saving lives.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
            # This is a local NPC, not the recruit. Give them some news.
mygram["[News]"] = ["{} wants to leave {} so {} can make a positive difference in the world".format(self.elements["NPC"],self.elements["NPC"].get_scene(),self.elements["NPC"].gender.subject_pronoun), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _defer_join(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
self.end_plot(camp)
class RLM_Mercenary(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].job and {gears.tags.Adventurer,gears.tags.Military}.intersection(pstate.elements["NPC"].job.tags)
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_MERCENARY)
# This character gets extra mecha points, showing their good investment sense.
npc.relationship.data["mecha_level_bonus"] = 10
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
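        # The asking price is recomputed each time dialogue opens, so it tracks
        # the NPC's current reaction score toward the PC.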
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("I'll join your lance for a mere ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] I am a mercenary pilot, looking for my next contract.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
            # This is a local NPC, not the recruit. Give them some news.
mygram["[News]"] = ["{} is hoping to make some quick cash".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
camp.credits -= self.hire_cost
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="As far as I know {} can usually be found at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
        self.memo = Memo("{} is a mercenary pilot looking for a job.".format(mynpc),
                         mynpc.get_scene())
class RLM_Professional(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].renown > 20
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_PROFESSIONAL)
# This character gets 10 extra stat points, showing their elite nature.
npc.roll_stats(10, clear_first=False)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer(
"[NOEXPOSURE] I think ${} is a fair signing price. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)), data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] I see you are also a cavalier.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
            # This is a local NPC, not the recruit. Give them some news.
mygram["[News]"] = ["{} is an experienced pilot looking for work".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
camp.credits -= self.hire_cost
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can usually find {} at {}. Bring cash if you're planning to hire {}.".format(mynpc,mynpc.get_scene(),mynpc.gender.object_pronoun),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
        self.memo = Memo("{} is an experienced pilot looking for work.".format(mynpc),
                         mynpc.get_scene())
class RLM_RatherGeneric(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship()
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
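        # NPCs who already like the PC (reaction score above 60) join for free;
        # everyone else charges the standard signing rate.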
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
if npc.get_reaction_score(camp.pc, camp) > 60:
mylist.append(Offer("[IWOULDLOVETO] [THANKS_FOR_CHOOSING_ME]",
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
effect=self._join_lance
))
else:
mylist.append(Offer("My regular signing rate is ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._pay_to_join
))
mylist.append(Offer(
"[HELLO] [WAITINGFORMISSION]", context=ContextTag((context.HELLO,))
))
else:
mylist.append(Offer(
"[HELLO] Must be nice going off, having adventures with your lancemates. I'd like to do that again someday.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
mygram["[News]"] = ["{} is looking for a new lance to join".format(self.elements["NPC"]), ]
return mygram
def _pay_to_join(self,camp):
camp.credits -= self.hire_cost
self._join_lance(camp)
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can find {} at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
        self.memo = Memo("{} is looking for a new lance.".format(mynpc),
                         mynpc.get_scene())
class RLM_DamagedGoodsSale(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_IMPROVER)
# This NPC gets a stat bonus but a crappy mech to show their history.
npc.relationship.data["mecha_level_bonus"] = -15
npc.roll_stats(5, clear_first=False)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
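        # Damaged goods discount: half the usual signing rate.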
self.hire_cost = get_hire_cost(camp,npc)//2
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
if npc.get_reaction_score(camp.pc, camp) > 20:
mylist.append(Offer("[IWOULDLOVETO] I'll do my best to not let you down.",
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
effect=self._join_lance
))
else:
mylist.append(Offer("I'll sign up with you for just ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] I'll do my best to not let you down.",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._pay_to_join
))
mylist.append(Offer(
"[HELLO] The life of a cavalier is full of ups and downs... right now I'm in one of those downs.", context=ContextTag((context.HELLO,))
))
else:
mylist.append(Offer(
"[HELLO] Be careful out there... all it takes is one little mistake to cost you everything.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
mygram["[News]"] = ["{NPC} is a down on {NPC.gender.possessive_determiner} luck cavalier looking for another chance".format(**self.elements), ]
return mygram
def _pay_to_join(self,camp):
camp.credits -= self.hire_cost
self._join_lance(camp)
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can find {} at {}. Don't say that you weren't warned.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
        self.memo = Memo("{} is looking for a new lance.".format(mynpc),
                         mynpc.get_scene())
| import pbge
from game.content.plotutility import LMSkillsSelfIntro
from game.content import backstory
from pbge.plots import Plot
from pbge.dialogue import Offer, ContextTag
from game.ghdialogue import context
import gears
import game.content.gharchitecture
import game.content.ghterrain
import random
from game import memobrowser
Memo = memobrowser.Memo
# *******************
# *** UTILITIES ***
# *******************
def get_hire_cost(camp, npc):
return (npc.renown * npc.renown * (200 - npc.get_reaction_score(camp.pc, camp)))//10
# **************************
# *** RANDOM_LANCEMATE ***
# **************************
class UtterlyRandomLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(10, 50),random.randint(10, 50)),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
specialties = [sk for sk in gears.stats.NONCOMBAT_SKILLS if sk in npc.statline]
if random.randint(-12,3) > len(specialties):
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class UtterlyGenericLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mecha Pilot","Arena Pilot","Recon Pilot","Mercenary","Bounty Hunter")
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(10, 50),random.randint(10, 50)),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
if random.randint(1,20) == 1:
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class GiftedNewbieLancemate(Plot):
# Amazing stats, amazingly crap skills.
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mecha Pilot","Arena Pilot","Citizen","Explorer","Factory Worker")
UNIQUE = True
def custom_init(self, nart):
npc = gears.selector.random_character(statline=gears.base.Being.random_stats(random.randint(100, 110)),
rank=random.randint(5, 15),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True, birth_year=nart.camp.year - random.randint(18,23))
if random.randint(1,10) == 1:
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class OlderMentorLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
def custom_init(self, nart):
npc = gears.selector.random_character(rank=random.randint(41, 85),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True, birth_year=nart.camp.year - random.randint(32,50))
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1, 4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class DeadzonerInGreenZoneLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mercenary","Bandit","Scavenger","Aristo","Tekno","Sheriff")
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return gears.personality.GreenZone in pstate.elements["METROSCENE"].attributes
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(20, 55),random.randint(20, 55)),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=(gears.personality.DeadZone,),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class GladiatorLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return gears.personality.DeadZone in pstate.elements["METROSCENE"].attributes
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(25, 65),random.randint(25, 65)),
can_cyberize=True,
job=gears.jobs.ALL_JOBS["Gladiator"],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=(gears.personality.DeadZone,),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate: gears.GearHeadScene):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class MutantLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return {gears.personality.GreenZone,gears.personality.DeadZone}.intersection(pstate.elements["METROSCENE"].attributes)
def custom_init(self, nart):
npc = gears.selector.random_character(rank=random.randint(20, 45),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
mutation = random.choice(gears.personality.MUTATIONS)
mutation.apply(npc)
npc.personality.add(mutation)
specialties = [sk for sk in gears.stats.NONCOMBAT_SKILLS if sk in npc.statline]
if random.randint(-12,3) > len(specialties):
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate, pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class FormerLancemateReturns(Plot):
LABEL = "RANDOM_LANCEMATE"
active = True
scope = "METRO"
def custom_init(self, nart):
npc: gears.base.Character = nart.camp.egg.seek_dramatis_person(nart.camp, self._is_good_npc, self)
if npc:
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
#print(npc,scene)
self.bs = backstory.Backstory(("LONGTIMENOSEE",),keywords=[t.name.upper() for t in npc.get_tags()])
return npc
def _is_good_npc(self,nart,candidate):
return isinstance(candidate, gears.base.Character) and candidate.relationship and gears.relationships.RT_LANCEMATE in candidate.relationship.tags
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,gears.GearHeadScene) and gears.tags.SCENE_PUBLIC in candidate.attributes
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if npc is self.elements["NPC"]:
for k in self.bs.results.keys():
mygram[k] = [self.bs.get_one(k),]
else:
mygram["[News]"] = ["{NPC} has been hanging out at {LOCALE}".format(**self.elements), ]
return mygram
def NPC_offers(self, camp):
mylist = list()
mylist.append(Offer("[INFO_PERSONAL]",
context=ContextTag([context.PERSONAL]),
no_repeats=True, effect=self.end_plot))
return mylist
def t_START(self, camp):
if self.elements["NPC"] in camp.party:
self.end_plot(camp)
# **************************
# *** RLM_Relationship ***
# **************************
# Elements:
# NPC: The NPC who needs a personality
# METROSCENE: The city or whatever that the NPC calls home
#
# These subplots contain a personality for a random (potential) lancemate.
# Also include a means for the lancemate to gain the "RT_LANCEMATE" tag.
class RLM_Beginner(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].renown < 25
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(attitude=gears.relationships.A_JUNIOR)
# This character gets fewer mecha points.
npc.relationship.data["mecha_level_bonus"] = -10
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("I can't believe you asked me... [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] Some day I want to become a cavalier like you.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} has dreams of someday becoming a cavalier".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="As far as I know {} usually hangs out at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} dreams of becoming a cavalier.".format(mynpc)
, mynpc.get_scene()
)
class RLM_Friendly(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(attitude=gears.relationships.A_FRIENDLY)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate() and npc.get_reaction_score(camp.pc, camp) > 0:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] [WAITINGFORMISSION]", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} is looking for a lance to join".format(self.elements["NPC"]), ]
return mygram
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can usually find {} at {}, if you're planning to invite {} to join your lance.".format(mynpc,mynpc.get_scene(),mynpc.gender.object_pronoun),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} is looking for a lance to join.".format(mynpc)
, mynpc.get_scene()
)
class RLM_Medic(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
VIRTUES = (gears.personality.Peace,gears.personality.Fellowship)
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].job and gears.tags.Medic in pstate.elements["NPC"].job.tags
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_GREATERGOOD)
new_virtue = random.choice(self.VIRTUES)
if new_virtue not in npc.personality:
npc.personality.add(new_virtue)
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
else:
mylist.append(Offer("You've got a full crew right now, but if you ever find yourself in need of a qualified medic come back and find me.",
context=ContextTag((context.JOIN,)),
effect=self._defer_join
))
mylist.append(Offer(
"[HELLO] Lately I've been spending too much time here, when I'd rather be out in the danger zone saving lives.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} wants to leave {} so {} can make a positive difference in the world".format(self.elements["NPC"],self.elements["NPC"].get_scene(),self.elements["NPC"].gender.subject_pronoun), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _defer_join(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
self.end_plot(camp)
class RLM_Mercenary(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
def matches( self, pstate ):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].job and {gears.tags.Adventurer,gears.tags.Military}.intersection(pstate.elements["NPC"].job.tags)
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_MERCENARY)
# This character gets extra mecha points, showing their good investment sense.
npc.relationship.data["mecha_level_bonus"] = 10
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("I'll join your lance for a mere ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] I am a mercenary pilot, looking for my next contract.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} is hoping to make some quick cash".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
camp.credits -= self.hire_cost
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="As far as I know {} can usually be found at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo("{} is a mercenary pilot looking for a job.".format(mynpc)
, mynpc.get_scene()
)
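# Editorial note (hedged): the hire-for-cash plots above and below share one dialogue
# protocol -- a PROPOSAL offer opens a subject, and paired ACCEPT/DENY offers close it:
#   Offer(..., context=ContextTag((context.PROPOSAL, context.JOIN)),
#         subject=self, subject_start=True)    # quotes self.hire_cost
#   Offer(..., context=ContextTag((context.DENY, context.JOIN)), subject=self)
#   Offer(..., context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
#         effect=self._join_lance)             # only offered while camp.credits >= hire_cost
# Names mirror the surrounding classes; treat this as a reading aid, not new API.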
class RLM_Professional(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
    def matches( cls, pstate ):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].renown > 20
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_PROFESSIONAL)
# This character gets 10 extra stat points, showing their elite nature.
npc.roll_stats(10, clear_first=False)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer(
"[NOEXPOSURE] I think ${} is a fair signing price. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)), data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] I see you are also a cavalier.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} is an experienced pilot looking for work".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
camp.credits -= self.hire_cost
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can usually find {} at {}. Bring cash if you're planning to hire {}.".format(mynpc,mynpc.get_scene(),mynpc.gender.object_pronoun),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} is an experienced pilot looking for work.".format(mynpc)
, mynpc.get_scene()
)
class RLM_RatherGeneric(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship()
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
if npc.get_reaction_score(camp.pc, camp) > 60:
mylist.append(Offer("[IWOULDLOVETO] [THANKS_FOR_CHOOSING_ME]",
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
effect=self._join_lance
))
else:
mylist.append(Offer("My regular signing rate is ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._pay_to_join
))
mylist.append(Offer(
"[HELLO] [WAITINGFORMISSION]", context=ContextTag((context.HELLO,))
))
else:
mylist.append(Offer(
"[HELLO] Must be nice going off, having adventures with your lancemates. I'd like to do that again someday.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
mygram["[News]"] = ["{} is looking for a new lance to join".format(self.elements["NPC"]), ]
return mygram
def _pay_to_join(self,camp):
camp.credits -= self.hire_cost
self._join_lance(camp)
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can find {} at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo("{} is looking for a new lance.".format(mynpc)
, mynpc.get_scene()
)
class RLM_DamagedGoodsSale(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_IMPROVER)
# This NPC gets a stat bonus but a crappy mech to show their history.
npc.relationship.data["mecha_level_bonus"] = -15
npc.roll_stats(5, clear_first=False)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)//2
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
if npc.get_reaction_score(camp.pc, camp) > 20:
mylist.append(Offer("[IWOULDLOVETO] I'll do my best to not let you down.",
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
effect=self._join_lance
))
else:
mylist.append(Offer("I'll sign up with you for just ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] I'll do my best to not let you down.",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._pay_to_join
))
mylist.append(Offer(
"[HELLO] The life of a cavalier is full of ups and downs... right now I'm in one of those downs.", context=ContextTag((context.HELLO,))
))
else:
mylist.append(Offer(
"[HELLO] Be careful out there... all it takes is one little mistake to cost you everything.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
mygram["[News]"] = ["{NPC} is a down on {NPC.gender.possessive_determiner} luck cavalier looking for another chance".format(**self.elements), ]
return mygram
def _pay_to_join(self,camp):
camp.credits -= self.hire_cost
self._join_lance(camp)
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can find {} at {}. Don't say that you weren't warned.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} is looking for a new lance.".format(mynpc)
, mynpc.get_scene()
)
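# Editorial summary (hedged): every RLM_Relationship plot above follows the same
# skeleton: matches() filters which NPCs qualify, custom_init() stamps a
# gears.relationships.Relationship (plus stat or mecha tweaks), NPC_offers() builds
# the JOIN dialogue, _get_dialogue_grammar() and _get_generic_offers() seed town
# gossip and rumor memos, and _join_lance() finally tags the NPC RT_LANCEMATE
# before end_plot() retires the plot.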
| en | 0.87001 | # ******************* # *** UTILITIES *** # ******************* # ************************** # *** RANDOM_LANCEMATE *** # ************************** # Amazing stats, amazingly crap skills. Returns True if this plot matches the current plot state. Returns True if this plot matches the current plot state. Returns True if this plot matches the current plot state. #print(npc,scene) # ************************** # *** RLM_Relationship *** # ************************** # Elements: # NPC: The NPC who needs a personality # METROSCENE: The city or whatever that the NPC calls home # # These subplots contain a personality for a random (potential) lancemate. # Also include a means for the lancemate to gain the "RT_LANCEMATE" tag. Returns True if this plot matches the current plot state. # This character gets fewer mecha points. # This is an NPC in Wujung. Give them some news. Get any offers that could apply to non-element NPCs. # This is an NPC in Wujung. Give them some news. Get any offers that could apply to non-element NPCs. Returns True if this plot matches the current plot state. # This is an NPC in Wujung. Give them some news. Returns True if this plot matches the current plot state. # This character gets extra mecha points, showing their good investment sense. # This is an NPC in Wujung. Give them some news. Get any offers that could apply to non-element NPCs. Returns True if this plot matches the current plot state. # This character gets 10 extra stat points, showing their elite nature. # This is an NPC in Wujung. Give them some news. Get any offers that could apply to non-element NPCs. Get any offers that could apply to non-element NPCs. # This NPC gets a stat bonus but a crappy mech to show their history. Get any offers that could apply to non-element NPCs. | 2.221493 | 2 |
projects/detr/scripts/dd.py | zzzzzz0407/detectron2 | 1 | 7479 | import json
if __name__ == '__main__':
jsonFile = '/data00/home/zhangrufeng1/projects/detectron2/projects/detr/datasets/mot/mot17/annotations/mot17_train_half.json'
with open(jsonFile, 'r') as f:
infos = json.load(f)
count_dict = dict()
for info in infos["images"]:
if info["file_name"] in ["MOT17-02-FRCNN/img1/000091.jpg"]:
for ann in infos['annotations']:
if ann["image_id"] not in count_dict.keys() and ann["iscrowd"] == 0 and ann["bbox"][2] >= 1e-5 and ann["bbox"][3] >= 1e-5:
count_dict[ann["image_id"]] = 1
elif ann["image_id"] in count_dict.keys() and ann["iscrowd"] == 0:
count_dict[ann["image_id"]] += 1
max_count = 0
min_count = 999
num_freq = 0
for key, value in count_dict.items():
max_count = max(max_count, value)
min_count = min(min_count, value)
if value > 100:
num_freq += 1
print("max_count: {}".format(max_count))
print("min_count: {}".format(min_count))
print("num_freq: {}".format(num_freq))
| import json
if __name__ == '__main__':
jsonFile = '/data00/home/zhangrufeng1/projects/detectron2/projects/detr/datasets/mot/mot17/annotations/mot17_train_half.json'
with open(jsonFile, 'r') as f:
infos = json.load(f)
count_dict = dict()
for info in infos["images"]:
if info["file_name"] in ["MOT17-02-FRCNN/img1/000091.jpg"]:
for ann in infos['annotations']:
if ann["image_id"] not in count_dict.keys() and ann["iscrowd"] == 0 and ann["bbox"][2] >= 1e-5 and ann["bbox"][3] >= 1e-5:
count_dict[ann["image_id"]] = 1
elif ann["image_id"] in count_dict.keys() and ann["iscrowd"] == 0:
count_dict[ann["image_id"]] += 1
max_count = 0
min_count = 999
num_freq = 0
for key, value in count_dict.items():
max_count = max(max_count, value)
min_count = min(min_count, value)
if value > 100:
num_freq += 1
print("max_count: {}".format(max_count))
print("min_count: {}".format(min_count))
print("num_freq: {}".format(num_freq))
| none | 1 | 2.621495 | 3 |
|
app/app.py | wesleibarboza/natasha-virtual | 23 | 7480 |
# -*- coding: utf-8 -*-
"""Archivo principal para el echobot. Main File for the echobot"""
from fbmq import Page
from flask import Flask, request
# Token generado por la página web. Generated token in the facebook web page
PAGE_ACCESS_TOKEN = "COPY_HERE_YOUR_PAGE_ACCES_TOKEN"
# Token generado por nosotros. Token generated by us
VERIFY_TOKEN = "<PASSWORD>" # Si cambias este token, asegúrate de cambiarlo también en la página de configuración del webhook. If you change this token, verify that you changed it too in the webhook configuration.
app = Flask(__name__)
page = Page(PAGE_ACCESS_TOKEN) # Generamos la instancia de la página de facebook. We make the facebook page instance
@app.route('/')
def hello_world():
"""La página principal del servidor. The server main page."""
return 'Inicio del servidor'
@app.route('/webhook', methods=['GET', 'POST'])
def webhook():
"""El método que se ejecuta cuando Facebook se conecta. This method executes as Facebook connect to us."""
if request.method == 'POST': # if the message is a POST, we handle it with message_handler. Si el mensaje es POST, se maneja con el message_handler
# Facebook sends the user messages with a POST. Facebook manda los mensajes del usuario con un POST.
page.handle_webhook(request.get_data(as_text=True))
return 'ok'
elif request.method == 'GET': # if the message is a GET, we handle it here. Si el mensaje es un GET, lo manejamos aquí.
        # The first time you configure the webhook, FB sends a GET to your webhook to verify that it really is you, and you're not working on someone else's page.
# La primera vez que se configura el webhook, FB manda un mensaje GET para ver que realmente eres tú, y no estás trabajando en la página de alguien más.
if request.args.get('hub.verify_token') == VERIFY_TOKEN:
# If the verify token in the url matches our verify token we answer with the challenge to prove our identity.
# Si el verify token de la url concuerda con el de nosotros le respondemos con el challenge o reto para verificar que somos nosotros
return request.args.get('hub.challenge')
return 'Wrong Verify token'
@page.handle_message
def message_handler(event):
"""Este método se ejecuta cuando nos llega un mensaje a la página. This method executes whenever a message is sent to our page."""
# Se saca el id del sender. We get the sender id.
sender_id = event.sender_id
# Vemos si el mensaje es un texto o un adjunto (imagen, gif, sticker, etc)
# We see if the message is a text or an attachment (image, GIF, sticker, etc)
if event.is_text_message:
        # We get the message from the event variable and send it back
# Obtenemos el mensaje de la variable event y se lo regresamos al usuario
page.send(sender_id, "Hey, you send me: {}".format(event.message_text))
elif event.is_attachment_message:
page.send(sender_id, "Boo, you didn't send a text. ")
if __name__ == '__main__':
app.run(host="127.0.0.1", port=5000, debug=True, threaded=True)
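# Editorial sketch (hedged): the GET handshake above can be exercised from a second
# process while the server runs; URL and port mirror app.run(), and requests is an
# assumed dev dependency.
def verify_webhook_locally():
    import requests
    r = requests.get("http://127.0.0.1:5000/webhook",
                     params={"hub.verify_token": VERIFY_TOKEN, "hub.challenge": "42"})
    assert r.text == "42"  # Facebook expects the challenge echoed back verbatim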
| # -*- coding: utf-8 -*-
"""Archivo principal para el echobot. Main File for the echobot"""
from fbmq import Page
from flask import Flask, request
# Token generado por la página web. Generated token in the facebook web page
PAGE_ACCESS_TOKEN = "COPY_HERE_YOUR_PAGE_ACCES_TOKEN"
# Token generado por nosotros. Token generated by us
VERIFY_TOKEN = "<PASSWORD>" # Si cambias este token, asegúrate de cambiarlo también en la página de configuración del webhook. If you change this token, verify that you changed it too in the webhook configuration.
app = Flask(__name__)
page = Page(PAGE_ACCESS_TOKEN) # Generamos la instancia de la página de facebook. We make the facebook page instance
@app.route('/')
def hello_world():
"""La página principal del servidor. The server main page."""
return 'Inicio del servidor'
@app.route('/webhook', methods=['GET', 'POST'])
def webhook():
"""El método que se ejecuta cuando Facebook se conecta. This method executes as Facebook connect to us."""
if request.method == 'POST': # if the message is a POST, we handle it with message_handler. Si el mensaje es POST, se maneja con el message_handler
# Facebook sends the user messages with a POST. Facebook manda los mensajes del usuario con un POST.
page.handle_webhook(request.get_data(as_text=True))
return 'ok'
elif request.method == 'GET': # if the message is a GET, we handle it here. Si el mensaje es un GET, lo manejamos aquí.
        # The first time you configure the webhook, FB sends a GET to your webhook to verify that it really is you, and you're not working on someone else's page.
# La primera vez que se configura el webhook, FB manda un mensaje GET para ver que realmente eres tú, y no estás trabajando en la página de alguien más.
if request.args.get('hub.verify_token') == VERIFY_TOKEN:
# If the verify token in the url matches our verify token we answer with the challenge to prove our identity.
# Si el verify token de la url concuerda con el de nosotros le respondemos con el challenge o reto para verificar que somos nosotros
return request.args.get('hub.challenge')
return 'Wrong Verify token'
@page.handle_message
def message_handler(event):
"""Este método se ejecuta cuando nos llega un mensaje a la página. This method executes whenever a message is sent to our page."""
# Se saca el id del sender. We get the sender id.
sender_id = event.sender_id
# Vemos si el mensaje es un texto o un adjunto (imagen, gif, sticker, etc)
# We see if the message is a text or an attachment (image, GIF, sticker, etc)
if event.is_text_message:
        # We get the message from the event variable and send it back
# Obtenemos el mensaje de la variable event y se lo regresamos al usuario
page.send(sender_id, "Hey, you send me: {}".format(event.message_text))
elif event.is_attachment_message:
page.send(sender_id, "Boo, you didn't send a text. ")
if __name__ == '__main__':
app.run(host="127.0.0.1", port=5000, debug=True, threaded=True) | es | 0.706969 | # -*- coding: utf-8 -*- Archivo principal para el echobot. Main File for the echobot # Token generado por la página web. Generated token in the facebook web page # Token generado por nosotros. Token generated by us # Si cambias este token, asegúrate de cambiarlo también en la página de configuración del webhook. If you change this token, verify that you changed it too in the webhook configuration. # Generamos la instancia de la página de facebook. We make the facebook page instance La página principal del servidor. The server main page. El método que se ejecuta cuando Facebook se conecta. This method executes as Facebook connect to us. # if the message is a POST, we handle it with message_handler. Si el mensaje es POST, se maneja con el message_handler # Facebook sends the user messages with a POST. Facebook manda los mensajes del usuario con un POST. # if the message is a GET, we handle it here. Si el mensaje es un GET, lo manejamos aquí. # The first you configure the webhook, FB sends a GET to your webhook to verify that it really is you, and you're not working on someone's else page. # La primera vez que se configura el webhook, FB manda un mensaje GET para ver que realmente eres tú, y no estás trabajando en la página de alguien más. # If the verify token in the url matches our verify token we answer with the challenge to prove our identity. # Si el verify token de la url concuerda con el de nosotros le respondemos con el challenge o reto para verificar que somos nosotros Este método se ejecuta cuando nos llega un mensaje a la página. This method executes whenever a message is sent to our page. # Se saca el id del sender. We get the sender id. # Vemos si el mensaje es un texto o un adjunto (imagen, gif, sticker, etc) # We see if the message is a text or an attachment (image, GIF, sticker, etc) # We get the message from the event variable and sent it back7 # Obtenemos el mensaje de la variable event y se lo regresamos al usuario | 2.826879 | 3 |
Camvid/CamVid_utlis.py | Water2style/FCN-pytorch-CanRun | 7 | 7481 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import scipy.misc
import random
import os
import imageio
#############################
# global variables #
#############################
root_dir = "/home/water/DATA/camvid-master"
data_dir = os.path.join(root_dir, "701_StillsRaw_full") # train data
label_dir = os.path.join(root_dir, "LabeledApproved_full") # train label
label_colors_file = os.path.join(root_dir, "label_colors.txt") # color to label
val_label_file = os.path.join(root_dir, "val.csv") # validation file
train_label_file = os.path.join(root_dir, "train.csv") # train file
# create dir for label index
label_idx_dir = os.path.join(root_dir, "Labeled_idx")
if not os.path.exists(label_idx_dir):
    os.makedirs(label_idx_dir)
label2color = {}
color2label = {}
label2index = {}
index2label = {}
def divide_train_val(val_rate=0.1, shuffle=True, random_seed=None):
    data_list = os.listdir(data_dir) # every file name in this directory: 'img1', 'img2', ...
    data_len = len(data_list) # 702 images; note that this is the training set
    val_len = int(data_len * val_rate) # reserve 10% of the ~700 training images for validation
    if random_seed: # fix the random seed for reproducibility
        random.seed(random_seed)
    if shuffle:
        # sample(seq, n) picks n random, independent elements from seq
        data_idx = random.sample(range(data_len), data_len)
        # data_idx is the index range 0..702 in random order
    else:
        data_idx = list(range(data_len)) # 0..702 in the original order
    val_idx = [data_list[i] for i in data_idx[:val_len]] # first ~70 image names (a list)
    train_idx = [data_list[i] for i in data_idx[val_len:]] # the remaining image names
    # create val.csv
    # "w" opens a file for writing only: an existing file is opened and edited
    # from the start (its old contents are discarded),
    # a missing file is created.
    v = open(val_label_file, "w")
    v.write("img,label\n") # write() writes the given string to the file
    for idx, name in enumerate(val_idx):
        if 'png' not in name: # skip corrupted files
            continue
        img_name = os.path.join(data_dir, name)
        lab_name = os.path.join(label_idx_dir, name)
        lab_name = lab_name.split(".")[0] + "_L.png.npy"
        v.write("{},{}\n".format(img_name, lab_name))
    # The result is a .csv file in the root directory with two columns: the raw
    # validation images (path + name), and the matching label files (<label image>.png.npy).
    # parse_label below is what generates those .npy files inside the label-index dir.
    # create train.csv; both .csv files therefore pair raw images with label .npy files
    t = open(train_label_file, "w")
    t.write("img,label\n")
    for idx, name in enumerate(train_idx):
        if 'png' not in name:
            continue
        img_name = os.path.join(data_dir, name)
        lab_name = os.path.join(label_idx_dir, name)
        lab_name = lab_name.split(".")[0] + "_L.png.npy"
        t.write("{},{}\n".format(img_name, lab_name))
# parse the labels
def parse_label():
    # change label to class index
    # "r" opens the file read-only with the pointer at the start (the default mode).
    # label_colors.txt stores one color/label pair per line, e.g. "64 128 64\tAnimal".
    # Not dropping the trailing empty line would be a bug.
    f = open(label_colors_file, "r").read().split("\n")[:-1] # ignore the last empty line
    for idx, line in enumerate(f):
        label = line.split()[-1] # the label string: Animal, Pedestrian, Wall, ...
        color = tuple([int(x) for x in line.split()[:-1]]) # the matching RGB tuple,
        # e.g. Animal's color is (64, 128, 64)
        print(label, color)
        # d[key] = value sets d[key] to value, adding the key if it is missing, so
        # after label2color[label] = color runs we have a dict keyed by label with
        # the color as value: {'Animal': (64, 128, 64), 'Archway': (192, 0, 128), ...}.
        # These lookup tables are used further below.
        label2color[label] = color
        color2label[color] = label # {color: label}
        label2index[label] = idx # {label: index}, e.g. {'Animal': 0, 'Archway': 1, ...}
        index2label[idx] = label # {index: label}
        # the original author's own debugging snippet:
        # rgb = np.zeros((255, 255, 3), dtype=np.uint8)
        # rgb[..., 0] = color[0]
        # rgb[..., 1] = color[1]
        # rgb[..., 2] = color[2]
        # imshow(rgb, title=label)
    # enumerate is an iterator yielding (0, item0), (1, item1), ...
    for idx, name in enumerate(os.listdir(label_dir)): # os.listdir(label_dir) lists every label image
        # idx is the running index and name the image file name; os.listdir() returns
        # the names of the entries in the given directory, in alphabetical order.
        filename = os.path.join(label_idx_dir, name) # Labeled_idx/<image name>
        if os.path.exists(filename + '.npy'): # check for an existing <name>.png.npy (none on the first run)
            print("Skip %s" % (name)) # skip images already parsed; .npy is the numpy format
            continue
        print("Parse %s" % (name)) # prints "Parse <image name>" (without the path)
        img = os.path.join(label_dir, name) # img is the path LabeledApproved_full/<image name>
        # (compare with filename above: label input path vs. index output path)
        img = imageio.imread(img) # load the image as a numpy array
        height, weight, _ = img.shape # numpy stores images as (height, width, channels)
        # (a Tensor would be (channels, height, width))
        # inside the big for loop, the block below runs once per label image
        idx_mat = np.zeros((height, weight)) # 720*960
        for h in range(height):
            for w in range(weight): # (color below shadows the name used earlier; different scope)
                color = tuple(img[h, w]) # tuple(seq) converts the pixel value into a tuple;
                # e.g. img[h, w] = (128, 64, 64) becomes the tuple key, and iterating
                # over h and w visits every color that occurs in this image
                try: # try/except: run the try block and jump to except on an error
                    # the tuple color is the dict key, so looking it up returns the one
                    # label that corresponds to this color
                    label = color2label[color] # pixel color -> class label (e.g. cat, space)
                    index = label2index[label] # class label -> class index (e.g. 1, 5)
                    idx_mat[h, w] = index # build the color -> label -> index map, one pixel at a time
                except:
                    print("error: img:%s, h:%d, w:%d" % (name, h, w))
        idx_mat = idx_mat.astype(np.uint8) # convert the dtype
        np.save(filename, idx_mat) # numpy.save(file, arr, allow_pickle=True, fix_imports=True)
        # stores this image's index map (the loop above filled it pixel by pixel)
        print("Finish %s" % (name))
    # Leaving the for loop means all 702 label images have been turned into png.npy
    # index maps: each map stores the class index per pixel, so e.g. an image of
    # buildings and background shows 4 at building pixels,
    # and 4 = Buildings label = Buildings color [128, 0, 0]
    # test some pixels' label ~~~~~~~~~~~~~~~~~~~~~~~~~~`
    #img = os.path.join(label_dir, os.listdir(label_dir)[0]) # img layout: img[height, width, rgb]
    #img = imageio.imread(img)
    #test_cases = [(555, 405), (0, 0), (380, 645), (577, 943)] # pixel positions to probe, e.g. img[555, 405]
    #test_ans = ['Car', 'Building', 'Truck_Bus', 'Car'] # the labels read off by eye at those positions
    #for idx, t in enumerate(test_cases):
        #color = img[t] # the pixel value at each position (labeled images only use the 32 fixed colors)
        #assert color2label[tuple(color)] == test_ans[idx] # sanity check
    # the original test positions were mislabeled and the assert fired; after
    # re-checking the image by eye in Jupyter, the cases above pass
'''debug function'''
def imshow(img, title=None):
    try:
        img = mpimg.imread(img) # mpimg is matplotlib.image; img here is a path, not a processed numpy array
        imgplot = plt.imshow(img)
    except:
        plt.imshow(img, interpolation='nearest')
    if title is not None:
        plt.title(title)
    plt.show()
if __name__ == '__main__':
print("it starts working")
divide_train_val(random_seed=1)
parse_label()
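# Editorial sketch (hedged, kept commented out): the per-pixel double loop in
# parse_label can be vectorized with numpy, which is dramatically faster on the
# 720x960 CamVid labels:
# def parse_one_label_fast(img):
#     idx_mat = np.zeros(img.shape[:2], dtype=np.uint8)
#     for color, label in color2label.items():
#         mask = np.all(img[:, :, :3] == np.array(color), axis=-1)
#         idx_mat[mask] = label2index[label]
#     return idx_mat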
print("process finished") | <filename>Camvid/CamVid_utlis.py
# -*- coding: utf-8 -*-
from __future__ import print_function
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import scipy.misc
import random
import os
import imageio
#############################
# global variables #
#############################
root_dir = "/home/water/DATA/camvid-master"
data_dir = os.path.join(root_dir, "701_StillsRaw_full") # train data
label_dir = os.path.join(root_dir, "LabeledApproved_full") # train label
label_colors_file = os.path.join(root_dir, "label_colors.txt") # color to label
val_label_file = os.path.join(root_dir, "val.csv") # validation file
train_label_file = os.path.join(root_dir, "train.csv") # train file
# create dir for label index
label_idx_dir = os.path.join(root_dir, "Labeled_idx")
if not os.path.exists(label_idx_dir):
    os.makedirs(label_idx_dir)
label2color = {}
color2label = {}
label2index = {}
index2label = {}
def divide_train_val(val_rate=0.1, shuffle=True, random_seed=None):
    data_list = os.listdir(data_dir) # every file name in this directory: 'img1', 'img2', ...
    data_len = len(data_list) # 702 images; note that this is the training set
    val_len = int(data_len * val_rate) # reserve 10% of the ~700 training images for validation
    if random_seed: # fix the random seed for reproducibility
        random.seed(random_seed)
    if shuffle:
        # sample(seq, n) picks n random, independent elements from seq
        data_idx = random.sample(range(data_len), data_len)
        # data_idx is the index range 0..702 in random order
    else:
        data_idx = list(range(data_len)) # 0..702 in the original order
    val_idx = [data_list[i] for i in data_idx[:val_len]] # first ~70 image names (a list)
    train_idx = [data_list[i] for i in data_idx[val_len:]] # the remaining image names
    # create val.csv
    # "w" opens a file for writing only: an existing file is opened and edited
    # from the start (its old contents are discarded),
    # a missing file is created.
    v = open(val_label_file, "w")
    v.write("img,label\n") # write() writes the given string to the file
    for idx, name in enumerate(val_idx):
        if 'png' not in name: # skip corrupted files
            continue
        img_name = os.path.join(data_dir, name)
        lab_name = os.path.join(label_idx_dir, name)
        lab_name = lab_name.split(".")[0] + "_L.png.npy"
        v.write("{},{}\n".format(img_name, lab_name))
    # The result is a .csv file in the root directory with two columns: the raw
    # validation images (path + name), and the matching label files (<label image>.png.npy).
    # parse_label below is what generates those .npy files inside the label-index dir.
    # create train.csv; both .csv files therefore pair raw images with label .npy files
    t = open(train_label_file, "w")
    t.write("img,label\n")
    for idx, name in enumerate(train_idx):
        if 'png' not in name:
            continue
        img_name = os.path.join(data_dir, name)
        lab_name = os.path.join(label_idx_dir, name)
        lab_name = lab_name.split(".")[0] + "_L.png.npy"
        t.write("{},{}\n".format(img_name, lab_name))
# parse the labels
def parse_label():
    # change label to class index
    # "r" opens the file read-only with the pointer at the start (the default mode).
    # label_colors.txt stores one color/label pair per line, e.g. "64 128 64\tAnimal".
    # Not dropping the trailing empty line would be a bug.
    f = open(label_colors_file, "r").read().split("\n")[:-1] # ignore the last empty line
    for idx, line in enumerate(f):
        label = line.split()[-1] # the label string: Animal, Pedestrian, Wall, ...
        color = tuple([int(x) for x in line.split()[:-1]]) # the matching RGB tuple,
        # e.g. Animal's color is (64, 128, 64)
        print(label, color)
        # d[key] = value sets d[key] to value, adding the key if it is missing, so
        # after label2color[label] = color runs we have a dict keyed by label with
        # the color as value: {'Animal': (64, 128, 64), 'Archway': (192, 0, 128), ...}.
        # These lookup tables are used further below.
        label2color[label] = color
        color2label[color] = label # {color: label}
        label2index[label] = idx # {label: index}, e.g. {'Animal': 0, 'Archway': 1, ...}
        index2label[idx] = label # {index: label}
        # the original author's own debugging snippet:
        # rgb = np.zeros((255, 255, 3), dtype=np.uint8)
        # rgb[..., 0] = color[0]
        # rgb[..., 1] = color[1]
        # rgb[..., 2] = color[2]
        # imshow(rgb, title=label)
    # enumerate is an iterator yielding (0, item0), (1, item1), ...
    for idx, name in enumerate(os.listdir(label_dir)): # os.listdir(label_dir) lists every label image
        # idx is the running index and name the image file name; os.listdir() returns
        # the names of the entries in the given directory, in alphabetical order.
        filename = os.path.join(label_idx_dir, name) # Labeled_idx/<image name>
        if os.path.exists(filename + '.npy'): # check for an existing <name>.png.npy (none on the first run)
            print("Skip %s" % (name)) # skip images already parsed; .npy is the numpy format
            continue
        print("Parse %s" % (name)) # prints "Parse <image name>" (without the path)
        img = os.path.join(label_dir, name) # img is the path LabeledApproved_full/<image name>
        # (compare with filename above: label input path vs. index output path)
        img = imageio.imread(img) # load the image as a numpy array
        height, weight, _ = img.shape # numpy stores images as (height, width, channels)
        # (a Tensor would be (channels, height, width))
        # inside the big for loop, the block below runs once per label image
        idx_mat = np.zeros((height, weight)) # 720*960
        for h in range(height):
            for w in range(weight): # (color below shadows the name used earlier; different scope)
                color = tuple(img[h, w]) # tuple(seq) converts the pixel value into a tuple;
                # e.g. img[h, w] = (128, 64, 64) becomes the tuple key, and iterating
                # over h and w visits every color that occurs in this image
                try: # try/except: run the try block and jump to except on an error
                    # the tuple color is the dict key, so looking it up returns the one
                    # label that corresponds to this color
                    label = color2label[color] # pixel color -> class label (e.g. cat, space)
                    index = label2index[label] # class label -> class index (e.g. 1, 5)
                    idx_mat[h, w] = index # build the color -> label -> index map, one pixel at a time
                except:
                    print("error: img:%s, h:%d, w:%d" % (name, h, w))
        idx_mat = idx_mat.astype(np.uint8) # convert the dtype
        np.save(filename, idx_mat) # numpy.save(file, arr, allow_pickle=True, fix_imports=True)
        # stores this image's index map (the loop above filled it pixel by pixel)
        print("Finish %s" % (name))
    # Leaving the for loop means all 702 label images have been turned into png.npy
    # index maps: each map stores the class index per pixel, so e.g. an image of
    # buildings and background shows 4 at building pixels,
    # and 4 = Buildings label = Buildings color [128, 0, 0]
    # test some pixels' label ~~~~~~~~~~~~~~~~~~~~~~~~~~`
    #img = os.path.join(label_dir, os.listdir(label_dir)[0]) # img layout: img[height, width, rgb]
    #img = imageio.imread(img)
    #test_cases = [(555, 405), (0, 0), (380, 645), (577, 943)] # pixel positions to probe, e.g. img[555, 405]
    #test_ans = ['Car', 'Building', 'Truck_Bus', 'Car'] # the labels read off by eye at those positions
    #for idx, t in enumerate(test_cases):
        #color = img[t] # the pixel value at each position (labeled images only use the 32 fixed colors)
        #assert color2label[tuple(color)] == test_ans[idx] # sanity check
    # the original test positions were mislabeled and the assert fired; after
    # re-checking the image by eye in Jupyter, the cases above pass
'''debug function'''
def imshow(img, title=None):
    try:
        img = mpimg.imread(img) # mpimg is matplotlib.image; img here is a path, not a processed numpy array
        imgplot = plt.imshow(img)
    except:
        plt.imshow(img, interpolation='nearest')
    if title is not None:
        plt.title(title)
    plt.show()
if __name__ == '__main__':
print("it starts working")
divide_train_val(random_seed=1)
parse_label()
print("process finished") | zh | 0.895137 | # -*- coding: utf-8 -*- ############################# # global variables # ############################# # train data # train label # color to label # validation file # train file # create dir for label index #返回这个目录里,所有内容,‘图1’‘,图2’...... #702个图片 #注意这里是训练集 #训练集700张,分10%的数量给验证集 #设置随机种子 #看看后面哪里用 #sample(seq, n) 从序列seq中选择n个随机且独立的元素 # data_idx 是从0到702 随机排序的数组 #这个就是从0到702 依次排序 # 前70个,图片名 List # 71到702个 # !创建 create val.csv # "w"打开一个文件只用于写入。如果该文件已存在则打开文件, # 并从开头开始编辑,即原有内容会被删除。 # 如果该文件不存在,创建新文件。 #write() 方法用于向文件中写入指定字符串 ##跳过损坏文件 #最后生成了一个.csv文件,位于根目录 ## 装的信息是: 2列,一列是验证集,70张 生图路径+名字,第二列是验证集对应的:标签图+名字+.npy #png.npy :后面parse_label函数,就是在标签图路径里 生成 标签图+名字+.npy 文件!!! # create train.csv 所以这2个.csv文件,这里存放的是信息 ,是: 生图信息和标签图+npy信息 #parse:分析 分析标签 # change label to class index #“r”:以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。 #label_colors.txt :!!装的是颜色和对应标签 64 128 64\tAnimal 颜色\t类别 # 只读,读好了之后 #不igore 就会bug # ignore the last empty line #提取所有label形成一个字符串 #动物,人,墙.. #形成一个元组 对应动物,人,墙.. #的颜色,比如动物的颜色是红色 :[128,0,0].... #d[key] = value #设置d[key]的值为value,如果该key不存在,则为新增 #label2color[label] = color 运行后: #就形成了1个字典: 以label做key,以color做value的新字典 #包含内容:{'Animal': (64, 128, 64), 'Archway': (192, 0, 128).....} #后面有精彩用法.... #{颜色:标签} # {标签:idx} {'Animal': 0, 'Archway': 1...} # {idx:标签} #下面是作者自己标注的: # rgb = np.zeros((255, 255, 3), dtype=np.uint8) # rgb[..., 0] = color[0] # rgb[..., 1] = color[1] # rgb[..., 2] = color[2] # imshow(rgb, title=label) #enumerate :迭代器,0号,内容0;1号,内容1 #os.listdir(label_dir) 是标签集里所有图片 #idx就是从0开始的序号 name是图片名 #os.listdir() 方法用于返回指定的文件夹包含的文件或文件夹的名字的列表,这个列表以字母顺序。 # labeled_idx/所有图片名 #检查是否有图片名.png.npy,当前应该是没有的 #有了就跳过这个图 npy是numpy文件 ## 打出:Parse 图片名(不包含路径) ## img是路径,LabeledApproved_full/所有图片名 ## 区分一下 和 filename之间的用法和关联? #用numpy(npy)格式打开一个图 # numpy存储图片格式(高,宽,3通道) #Tensor是(3,高,宽) #在大for循环里,对每一张图执行下面操作 img是上面读取的一个npy格式的图哈 #720*960 #前面也有个color啊,不同作用域功能不同 # tuple(序列),把序列转为元组 #这里应该是把img[h,w]这个!像素点!(128,64,64) # 抓出来弄成了一个元组,又因为遍历 #所以color是一个有 height*weight个元素的tuple #color包含着这个图片里,所有的颜色 #try,except: 异常检测,try里顺序执行,如果,去执行except #tuple类型的color在这里作为key,输出相应的value,也就是label值,dict的存储是一一对应的 #所以 出来的label是和输入的color 一一对应 # 给彩图像素点,返回像素点的label,就像是上面那图里只有猫和北京,返回:cat space # 给label返回类型代表的号码,给cat sapce,返回1,5 #构成了一个由颜色到标签到标签序号处理后的图,一个点一个点送? #转换数据类型 #numpy.save(file, arr, allow_pickle=True, fix_imports=True) #把当前(因为这个for里是逐像素点处理一张图)这个图的信息(numpy)存起来 #跳出for,这个位置就是处理好了所有的图,生成了702个 png.npy图 #生成的这个是一个numpy图,每个图上,是标记好的序号 #就像 一个张图里是 建筑和空白,建筑位置上显示:4,4 = buildings标签 = buildings颜色[128,0,0] # test some pixels' label ~~~~~~~~~~~~~~~~~~~~~~~~~~` #img = os.path.join(label_dir, os.listdir(label_dir)[0]) #img数据:img[height,weight,rgb] #img = imageio.imread(img) #test_cases = [(555, 405), (0, 0), (380, 645), (577, 943)] # img[555,405]:此图此点的!位置信息! #test_ans = ['Car', 'Building', 'Truck_Bus', 'Car'] #这个是肉眼去看哈,看上面的位置,对应的是啥label #for idx, t in enumerate(test_cases): #color = img[t] #相当于访问 img上的4个点的位置信息,输出的是这4个点对应的像素值(img是labeled,就那32个规整的颜色) #assert color2label[tuple(color)] == test_ans[idx] ##检查一下对不对 #上面是作者乱标的,所以报错,我在jupyter通过肉眼看图并且调试,就对了哈!! debug function #mpimg: matplotlib.image 输入的img是个地址哈,不是啥处理后的numpy数组 | 2.61704 | 3 |
stream-reasoner/ws_client.py | patrik999/AdaptiveStreamReasoningMonitoring | 1 | 7482 | #!/usr/bin/env python
import websocket
import time
try:
import thread
except ImportError:
import _thread as thread
runs = 100
def on_message(ws, message):
print(message)
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
def run(*args):
for i in range(runs):
time.sleep(5)
ws.send("Ping")
time.sleep(1)
ws.close()
print("thread terminating...")
thread.start_new_thread(run, ())
if __name__ == "__main__":
websocket.enableTrace(True)
url = "ws://localhost:8082"
ws = websocket.WebSocketApp(url, on_message = on_message, on_error = on_error, on_close = on_close)
ws.on_open = on_open
ws.run_forever()
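# Editorial note (hedged): websocket-client >= 1.0 passes extra arguments to the
# callbacks, e.g. on_close(ws, close_status_code, close_msg), while this script
# assumes the older on_close(ws) signature. A version-tolerant handler:
def on_close_compat(ws, *args):
    print("### closed ###", *args)  # *args absorbs the status code and message, if any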
| #!/usr/bin/env python
import websocket
import time
try:
import thread
except ImportError:
import _thread as thread
runs = 100
def on_message(ws, message):
print(message)
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
def run(*args):
for i in range(runs):
time.sleep(5)
ws.send("Ping")
time.sleep(1)
ws.close()
print("thread terminating...")
thread.start_new_thread(run, ())
if __name__ == "__main__":
websocket.enableTrace(True)
url = "ws://localhost:8082"
ws = websocket.WebSocketApp(url, on_message = on_message, on_error = on_error, on_close = on_close)
ws.on_open = on_open
ws.run_forever()
| en | 0.452143 | #!/usr/bin/env python ### closed ### | 2.724776 | 3
Main.py | samuelterra22/Data-Mining | 0 | 7483 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import seaborn as sns
from matplotlib import rcParams
import statsmodels.api as sm
from statsmodels.formula.api import ols
df = pd.read_csv('kc_house_data.csv')
# print(df.head())
# print(df.isnull().any())
# print(df.describe())
# fig = plt.figure(figsize=(12, 6))
# sqft = fig.add_subplot(121)
# cost = fig.add_subplot(122)
#
# sqft.hist(df.sqft_living, bins=80)
# sqft.set_xlabel('Ft^2')
# sqft.set_title("Histogram of House Square Footage")
#
# cost.hist(df.price, bins=80)
# cost.set_xlabel('Price ($)')
# cost.set_title("Histogram of Housing Prices")
#
# plt.show()
# m = ols('price ~ sqft_living', df).fit()
# print(m.summary())
# m = ols('price ~ sqft_living + bedrooms + grade + condition',df).fit()
# print (m.summary())
sns.jointplot(x="sqft_living", y="price", data=df, kind='reg', fit_reg=True, size=7)
plt.show()
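# Editorial sketch (hedged): the commented-out ols() calls above are the intended
# regressions; a minimal way to run one and inspect the fit:
def fit_price_model(data):
    # price explained by living area plus a few discrete quality covariates
    m = ols('price ~ sqft_living + bedrooms + grade + condition', data).fit()
    print(m.summary())  # coefficient table, R^2, F-statistic
    return m
# fit_price_model(df)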
| import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import seaborn as sns
from matplotlib import rcParams
import statsmodels.api as sm
from statsmodels.formula.api import ols
df = pd.read_csv('kc_house_data.csv')
# print(df.head())
# print(df.isnull().any())
# print(df.describe())
# fig = plt.figure(figsize=(12, 6))
# sqft = fig.add_subplot(121)
# cost = fig.add_subplot(122)
#
# sqft.hist(df.sqft_living, bins=80)
# sqft.set_xlabel('Ft^2')
# sqft.set_title("Histogram of House Square Footage")
#
# cost.hist(df.price, bins=80)
# cost.set_xlabel('Price ($)')
# cost.set_title("Histogram of Housing Prices")
#
# plt.show()
# m = ols('price ~ sqft_living', df).fit()
# print(m.summary())
# m = ols('price ~ sqft_living + bedrooms + grade + condition',df).fit()
# print (m.summary())
sns.jointplot(x="sqft_living", y="price", data=df, kind='reg', fit_reg=True, size=7)
plt.show()
| en | 0.24939 | # print(df.head()) # print(df.isnull().any()) # print(df.describe()) # fig = plt.figure(figsize=(12, 6)) # sqft = fig.add_subplot(121) # cost = fig.add_subplot(122) # # sqft.hist(df.sqft_living, bins=80) # sqft.set_xlabel('Ft^2') # sqft.set_title("Histogram of House Square Footage") # # cost.hist(df.price, bins=80) # cost.set_xlabel('Price ($)') # cost.set_title("Histogram of Housing Prices") # # plt.show() # m = ols('price ~ sqft_living', df).fit() # print(m.summary()) # m = ols('price ~ sqft_living + bedrooms + grade + condition',df).fit() # print (m.summary()) | 3.028525 | 3 |
whole_cell_patch/filterDialog.py | 11uc/whole_cell_patch | 2 | 7484 |
# Dialogs for setting filter parameters.
from PyQt5.QtWidgets import QLabel, QGridLayout, QPushButton, \
QLineEdit, QVBoxLayout, QHBoxLayout, QDialog, QComboBox, QWidget
from PyQt5.QtCore import pyqtSignal
class FilterDialog(QDialog):
'''
Dialog for choosing filter types.
'''
def __init__(self, default, parent = None):
'''
Build ui and set up parameter setting
Parameters
----------
default: list
List of filters, which are dictionaries with names under
key "name" and parameter elements.
parent: QWidget, optional
Parent widget.
Attributes
----------
fnames: dictionary
Names of filters, two nested dictionaries to specify two
properties about the type of filters.
'''
self.defaultFilters = default
super().__init__(parent)
self.filterCb = QComboBox(self) # Filter type
self.bandCb = QComboBox(self) # Band type
self.fnames = {}
count = 0
for f in default:
names = f["name"].split(',')
if names[0] not in self.fnames:
self.fnames[names[0]] = {}
self.filterCb.addItem(names[0])
if len(names) > 1:
if names[1] not in self.fnames[names[0]]:
self.fnames[names[0]][names[1]] = count
else:
self.fnames[names[0]][''] = count
count += 1
okBtn = QPushButton("OK", self)
cancelBtn = QPushButton("Cancel", self)
okBtn.clicked.connect(self.accept)
cancelBtn.clicked.connect(self.reject)
self.filterCb.currentTextChanged.connect(self.updateBand)
topVB = QVBoxLayout(self)
topVB.addWidget(self.filterCb)
topVB.addWidget(self.bandCb)
topVB.addWidget(okBtn)
topVB.addWidget(cancelBtn)
def updateBand(self, name):
'''
Update list of band in the band combobox.
Parameters
----------
name: str
Name of filter type.
'''
self.bandCb.clear()
self.bandCb.addItems(list(self.fnames[name].keys()))
def exec_(self):
'''
Override QDialog exec_ function. Alter return code to -1 for rejection
and integer number for chosen filter's id.
'''
ret = super().exec_()
if ret:
return self.fnames[self.filterCb.currentText()][
self.bandCb.currentText()]
else:
return -1
class FilterParamDialog(QDialog):
'''
Dialog for setting filter parameters.
'''
def __init__(self, parent = None):
'''
Build ui and set up connections.
Parameters
----------
parent: QWidget, optional
Parent widget.
Attributes
----------
form: dictionary
Parameter names as keys and corresponding QLineEdit object
as values.
formWd: QWidget
Container for displaying the parameter setting form.
'''
super().__init__(parent)
self.form = {}
okBtn = QPushButton("OK", self)
cancelBtn = QPushButton("Cancel", self)
topVB = QVBoxLayout(self)
self.formVB = QVBoxLayout()
self.formWd = None
btnHB = QHBoxLayout()
btnHB.addWidget(okBtn)
btnHB.addWidget(cancelBtn)
cancelBtn.clicked.connect(self.reject)
okBtn.clicked.connect(self.accept)
topVB.addLayout(self.formVB)
topVB.addLayout(btnHB)
def makeForm(self, filt):
'''
Build parameters setting grid layout for filter filt.
Parameters
----------
filt: dictionary
Filter information, parameters are in string format.
'''
# clear the previous form widget
if self.formWd != None:
self.formVB.removeWidget(self.formWd)
self.form = {}
self.formWd.setParent(None)
del self.formWd
self.formWd = None
self.formWd = QWidget()
formGrid = QGridLayout(self.formWd)
row = 0
for k, v in filt.items():
if k != "name":
self.form[k] = QLineEdit(v, self.formWd)
formGrid.addWidget(QLabel(k, self.formWd), row, 0)
formGrid.addWidget(self.form[k], row, 1)
row = row + 1
self.formVB.addWidget(self.formWd)
def getForm(self):
'''
Get the parameters filled in the QLineEdit objects.
Returns
-------
filt: dictionary
Filter information, without name.
'''
filt = {}
for k, v in self.form.items():
filt[k] = v.text()
return filt
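# Editorial usage sketch (hedged): how the two dialogs are meant to chain together;
# the caller and the exact filter dicts are assumptions.
def choose_and_edit_filter(parent, filters):
    dlg = FilterDialog(filters, parent)
    fid = dlg.exec_()  # -1 on cancel, otherwise the chosen filter's index
    if fid < 0:
        return None
    pdlg = FilterParamDialog(parent)
    pdlg.makeForm(filters[fid])  # one QLineEdit per parameter
    if pdlg.exec_():
        return pdlg.getForm()  # edited parameters, still as strings
    return None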
| # Dialogs for setting filter parameters.
from PyQt5.QtWidgets import QLabel, QGridLayout, QPushButton, \
QLineEdit, QVBoxLayout, QHBoxLayout, QDialog, QComboBox, QWidget
from PyQt5.QtCore import pyqtSignal
class FilterDialog(QDialog):
'''
Dialog for choosing filter types.
'''
def __init__(self, default, parent = None):
'''
Build ui and set up parameter setting
Parameters
----------
default: list
List of filters, which are dictionaries with names under
key "name" and parameter elements.
parent: QWidget, optional
Parent widget.
Attributes
----------
fnames: dictionary
Names of filters, two nested dictionaries to specify two
properties about the type of filters.
'''
self.defaultFilters = default
super().__init__(parent)
self.filterCb = QComboBox(self) # Filter type
self.bandCb = QComboBox(self) # Band type
self.fnames = {}
count = 0
for f in default:
names = f["name"].split(',')
if names[0] not in self.fnames:
self.fnames[names[0]] = {}
self.filterCb.addItem(names[0])
if len(names) > 1:
if names[1] not in self.fnames[names[0]]:
self.fnames[names[0]][names[1]] = count
else:
self.fnames[names[0]][''] = count
count += 1
okBtn = QPushButton("OK", self)
cancelBtn = QPushButton("Cancel", self)
okBtn.clicked.connect(self.accept)
cancelBtn.clicked.connect(self.reject)
self.filterCb.currentTextChanged.connect(self.updateBand)
topVB = QVBoxLayout(self)
topVB.addWidget(self.filterCb)
topVB.addWidget(self.bandCb)
topVB.addWidget(okBtn)
topVB.addWidget(cancelBtn)
def updateBand(self, name):
'''
Update list of band in the band combobox.
Parameters
----------
name: str
Name of filter type.
'''
self.bandCb.clear()
self.bandCb.addItems(list(self.fnames[name].keys()))
def exec_(self):
'''
Override QDialog exec_ function. Alter return code to -1 for rejection
and integer number for chosen filter's id.
'''
ret = super().exec_()
if ret:
return self.fnames[self.filterCb.currentText()][
self.bandCb.currentText()]
else:
return -1
class FilterParamDialog(QDialog):
'''
Dialog for setting filter parameters.
'''
def __init__(self, parent = None):
'''
Build ui and set up connections.
Parameters
----------
parent: QWidget, optional
Parent widget.
Attributes
----------
form: dictionary
Parameter names as keys and corresponding QLineEdit object
as values.
formWd: QWidget
Container for displaying the parameter setting form.
'''
super().__init__(parent)
self.form = {}
okBtn = QPushButton("OK", self)
cancelBtn = QPushButton("Cancel", self)
topVB = QVBoxLayout(self)
self.formVB = QVBoxLayout()
self.formWd = None
btnHB = QHBoxLayout()
btnHB.addWidget(okBtn)
btnHB.addWidget(cancelBtn)
cancelBtn.clicked.connect(self.reject)
okBtn.clicked.connect(self.accept)
topVB.addLayout(self.formVB)
topVB.addLayout(btnHB)
def makeForm(self, filt):
'''
Build parameters setting grid layout for filter filt.
Parameters
----------
filt: dictionary
Filter information, parameters are in string format.
'''
# clear the previous form widget
if self.formWd != None:
self.formVB.removeWidget(self.formWd)
self.form = {}
self.formWd.setParent(None)
del self.formWd
self.formWd = None
self.formWd = QWidget()
formGrid = QGridLayout(self.formWd)
row = 0
for k, v in filt.items():
if k != "name":
self.form[k] = QLineEdit(v, self.formWd)
formGrid.addWidget(QLabel(k, self.formWd), row, 0)
formGrid.addWidget(self.form[k], row, 1)
row = row + 1
self.formVB.addWidget(self.formWd)
def getForm(self):
'''
Get the parameters filled in the QLineEdit objects.
Returns
-------
filt: dictionary
Filter information, without name.
'''
filt = {}
for k, v in self.form.items():
filt[k] = v.text()
return filt | en | 0.474487 | # Dialogs for setting filter parameters. Dialog for choosing filter types. Build ui and set up parameter setting Parameters ---------- default: list List of filters, which are dictionaries with names under key "name" and parameter elements. parent: QWidget, optional Parent widget. Attributes ---------- fnames: dictionary Names of filters, two nested dictionaries to specify two properties about the type of filters. # Filter type # Band type Update list of band in the band combobox. Parameters ---------- name: str Name of filter type. Override QDialog exec_ function. Alter return code to -1 for rejection and integer number for chosen filter's id. Dialog for setting filter parameters. Build ui and set up connections. Parameters ---------- parent: QWidget, optional Parent widget. Attributes ---------- form: dictionary Parameter names as keys and corresponding QLineEdit object as values. formWd: QWidget Container for displaying the parameter setting form. Build parameters setting grid layout for filter filt. Parameters ---------- filt: dictionary Filter information, parameters are in string format. # clear the previous form widget Get the parameters filled in the QLineEdit objects. Returns ------- filt: dictionary Filter information, without name. | 2.61449 | 3 |
projects/controllable_dialogue/tasks/agents.py | zl930216/ParlAI | 41 | 7485 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
from .build import build, make_path
from parlai.utils.misc import warn_once
from parlai.core.teachers import ParlAIDialogTeacher
def _path(opt):
build(opt)
datatype = opt['datatype'].split(':')[0]
if datatype == 'test':
warn_once("WARNING: Test set not included. Setting datatype to valid.")
datatype = 'valid'
return make_path(opt, datatype + '.txt')
class DefaultTeacher(ParlAIDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['parlaidialogteacher_datafile'] = _path(opt)
super().__init__(opt, shared)
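# Editorial note (hedged): a teacher like this is normally exercised from the ParlAI
# command line, e.g. something along the lines of
#   parlai display_data -t projects.controllable_dialogue.tasks.agents
# the exact task string depends on how ParlAI resolves this module path.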
| <filename>projects/controllable_dialogue/tasks/agents.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
from .build import build, make_path
from parlai.utils.misc import warn_once
from parlai.core.teachers import ParlAIDialogTeacher
def _path(opt):
build(opt)
datatype = opt['datatype'].split(':')[0]
if datatype == 'test':
warn_once("WARNING: Test set not included. Setting datatype to valid.")
datatype = 'valid'
return make_path(opt, datatype + '.txt')
class DefaultTeacher(ParlAIDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['parlaidialogteacher_datafile'] = _path(opt)
super().__init__(opt, shared)
| en | 0.896436 | #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. | 2.13632 | 2 |
problems/p009.py | davisschenk/project-euler-python | 0 | 7486 |
from math import ceil, sqrt
from problem import Problem
from utils.math import gcd
class PythagoreanTriplet(Problem, name="Special Pythagorean triplet", expected=31875000):
@Problem.solution()
def brute_force(self, ts=1000):
for a in range(3, round((ts - 3) / 2)):
for b in range(a + 1, round((ts - 1 - a) / 2)):
c = ts - a - b
if c * c == a * a + b * b:
return a * b * c
@Problem.solution()
def parametrisation(self, ts=1000):
s2 = ts / 2
mlimit = ceil(sqrt(s2)) - 1
for m in range(2, mlimit):
if s2 % m == 0:
sm = s2 / m
while sm % 2 == 0:
sm /= 2
if m % 2 == 1:
k = m + 2
else:
k = m + 1
while k < 2 * m and k <= sm:
if sm % k == 0 and gcd(k, m) == 1:
d = s2 / (k * m)
n = k - m
a = d * (m * m - n * n)
b = 2 * d * m * n
c = d * (m * m + n * n)
return a * b * c
k += 2
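# Editorial worked check (hedged): for ts = 1000, Euclid's parametrisation with
# m = 20, n = 5 gives a = m*m - n*n = 375, b = 2*m*n = 200, c = m*m + n*n = 425;
# 375 + 200 + 425 = 1000 and 375**2 + 200**2 = 180625 = 425**2, so
# a*b*c = 31875000 matches the expected answer declared above.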
| from math import ceil, sqrt
from problem import Problem
from utils.math import gcd
class PythagoreanTriplet(Problem, name="Special Pythagorean triplet", expected=31875000):
@Problem.solution()
def brute_force(self, ts=1000):
for a in range(3, round((ts - 3) / 2)):
for b in range(a + 1, round((ts - 1 - a) / 2)):
c = ts - a - b
if c * c == a * a + b * b:
return a * b * c
@Problem.solution()
def parametrisation(self, ts=1000):
s2 = ts / 2
mlimit = ceil(sqrt(s2)) - 1
for m in range(2, mlimit):
if s2 % m == 0:
sm = s2 / m
while sm % 2 == 0:
sm /= 2
if m % 2 == 1:
k = m + 2
else:
k = m + 1
while k < 2 * m and k <= sm:
if sm % k == 0 and gcd(k, m) == 1:
d = s2 / (k * m)
n = k - m
a = d * (m * m - n * n)
b = 2 * d * m * n
c = d * (m * m + n * n)
return a * b * c
k += 2 | none | 1 | 3.454254 | 3 |
|
Roche.py | murbanec/Roche2D | 0 | 7487 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 10:37:04 2021
@author: <NAME>
"""
#calculates trajectory of small mass positioned close to L4 Lagrange point
#creates gif as output
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation, PillowWriter
DistanceJ = 778570000000. # m JUPITER FROM SUN
G = 6.67259*10**-11
Jupiter_mass = 1.8982*10**27 # kg
Sun_mass = 1.989*10**30 # kg
M1=Sun_mass
M2=Jupiter_mass
a=DistanceJ
Ang_vel=math.sqrt(G*(M1+M2)/(a**3)) #FROM KEPLER LAW
P=2.*math.pi/Ang_vel #Period
#center of mass is located at [0,0]; the massive object (Sun) sits at -r1 and the secondary object (Jupiter) at +r2
r2=M1*a/(M1+M2)
r1=M2*a/(M1+M2)
# Calculations are done in corotating frame
# s1, s2 are distances from sources of gravity (Sun, Jupiter)
def pot(x,y):
r=math.sqrt(x*x + y*y)
    if x==0:
        # pot only uses cos(theta), which is symmetric in y, so pi/2 covers either sign of y
        theta=math.pi/2.
if x>0:
theta=math.atan(abs(y)/x)
else:
theta=math.pi-math.atan(abs(y)/x)
s1=math.sqrt(r1*r1 + r*r + 2.*r1*r*math.cos(theta))
s2=math.sqrt(r2*r2 + r*r - 2.*r2*r*math.cos(theta))
result = -G*(M1/s1 + M2/s2) -1.*Ang_vel*Ang_vel*r*r/2.
return result
#Force per unit mass (acceleration) in x direction
# ax = \partial pot(x,y) / \partial x - 2 \Omega \times v
# in our case \Omega=(0,0,\Omega) and v=(vx,vy,0)
# second term is corresponding to Coriolis force
def ax(x,y,vx,vy):
dx=a/1000.
# result=-(pot(x+dx,y) -pot(x-dx,y))/(2.*dx) + 2.* Ang_vel*vy
result=-(-pot(x+2.*dx,y) + 8.*pot(x+dx,y) - 8.*pot(x-dx,y) + pot(x-2.*dx,y))/(12.*dx) + 2.* Ang_vel*vy
return result
def ay(x,y,vx,vy):
dy=a/1000.
# result=-( pot(x,y+dy)-pot(x,y-dy))/(dy*2.) - 2.* Ang_vel*vx
result=-(-pot(x,y+2.*dy) + 8.*pot(x,y+dy) - 8.*pot(x,y-dy) + pot(x,y-2*dy))/(dy*12.) - 2.* Ang_vel*vx
return result
pot2=np.vectorize(pot)
#TRAJECTORY OF ASTEROID STARTING CLOSE TO L4, AT REST WITH RESPECT TO THE ROTATING FRAME
x0=a/2.-r1
y0=math.sqrt(3)*a/2.
x0=1.005*x0
y0=1.005*y0
vx0=0.
vy0=0.
steps=300000
#initialize arrays
x= np.linspace(0, 10, steps)
y= np.linspace(0, 10, steps)
vx=np.linspace(0, 10, steps)
vy=np.linspace(0, 10, steps)
t= np.linspace(0, 10, steps)
x[0]=x0
vx[0]=vx0
y[0]=y0
vy[0]=vy0
t[0]=0.
i=0
timescale = math.sqrt((a*a)**1.5 / G/(M1+M2))
dt=timescale/1000.
#using 4th-order Runge-Kutta substeps for the velocities (a_x = d v_x / dt);
#positions are advanced with the average of old and new velocity, so the scheme
#is not a full RK4 in phase space. dt is constant, set to timescale/1000
for i in range (1,steps):
t[i]=(t[i-1]+dt)
Kx1=dt*ax(x[i-1],y[i-1],vx[i-1],vy[i-1])
Kx2=dt*ax(x[i-1],y[i-1],vx[i-1]+Kx1/2.,vy[i-1])
Kx3=dt*ax(x[i-1],y[i-1],vx[i-1]+Kx2/2.,vy[i-1])
Kx4=dt*ax(x[i-1],y[i-1],vx[i-1]+Kx3,vy[i-1])
vx[i]=vx[i-1] + Kx1/6. + Kx2/3. + Kx3/3. + Kx4/6.
Ky1=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1])
Ky2=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1]+Ky1/2.)
Ky3=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1]+Ky2/2.)
Ky4=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1]+Ky3)
vy[i]=vy[i-1] + Ky1/6. + Ky2/3. + Ky3/3. + Ky4/6.
x[i]=x[i-1] + (vx[i-1]+vx[i])*dt/2. #taking the average of velocities
y[i]=y[i-1] + (vy[i-1]+vy[i])*dt/2.
dt=timescale/1000.
#LAGRANGE POINTS
#L3, L1 and L2 points lie on the x-axis (left to right); for small alpha=M2/(M1+M2) their positions are given analytically (to first order in alpha)
alpha=M2/(M1+M2)
L1X=a*(1.-(alpha/3.)**(1./3.))
L1Y=0.
P1=pot(L1X,L1Y)
L2X=a*(1.+(alpha/3.)**(1./3.))
L2Y=0.
P2=pot(L2X,L2Y)
L3X=-a*(1. + 5.*alpha/12)
L3Y=0.
P3=pot(L3X,L3Y)
L4X=a/2.-r1
L4Y=math.sqrt(3)*a/2.
P4=pot2(L4X,L4Y)
P0=pot(x0,y0)
steps=301
xx= np.arange(-2*a, 2.*a,a/steps)
yy= np.arange(-1.5*a, 1.5*a,a/steps)
X, Y = np.meshgrid(xx, yy)
Z1=pot2(X,Y)
fig, ax = plt.subplots()
ax.set_aspect('equal','box')
ln1, = plt.plot([],[], 'k+')
ln2, = plt.plot([], [], 'm*')
XXX,YYY=[],[]
def init():
ax.set_xlim(-1.25,1.25)
ax.set_ylim(-1.25,1.25)
ax.contour(X/a, Y/a, Z1,levels=[P1,P2,P3,P0],colors=('r', 'green', 'blue', 'm'))
def update(i):
    # wrap coordinates in one-element lists: set_data expects sequences
    ln1.set_data([x[1000*i]/a], [y[1000*i]/a])
ani = FuncAnimation(fig, update, np.arange(300), init_func=init)
plt.show()
writer = PillowWriter(fps=25)
ani.save("Animation.gif", writer=writer)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 10:37:04 2021
@author: <NAME>
"""
#calculates trajectory of small mass positioned close to L4 Lagrange point
#creates gif as output
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation, PillowWriter
DistanceJ = 778570000000. # m JUPITER FROM SUN
G = 6.67259*10**-11
Jupiter_mass = 1.8982*10**27 # kg
Sun_mass = 1.989*10**30 # kg
M1=Sun_mass
M2=Jupiter_mass
a=DistanceJ
Ang_vel=math.sqrt(G*(M1+M2)/(a**3)) #FROM KEPLER LAW
P=2.*math.pi/Ang_vel #Period
#center of mass is located at [0,0] massive object (Sun) is located at -r1, secondary object (Jupiter) is located at +r2
r2=M1*a/(M1+M2)
r1=M2*a/(M1+M2)
# Calculations are done in corotating frame
# s1, s2 are distances from sources of gravity (Sun, Jupiter)
def pot(x,y):
r=math.sqrt(x*x + y*y)
    if x==0:
        # on the y-axis cos(theta) vanishes, so only the sign of y matters
        theta=math.pi/2. if y>=0. else -math.pi/2.
    elif x>0:
        theta=math.atan(abs(y)/x)
    else:
        theta=math.pi-math.atan(abs(y)/x)
s1=math.sqrt(r1*r1 + r*r + 2.*r1*r*math.cos(theta))
s2=math.sqrt(r2*r2 + r*r - 2.*r2*r*math.cos(theta))
result = -G*(M1/s1 + M2/s2) -1.*Ang_vel*Ang_vel*r*r/2.
return result
#Force per unit mass (acceleration) in x direction
# ax = -\partial pot(x,y) / \partial x - 2 (\Omega \times v)_x
# in our case \Omega=(0,0,\Omega) and v=(vx,vy,0)
# the second term corresponds to the Coriolis force
def ax(x,y,vx,vy):
dx=a/1000.
# result=-(pot(x+dx,y) -pot(x-dx,y))/(2.*dx) + 2.* Ang_vel*vy
result=-(-pot(x+2.*dx,y) + 8.*pot(x+dx,y) - 8.*pot(x-dx,y) + pot(x-2.*dx,y))/(12.*dx) + 2.* Ang_vel*vy
return result
def ay(x,y,vx,vy):
dy=a/1000.
# result=-( pot(x,y+dy)-pot(x,y-dy))/(dy*2.) - 2.* Ang_vel*vx
result=-(-pot(x,y+2.*dy) + 8.*pot(x,y+dy) - 8.*pot(x,y-dy) + pot(x,y-2*dy))/(dy*12.) - 2.* Ang_vel*vx
return result
pot2=np.vectorize(pot)
#TRAJECTORY OF ASTEROID STARTING CLOSE TO L4, at rest with respect to the rotating frame
x0=a/2.-r1
y0=math.sqrt(3)*a/2.
x0=1.005*x0
y0=1.005*y0
vx0=0.
vy0=0.
steps=300000
#initialize arrays
x= np.linspace(0, 10, steps)
y= np.linspace(0, 10, steps)
vx=np.linspace(0, 10, steps)
vy=np.linspace(0, 10, steps)
t= np.linspace(0, 10, steps)
x[0]=x0
vx[0]=vx0
y[0]=y0
vy[0]=vy0
t[0]=0.
i=0
timescale = math.sqrt(a**3 / (G*(M1+M2)))  # = 1/Ang_vel, the orbital timescale
dt=timescale/1000.
#using 4th order Runge-Kutta to solve the a_x= d v_x/ dt
# dt is constant set to timescale/1000
for i in range (1,steps):
t[i]=(t[i-1]+dt)
Kx1=dt*ax(x[i-1],y[i-1],vx[i-1],vy[i-1])
Kx2=dt*ax(x[i-1],y[i-1],vx[i-1]+Kx1/2.,vy[i-1])
Kx3=dt*ax(x[i-1],y[i-1],vx[i-1]+Kx2/2.,vy[i-1])
Kx4=dt*ax(x[i-1],y[i-1],vx[i-1]+Kx3,vy[i-1])
vx[i]=vx[i-1] + Kx1/6. + Kx2/3. + Kx3/3. + Kx4/6.
Ky1=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1])
Ky2=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1]+Ky1/2.)
Ky3=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1]+Ky2/2.)
Ky4=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1]+Ky3)
vy[i]=vy[i-1] + Ky1/6. + Ky2/3. + Ky3/3. + Ky4/6.
x[i]=x[i-1] + (vx[i-1]+vx[i])*dt/2. #taking the average of velocities
y[i]=y[i-1] + (vy[i-1]+vy[i])*dt/2.
dt=timescale/1000.
#LAGRANGE POINTS
#L3, L1 and L2 points lie on the x-axis (left to right); for small values of alpha=M2/(M1+M2) the positions are given analytically (to first order in alpha)
alpha=M2/(M1+M2)
L1X=a*(1.-(alpha/3.)**(1./3.))
L1Y=0.
P1=pot(L1X,L1Y)
L2X=a*(1.+(alpha/3.)**(1./3.))
L2Y=0.
P2=pot(L2X,L2Y)
L3X=-a*(1. + 5.*alpha/12)
L3Y=0.
P3=pot(L3X,L3Y)
L4X=a/2.-r1
L4Y=math.sqrt(3)*a/2.
P4=pot2(L4X,L4Y)
P0=pot(x0,y0)
steps=301
xx= np.arange(-2*a, 2.*a,a/steps)
yy= np.arange(-1.5*a, 1.5*a,a/steps)
X, Y = np.meshgrid(xx, yy)
Z1=pot2(X,Y)
fig, ax = plt.subplots()
ax.set_aspect('equal','box')
ln1, = plt.plot([],[], 'k+')
ln2, = plt.plot([], [], 'm*')
XXX,YYY=[],[]
def init():
ax.set_xlim(-1.25,1.25)
ax.set_ylim(-1.25,1.25)
ax.contour(X/a, Y/a, Z1,levels=[P1,P2,P3,P0],colors=('r', 'green', 'blue', 'm'))
def update(i):
    # wrap coordinates in one-element lists: set_data expects sequences
    ln1.set_data([x[1000*i]/a], [y[1000*i]/a])
ani = FuncAnimation(fig, update, np.arange(300), init_func=init)
plt.show()
writer = PillowWriter(fps=25)
ani.save("Animation.gif", writer=writer)
| en | 0.812949 | # -*- coding: utf-8 -*- Created on Thu Jan 14 10:37:04 2021 @author: <NAME> #calculates trajectory of small mass positioned close to L4 Lagrange point #creates gif as output # m JUPITER FROM SUN # kg # kg #FROM KEPLER LAW #Period #center of mass is located at [0,0] massive object (Sun) is located at -r1, secondary object (Jupiter) is located at +r2 # Calculations are done in corotating frame # s1, s2 are distances from sources of gravity (Sun, Jupiter) #Force per unit mass (acceleration) in x direction # ax = -\partial pot(x,y) / \partial x - 2 (\Omega \times v)_x # in our case \Omega=(0,0,\Omega) and v=(vx,vy,0) # the second term corresponds to the Coriolis force # result=-(pot(x+dx,y) -pot(x-dx,y))/(2.*dx) + 2.* Ang_vel*vy # result=-( pot(x,y+dy)-pot(x,y-dy))/(dy*2.) - 2.* Ang_vel*vx #TRAJECTORY OF ASTEROID STARTING CLOSE TO L4, at rest with respect to the rotating frame #initialize arrays #using 4th order Runge-Kutta to solve the a_x= d v_x/ dt # dt is constant set to timescale/1000 #taking the average of velocities #LAGRANGE POINTS #L3, L1 and L2 points lie on the x-axis (left to right); for small values of alpha=M2/(M1+M2) the positions are given analytically (to first order in alpha) | 3.040827 | 3 |
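The ax()/ay() routines above approximate the gradient of the effective potential with a fourth-order central-difference stencil, f'(x) ~ (-f(x+2h) + 8f(x+h) - 8f(x-h) + f(x-2h))/(12h). A minimal standalone check of that stencil follows; the test function sin(x) and the step sizes are illustrative choices, not taken from the script.

# Verify the 4th-order stencil used in ax()/ay(); sin(x) is an illustrative test function.
import math

def d4(f, x, h):
    # f'(x) ~ (-f(x+2h) + 8f(x+h) - 8f(x-h) + f(x-2h)) / (12h), error O(h**4)
    return (-f(x + 2.*h) + 8.*f(x + h) - 8.*f(x - h) + f(x - 2.*h)) / (12.*h)

for h in (1e-1, 1e-2, 1e-3):
    print(h, abs(d4(math.sin, 1.0, h) - math.cos(1.0)))  # error drops roughly as h**4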
youtube_dl/extractor/azubu.py | LyleH/youtube-dl | 0 | 7488 |
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
sanitized_Request,
)
class AzubuIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?azubu\.tv/[^/]+#!/play/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1',
'md5': 'a88b42fcf844f29ad6035054bd9ecaf4',
'info_dict': {
'id': '15575',
'ext': 'mp4',
'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1',
'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01',
'thumbnail': 're:^https?://.*\.jpe?g',
'timestamp': 1417523507.334,
'upload_date': '20141202',
'duration': 9988.7,
'uploader': 'GSL',
'uploader_id': 414310,
'view_count': int,
},
},
{
'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-',
'md5': 'b72a871fe1d9f70bd7673769cdb3b925',
'info_dict': {
'id': '9344',
'ext': 'mp4',
'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"',
'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af',
'thumbnail': 're:^https?://.*\.jpe?g',
'timestamp': 1410530893.320,
'upload_date': '20140912',
'duration': 172.385,
'uploader': 'FnaticTV',
'uploader_id': 272749,
'view_count': int,
},
'skip': 'Channel offline',
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
data = self._download_json(
'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data']
title = data['title'].strip()
description = data.get('description')
thumbnail = data.get('thumbnail')
view_count = data.get('view_count')
user = data.get('user', {})
uploader = user.get('username')
uploader_id = user.get('id')
stream_params = json.loads(data['stream_params'])
timestamp = float_or_none(stream_params.get('creationDate'), 1000)
duration = float_or_none(stream_params.get('length'), 1000)
renditions = stream_params.get('renditions') or []
video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength')
if video:
renditions.append(video)
if not renditions and not user.get('channel', {}).get('is_live', True):
raise ExtractorError('%s said: channel is offline.' % self.IE_NAME, expected=True)
formats = [{
'url': fmt['url'],
'width': fmt['frameWidth'],
'height': fmt['frameHeight'],
'vbr': float_or_none(fmt['encodingRate'], 1000),
'filesize': fmt['size'],
'vcodec': fmt['videoCodec'],
'container': fmt['videoContainer'],
} for fmt in renditions if fmt['url']]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'view_count': view_count,
'formats': formats,
}
class AzubuLiveIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?azubu\.tv/(?P<id>[^/]+)$'
_TEST = {
'url': 'http://www.azubu.tv/MarsTVMDLen',
'only_matching': True,
}
def _real_extract(self, url):
user = self._match_id(url)
info = self._download_json(
'http://api.azubu.tv/public/modules/last-video/{0}/info'.format(user),
user)['data']
if info['type'] != 'STREAM':
raise ExtractorError('{0} is not streaming live'.format(user), expected=True)
req = sanitized_Request(
'https://edge-elb.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:' + info['reference_id'])
req.add_header('Accept', 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV')
bc_info = self._download_json(req, user)
m3u8_url = next(source['src'] for source in bc_info['sources'] if source['container'] == 'M2TS')
formats = self._extract_m3u8_formats(m3u8_url, user, ext='mp4')
self._sort_formats(formats)
return {
'id': info['id'],
'title': self._live_title(info['title']),
'uploader_id': user,
'formats': formats,
'is_live': True,
'thumbnail': bc_info['poster'],
}
| from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
sanitized_Request,
)
class AzubuIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?azubu\.tv/[^/]+#!/play/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1',
'md5': 'a88b42fcf844f29ad6035054bd9ecaf4',
'info_dict': {
'id': '15575',
'ext': 'mp4',
'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1',
'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01',
'thumbnail': 're:^https?://.*\.jpe?g',
'timestamp': 1417523507.334,
'upload_date': '20141202',
'duration': 9988.7,
'uploader': 'GSL',
'uploader_id': 414310,
'view_count': int,
},
},
{
'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-',
'md5': 'b72a871fe1d9f70bd7673769cdb3b925',
'info_dict': {
'id': '9344',
'ext': 'mp4',
'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"',
'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af',
'thumbnail': 're:^https?://.*\.jpe?g',
'timestamp': 1410530893.320,
'upload_date': '20140912',
'duration': 172.385,
'uploader': 'FnaticTV',
'uploader_id': 272749,
'view_count': int,
},
'skip': 'Channel offline',
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
data = self._download_json(
'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data']
title = data['title'].strip()
description = data.get('description')
thumbnail = data.get('thumbnail')
view_count = data.get('view_count')
user = data.get('user', {})
uploader = user.get('username')
uploader_id = user.get('id')
stream_params = json.loads(data['stream_params'])
timestamp = float_or_none(stream_params.get('creationDate'), 1000)
duration = float_or_none(stream_params.get('length'), 1000)
renditions = stream_params.get('renditions') or []
video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength')
if video:
renditions.append(video)
if not renditions and not user.get('channel', {}).get('is_live', True):
raise ExtractorError('%s said: channel is offline.' % self.IE_NAME, expected=True)
formats = [{
'url': fmt['url'],
'width': fmt['frameWidth'],
'height': fmt['frameHeight'],
'vbr': float_or_none(fmt['encodingRate'], 1000),
'filesize': fmt['size'],
'vcodec': fmt['videoCodec'],
'container': fmt['videoContainer'],
} for fmt in renditions if fmt['url']]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'view_count': view_count,
'formats': formats,
}
class AzubuLiveIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?azubu\.tv/(?P<id>[^/]+)$'
_TEST = {
'url': 'http://www.azubu.tv/MarsTVMDLen',
'only_matching': True,
}
def _real_extract(self, url):
user = self._match_id(url)
info = self._download_json(
'http://api.azubu.tv/public/modules/last-video/{0}/info'.format(user),
user)['data']
if info['type'] != 'STREAM':
raise ExtractorError('{0} is not streaming live'.format(user), expected=True)
req = sanitized_Request(
'https://edge-elb.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:' + info['reference_id'])
req.add_header('Accept', 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV')
bc_info = self._download_json(req, user)
m3u8_url = next(source['src'] for source in bc_info['sources'] if source['container'] == 'M2TS')
formats = self._extract_m3u8_formats(m3u8_url, user, ext='mp4')
self._sort_formats(formats)
return {
'id': info['id'],
'title': self._live_title(info['title']),
'uploader_id': user,
'formats': formats,
'is_live': True,
'thumbnail': bc_info['poster'],
} | en | 0.622692 | #!/play/(?P<id>\d+)' #!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1', #!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-', | 2.194637 | 2 |
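Extractors such as AzubuIE above are normally exercised through youtube-dl's top-level API rather than instantiated directly. A sketch of such a driver follows; since azubu.tv has shut down, the URL is illustrative only and the request would fail against the live site.

# Hypothetical driver for the AzubuIE extractor; the azubu.tv URL is illustrative
# only (the service is defunct), so this request would raise an error today.
import youtube_dl

with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
    info = ydl.extract_info('http://www.azubu.tv/GSL#!/play/15575', download=False)
    print(info.get('title'), info.get('uploader'))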
conda_build/main_develop.py | dan-blanchard/conda-build | 0 | 7489 |
# (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function
import sys
from os.path import join, isdir, abspath, expanduser, exists
import shutil
from conda.cli.common import add_parser_prefix, get_prefix
from conda.cli.conda_argparse import ArgumentParser
from conda_build.main_build import args_func
from conda_build.post import mk_relative_osx
from conda_build.utils import _check_call, rec_glob
from conda.install import linked
def main():
p = ArgumentParser(
description="""
Install a Python package in 'development mode'.
This works by creating a conda.pth file in site-packages."""
# TODO: Use setup.py to determine any entry-points to install.
)
p.add_argument(
'source',
action="store",
metavar='PATH',
nargs='+',
help="Path to the source directory."
)
p.add_argument('-npf', '--no-pth-file',
action='store_true',
help=("Relink compiled extension dependencies against "
"libraries found in current conda env. "
"Do not add source to conda.pth."))
p.add_argument('-b', '--build_ext',
action='store_true',
help=("Build extensions inplace, invoking: "
"python setup.py build_ext --inplace; "
"add to conda.pth; relink runtime libraries to "
"environment's lib/."))
p.add_argument('-c', '--clean',
action='store_true',
help=("Invoke clean on setup.py: "
"python setup.py clean "
"use with build_ext to clean before building."))
p.add_argument('-u', '--uninstall',
action='store_true',
help=("Removes package if installed in 'development mode' "
"by deleting path from conda.pth file. Ignore other "
"options - just uninstall and exit"))
add_parser_prefix(p)
p.set_defaults(func=execute)
args = p.parse_args()
args_func(args, p)
def relink_sharedobjects(pkg_path, build_prefix):
'''
invokes functions in post module to relink to libraries in conda env
:param pkg_path: look for shared objects to relink in pkg_path
:param build_prefix: path to conda environment which contains lib/. to find
runtime libraries.
.. note:: develop mode builds the extensions in place and makes a link to
package in site-packages/. The build_prefix points to conda environment
since runtime libraries should be loaded from environment's lib/. first
'''
# find binaries in package dir and make them relocatable
bin_files = rec_glob(pkg_path, ['.so'])
for b_file in bin_files:
if sys.platform == 'darwin':
mk_relative_osx(b_file, build_prefix)
else:
print("Nothing to do on Linux or Windows.")
def write_to_conda_pth(sp_dir, pkg_path):
'''
Append pkg_path to conda.pth in site-packages directory for current
    environment. Only add path if it doesn't already exist.
:param sp_dir: path to site-packages/. directory
    :param pkg_path: the package path to append to site-packages/. dir.
'''
c_file = join(sp_dir, 'conda.pth')
with open(c_file, 'a') as f:
with open(c_file, 'r') as cf:
# make sure file exists, before we try to read from it hence nested
# in append with block
# expect conda.pth to be small so read it all in at once
pkgs_in_dev_mode = cf.readlines()
# only append pkg_path if it doesn't already exist in conda.pth
if pkg_path + '\n' in pkgs_in_dev_mode:
print("path exits, skipping " + pkg_path)
else:
f.write(pkg_path + '\n')
print("added " + pkg_path)
def get_site_pkg(prefix, py_ver):
'''
Given the path to conda environment, find the site-packages directory
:param prefix: path to conda environment. Look here for current
environment's site-packages
:returns: absolute path to site-packages directory
'''
# get site-packages directory
stdlib_dir = join(prefix, 'Lib' if sys.platform == 'win32' else
'lib/python%s' % py_ver)
sp_dir = join(stdlib_dir, 'site-packages')
return sp_dir
def get_setup_py(path_):
''' Return full path to setup.py or exit if not found '''
    # build path points to the source dir; with build_ext --inplace the build products land next to the source
setup_py = join(path_, 'setup.py')
if not exists(setup_py):
sys.exit("No setup.py found in {0}. Exiting.".format(path_))
return setup_py
def clean(setup_py):
'''
This invokes:
$ python setup.py clean
:param setup_py: path to setup.py
'''
# first call setup.py clean
cmd = ['python', setup_py, 'clean']
_check_call(cmd)
print("Completed: " + " ".join(cmd))
print("===============================================")
def build_ext(setup_py):
'''
Define a develop function - similar to build function
todo: need to test on win32 and linux
It invokes:
$ python setup.py build_ext --inplace
:param setup_py: path to setup.py
'''
# next call setup.py develop
cmd = ['python', setup_py, 'build_ext', '--inplace']
_check_call(cmd)
print("Completed: " + " ".join(cmd))
print("===============================================")
def uninstall(sp_dir, pkg_path):
'''
Look for pkg_path in conda.pth file in site-packages directory and remove
it. If pkg_path is not found in conda.pth, it means package is not
installed in 'development mode' via conda develop.
:param sp_dir: path to site-packages/. directory
:param pkg_path: the package path to be uninstalled.
'''
o_c_pth = join(sp_dir, 'conda.pth')
n_c_pth = join(sp_dir, 'conda.pth.temp')
found = False
with open(n_c_pth, 'w') as new_c:
with open(o_c_pth, 'r') as orig_c:
for line in orig_c:
if line != pkg_path + '\n':
new_c.write(line)
else:
print("uninstalled: " + pkg_path)
found = True
if not found:
print("conda.pth does not contain path: " + pkg_path)
print("package not installed via conda develop")
shutil.move(n_c_pth, o_c_pth)
def execute(args, parser):
prefix = get_prefix(args)
if not isdir(prefix):
sys.exit("""\
Error: environment does not exist: %s
#
# Use 'conda create' to create the environment first.
#""" % prefix)
for package in linked(prefix):
        name, ver, _ = package.rsplit('-', 2)
if name == 'python':
py_ver = ver[:3] # x.y
break
else:
raise RuntimeError("python is not installed in %s" % prefix)
# current environment's site-packages directory
sp_dir = get_site_pkg(prefix, py_ver)
for path in args.source:
pkg_path = abspath(expanduser(path))
if args.uninstall:
# uninstall then exit - does not do any other operations
uninstall(sp_dir, pkg_path)
sys.exit(0)
if args.clean or args.build_ext:
setup_py = get_setup_py(pkg_path)
if args.clean:
clean(setup_py)
if not args.build_ext:
sys.exit(0)
# build extensions before adding to conda.pth
if args.build_ext:
build_ext(setup_py)
if not args.no_pth_file:
write_to_conda_pth(sp_dir, pkg_path)
# go through the source looking for compiled extensions and make sure
# they use the conda environment for loading libraries at runtime
relink_sharedobjects(pkg_path, prefix)
print("completed operation for: " + pkg_path)
if __name__ == '__main__':
main()
| # (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function
import sys
from os.path import join, isdir, abspath, expanduser, exists
import shutil
from conda.cli.common import add_parser_prefix, get_prefix
from conda.cli.conda_argparse import ArgumentParser
from conda_build.main_build import args_func
from conda_build.post import mk_relative_osx
from conda_build.utils import _check_call, rec_glob
from conda.install import linked
def main():
p = ArgumentParser(
description="""
Install a Python package in 'development mode'.
This works by creating a conda.pth file in site-packages."""
# TODO: Use setup.py to determine any entry-points to install.
)
p.add_argument(
'source',
action="store",
metavar='PATH',
nargs='+',
help="Path to the source directory."
)
p.add_argument('-npf', '--no-pth-file',
action='store_true',
help=("Relink compiled extension dependencies against "
"libraries found in current conda env. "
"Do not add source to conda.pth."))
p.add_argument('-b', '--build_ext',
action='store_true',
help=("Build extensions inplace, invoking: "
"python setup.py build_ext --inplace; "
"add to conda.pth; relink runtime libraries to "
"environment's lib/."))
p.add_argument('-c', '--clean',
action='store_true',
help=("Invoke clean on setup.py: "
"python setup.py clean "
"use with build_ext to clean before building."))
p.add_argument('-u', '--uninstall',
action='store_true',
help=("Removes package if installed in 'development mode' "
"by deleting path from conda.pth file. Ignore other "
"options - just uninstall and exit"))
add_parser_prefix(p)
p.set_defaults(func=execute)
args = p.parse_args()
args_func(args, p)
def relink_sharedobjects(pkg_path, build_prefix):
'''
invokes functions in post module to relink to libraries in conda env
:param pkg_path: look for shared objects to relink in pkg_path
:param build_prefix: path to conda environment which contains lib/. to find
runtime libraries.
.. note:: develop mode builds the extensions in place and makes a link to
package in site-packages/. The build_prefix points to conda environment
since runtime libraries should be loaded from environment's lib/. first
'''
# find binaries in package dir and make them relocatable
bin_files = rec_glob(pkg_path, ['.so'])
for b_file in bin_files:
if sys.platform == 'darwin':
mk_relative_osx(b_file, build_prefix)
else:
print("Nothing to do on Linux or Windows.")
def write_to_conda_pth(sp_dir, pkg_path):
'''
Append pkg_path to conda.pth in site-packages directory for current
    environment. Only add path if it doesn't already exist.
:param sp_dir: path to site-packages/. directory
    :param pkg_path: the package path to append to site-packages/. dir.
'''
c_file = join(sp_dir, 'conda.pth')
with open(c_file, 'a') as f:
with open(c_file, 'r') as cf:
# make sure file exists, before we try to read from it hence nested
# in append with block
# expect conda.pth to be small so read it all in at once
pkgs_in_dev_mode = cf.readlines()
# only append pkg_path if it doesn't already exist in conda.pth
if pkg_path + '\n' in pkgs_in_dev_mode:
print("path exits, skipping " + pkg_path)
else:
f.write(pkg_path + '\n')
print("added " + pkg_path)
def get_site_pkg(prefix, py_ver):
'''
Given the path to conda environment, find the site-packages directory
:param prefix: path to conda environment. Look here for current
environment's site-packages
:returns: absolute path to site-packages directory
'''
# get site-packages directory
stdlib_dir = join(prefix, 'Lib' if sys.platform == 'win32' else
'lib/python%s' % py_ver)
sp_dir = join(stdlib_dir, 'site-packages')
return sp_dir
def get_setup_py(path_):
''' Return full path to setup.py or exit if not found '''
    # build path points to the source dir; with build_ext --inplace the build products land next to the source
setup_py = join(path_, 'setup.py')
if not exists(setup_py):
sys.exit("No setup.py found in {0}. Exiting.".format(path_))
return setup_py
def clean(setup_py):
'''
This invokes:
$ python setup.py clean
:param setup_py: path to setup.py
'''
# first call setup.py clean
cmd = ['python', setup_py, 'clean']
_check_call(cmd)
print("Completed: " + " ".join(cmd))
print("===============================================")
def build_ext(setup_py):
'''
Define a develop function - similar to build function
todo: need to test on win32 and linux
It invokes:
$ python setup.py build_ext --inplace
:param setup_py: path to setup.py
'''
# next call setup.py develop
cmd = ['python', setup_py, 'build_ext', '--inplace']
_check_call(cmd)
print("Completed: " + " ".join(cmd))
print("===============================================")
def uninstall(sp_dir, pkg_path):
'''
Look for pkg_path in conda.pth file in site-packages directory and remove
it. If pkg_path is not found in conda.pth, it means package is not
installed in 'development mode' via conda develop.
:param sp_dir: path to site-packages/. directory
:param pkg_path: the package path to be uninstalled.
'''
o_c_pth = join(sp_dir, 'conda.pth')
n_c_pth = join(sp_dir, 'conda.pth.temp')
found = False
with open(n_c_pth, 'w') as new_c:
with open(o_c_pth, 'r') as orig_c:
for line in orig_c:
if line != pkg_path + '\n':
new_c.write(line)
else:
print("uninstalled: " + pkg_path)
found = True
if not found:
print("conda.pth does not contain path: " + pkg_path)
print("package not installed via conda develop")
shutil.move(n_c_pth, o_c_pth)
def execute(args, parser):
prefix = get_prefix(args)
if not isdir(prefix):
sys.exit("""\
Error: environment does not exist: %s
#
# Use 'conda create' to create the environment first.
#""" % prefix)
for package in linked(prefix):
        name, ver, _ = package.rsplit('-', 2)
if name == 'python':
py_ver = ver[:3] # x.y
break
else:
raise RuntimeError("python is not installed in %s" % prefix)
# current environment's site-packages directory
sp_dir = get_site_pkg(prefix, py_ver)
for path in args.source:
pkg_path = abspath(expanduser(path))
if args.uninstall:
# uninstall then exit - does not do any other operations
uninstall(sp_dir, pkg_path)
sys.exit(0)
if args.clean or args.build_ext:
setup_py = get_setup_py(pkg_path)
if args.clean:
clean(setup_py)
if not args.build_ext:
sys.exit(0)
# build extensions before adding to conda.pth
if args.build_ext:
build_ext(setup_py)
if not args.no_pth_file:
write_to_conda_pth(sp_dir, pkg_path)
# go through the source looking for compiled extensions and make sure
# they use the conda environment for loading libraries at runtime
relink_sharedobjects(pkg_path, prefix)
print("completed operation for: " + pkg_path)
if __name__ == '__main__':
main() | en | 0.842154 | # (c) Continuum Analytics, Inc. / http://continuum.io # All Rights Reserved # # conda is distributed under the terms of the BSD 3-clause license. # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. Install a Python package in 'development mode'. This works by creating a conda.pth file in site-packages. # TODO: Use setup.py to determine any entry-points to install. invokes functions in post module to relink to libraries in conda env :param pkg_path: look for shared objects to relink in pkg_path :param build_prefix: path to conda environment which contains lib/. to find runtime libraries. .. note:: develop mode builds the extensions in place and makes a link to package in site-packages/. The build_prefix points to conda environment since runtime libraries should be loaded from environment's lib/. first # find binaries in package dir and make them relocatable Append pkg_path to conda.pth in site-packages directory for current environment. Only add path if it doens't already exist. :param sp_dir: path to site-packages/. directory :param pkg_path: the package path to append to site-packes/. dir. # make sure file exists, before we try to read from it hence nested # in append with block # expect conda.pth to be small so read it all in at once # only append pkg_path if it doesn't already exist in conda.pth Given the path to conda environment, find the site-packages directory :param prefix: path to conda environment. Look here for current environment's site-packages :returns: absolute path to site-packages directory # get site-packages directory Return full path to setup.py or exit if not found # build path points to source dir, builds are placed in the This invokes: $ python setup.py clean :param setup_py: path to setup.py # first call setup.py clean Define a develop function - similar to build function todo: need to test on win32 and linux It invokes: $ python setup.py build_ext --inplace :param setup_py: path to setup.py # next call setup.py develop Look for pkg_path in conda.pth file in site-packages directory and remove it. If pkg_path is not found in conda.pth, it means package is not installed in 'development mode' via conda develop. :param sp_dir: path to site-packages/. directory :param pkg_path: the package path to be uninstalled. \ Error: environment does not exist: %s # # Use 'conda create' to create the environment first. # # x.y # current environment's site-packages directory # uninstall then exit - does not do any other operations # build extensions before adding to conda.pth # go through the source looking for compiled extensions and make sure # they use the conda environment for loading libraries at runtime | 1.887541 | 2 |
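Everything in main_develop.py hinges on Python's .pth mechanism: at interpreter start-up, site.py reads every *.pth file in site-packages and appends each line to sys.path, so writing a checkout's path into conda.pth is enough to "install" it in development mode. A minimal sketch of that mechanism; the checkout path is a made-up example.

# Sketch of the .pth mechanism that conda develop relies on; pkg_path is hypothetical.
import sysconfig
from os.path import join

sp_dir = sysconfig.get_paths()['purelib']   # site-packages of the running interpreter
pkg_path = '/home/user/src/mypkg'           # hypothetical source checkout
with open(join(sp_dir, 'conda.pth'), 'a') as f:
    f.write(pkg_path + '\n')
# The *next* interpreter that starts up will have pkg_path on sys.path.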
benchmarking/experiments/sanity_check.py | ltgoslo/norBERT | 19 | 7490 | #!/bin/env python3
from transformers import TFBertForTokenClassification
from data_preparation.data_preparation_pos import MBERTTokenizer as MBERT_Tokenizer_pos
import sys
if __name__ == "__main__":
if len(sys.argv) > 1:
modelname = sys.argv[1]
else:
modelname = "ltgoslo/norbert"
model = TFBertForTokenClassification.from_pretrained(modelname, from_pt=True)
tokenizer = MBERT_Tokenizer_pos.from_pretrained(modelname, do_lower_case=False)
print(tokenizer)
| #!/bin/env python3
from transformers import TFBertForTokenClassification
from data_preparation.data_preparation_pos import MBERTTokenizer as MBERT_Tokenizer_pos
import sys
if __name__ == "__main__":
if len(sys.argv) > 1:
modelname = sys.argv[1]
else:
modelname = "ltgoslo/norbert"
model = TFBertForTokenClassification.from_pretrained(modelname, from_pt=True)
tokenizer = MBERT_Tokenizer_pos.from_pretrained(modelname, do_lower_case=False)
print(tokenizer)
| ru | 0.167759 | #!/bin/env python3 | 2.301654 | 2 |
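The sanity check above only confirms that the NorBERT weights and tokenizer load. A comparable standalone check with stock transformers classes might look like this; BertTokenizer stands in for the project-specific MBERT_Tokenizer_pos subclass, and the Norwegian test sentence is an arbitrary choice.

# Comparable load check using stock transformers classes; BertTokenizer is a
# stand-in for the project's MBERT_Tokenizer_pos subclass.
from transformers import BertTokenizer, TFBertForTokenClassification

name = 'ltgoslo/norbert'
model = TFBertForTokenClassification.from_pretrained(name, from_pt=True)
tok = BertTokenizer.from_pretrained(name, do_lower_case=False)
print(tok.tokenize('Dette er en test.'))  # subword pieces from the NorBERT vocab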
nca47/api/controllers/v1/firewall/securityZone.py | WosunOO/nca_xianshu | 0 | 7491 |
from oslo_serialization import jsonutils as json
from nca47.api.controllers.v1 import base
from nca47.common.i18n import _
from nca47.common.i18n import _LI, _LE
from nca47.common.exception import Nca47Exception
from oslo_log import log
from nca47.api.controllers.v1 import tools
from nca47.manager.central import CentralManager
from nca47.common.exception import ParamFormatError
from amqp.five import string
from nca47.common.exception import BadRequest
from oslo_messaging import RemoteError
from nca47.common import exception
LOG = log.getLogger(__name__)
class SecurityZoneController(base.BaseRestController):
def __init__(self):
self.manager = CentralManager.get_instance()
super(SecurityZoneController, self).__init__()
def create(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone create", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone',
'name', 'ifnames', 'priority', 'vfwname']
values = tools.validat_values(body_values, valid_attributes)
LOG.info(_LI("input the SecurityZone values with dic format \
is %(json)s"),
{"json": body_values})
values["name"] = (values["tenant_id"] + "_" +
values["network_zone"] +
"_" + values["name"])
response = self.manager.create_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def remove(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone del", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'id']
values = tools.validat_values(body_values, valid_attributes)
            # input the SecurityZone values with dict format
            LOG.info(_LI("delete the SecurityZone values with dict format \
is %(json)s"),
{"json": body_values})
response = self.manager.del_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def list(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone getAll", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name',
'network_zone', 'vfwname']
values = tools.validat_values(body_values, valid_attributes)
            # get_all the SecurityZone values with dict format
            LOG.info(_LI("get_all the SecurityZone values with dict format \
is %(json)s"),
{"json": body_values})
response = self.manager.get_securityZones(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def show(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone get", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['id']
values = tools.validat_values(body_values, valid_attributes)
            # get the SecurityZone values with dict format
            LOG.info(_LI("get the SecurityZone values with dict format\
is %(json)s"),
{"json": body_values})
response = self.manager.get_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def addif(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone add vlan", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'id',
'ifname']
values = tools.validat_values(body_values, valid_attributes)
            # input the SecurityZone values with dict format
            LOG.info(_LI("input the SecurityZone values with dict format is\
%(json)s"),
{"json": body_values})
response = self.manager.get_securityZone(context, values)
if not isinstance(values["ifname"], string):
raise ParamFormatError(param_name="ifname")
if values["ifname"] in response.ifnames:
message = ("securityZone with ifname=" +
values["ifname"] + " already exists")
return tools.ret_info("400", message)
response.ifnames.append(values["ifname"])
values["ifnames"] = response.ifnames
response = self.manager.update_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def delif(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone del vlan", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'id',
'ifname']
values = tools.validat_values(body_values, valid_attributes)
            # input the SecurityZone values with dict format
            LOG.info(_LI("input the SecurityZone values with dict format\
is %(json)s"),
{"json": body_values})
response = self.manager.get_securityZone(context, values)
if not isinstance(values["ifname"], string):
raise ParamFormatError(param_name="ifname")
if values["ifname"] not in response.ifnames:
message = ("securityZone with ifname=" +
values["ifname"]+" don't exist!")
return tools.ret_info("400", message)
response.ifnames.remove(values["ifname"])
values["ifnames"] = response.ifnames
response = self.manager.update_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
from oslo_serialization import jsonutils as json
from nca47.api.controllers.v1 import base
from nca47.common.i18n import _
from nca47.common.i18n import _LI, _LE
from nca47.common.exception import Nca47Exception
from oslo_log import log
from nca47.api.controllers.v1 import tools
from nca47.manager.central import CentralManager
from nca47.common.exception import ParamFormatError
from amqp.five import string
from nca47.common.exception import BadRequest
from oslo_messaging import RemoteError
from nca47.common import exception
LOG = log.getLogger(__name__)
class SecurityZoneController(base.BaseRestController):
def __init__(self):
self.manager = CentralManager.get_instance()
super(SecurityZoneController, self).__init__()
def create(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone create", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone',
'name', 'ifnames', 'priority', 'vfwname']
values = tools.validat_values(body_values, valid_attributes)
LOG.info(_LI("input the SecurityZone values with dic format \
is %(json)s"),
{"json": body_values})
values["name"] = (values["tenant_id"] + "_" +
values["network_zone"] +
"_" + values["name"])
response = self.manager.create_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def remove(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone del", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'id']
values = tools.validat_values(body_values, valid_attributes)
            # input the SecurityZone values with dict format
            LOG.info(_LI("delete the SecurityZone values with dict format \
is %(json)s"),
{"json": body_values})
response = self.manager.del_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def list(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone getAll", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name',
'network_zone', 'vfwname']
values = tools.validat_values(body_values, valid_attributes)
            # get_all the SecurityZone values with dict format
            LOG.info(_LI("get_all the SecurityZone values with dict format \
is %(json)s"),
{"json": body_values})
response = self.manager.get_securityZones(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def show(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone get", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['id']
values = tools.validat_values(body_values, valid_attributes)
            # get the SecurityZone values with dict format
            LOG.info(_LI("get the SecurityZone values with dict format\
is %(json)s"),
{"json": body_values})
response = self.manager.get_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def addif(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone add vlan", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'id',
'ifname']
values = tools.validat_values(body_values, valid_attributes)
            # input the SecurityZone values with dict format
            LOG.info(_LI("input the SecurityZone values with dict format is\
%(json)s"),
{"json": body_values})
response = self.manager.get_securityZone(context, values)
if not isinstance(values["ifname"], string):
raise ParamFormatError(param_name="ifname")
if values["ifname"] in response.ifnames:
message = ("securityZone with ifname=" +
values["ifname"] + " already exists")
return tools.ret_info("400", message)
response.ifnames.append(values["ifname"])
values["ifnames"] = response.ifnames
response = self.manager.update_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
def delif(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone del vlan", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'id',
'ifname']
values = tools.validat_values(body_values, valid_attributes)
            # input the SecurityZone values with dict format
            LOG.info(_LI("input the SecurityZone values with dict format\
is %(json)s"),
{"json": body_values})
response = self.manager.get_securityZone(context, values)
if not isinstance(values["ifname"], string):
raise ParamFormatError(param_name="ifname")
if values["ifname"] not in response.ifnames:
message = ("securityZone with ifname=" +
values["ifname"]+" don't exist!")
return tools.ret_info("400", message)
response.ifnames.remove(values["ifname"])
values["ifnames"] = response.ifnames
response = self.manager.update_securityZone(context, values)
return response
except Nca47Exception as e:
self.response.status = e.code
LOG.error(_LE('Error exception! error info: %' + e.message))
LOG.exception(e)
self.response.status = e.code
return tools.ret_info(e.code, e.message)
except RemoteError as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as e:
LOG.exception(e)
self.response.status = 500
return tools.ret_info(self.response.status, e.message)
| en | 0.299055 | # input the SecurityZone values with dict format # get_all the SecurityZone values with dict format # get the SecurityZone values with dict format # input the SecurityZone values with dict format # input the SecurityZone values with dict format | 2.078423 | 2
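The create() handler validates a fixed attribute list and then rewrites the zone name as <tenant_id>_<network_zone>_<name>. A request body that passes validat_values would therefore look like the dict below; every value is invented for illustration.

# Example body accepted by SecurityZoneController.create(); all values are invented.
body = {
    "tenant_id":    "t1",
    "dc_name":      "dc1",
    "network_zone": "dmz",
    "name":         "web",
    "ifnames":      ["eth0"],
    "priority":     "10",
    "vfwname":      "vfw1",
}
# The controller stores the mangled name "<tenant_id>_<network_zone>_<name>":
assert body["tenant_id"] + "_" + body["network_zone"] + "_" + body["name"] == "t1_dmz_web"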
mlmodels/model_tch/nbeats/model.py | gitter-badger/mlmodels | 1 | 7492 | import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def seasonality_model(thetas, t, device):
p = thetas.size()[-1]
assert p < 10, 'thetas_dim is too big.'
p1, p2 = (p // 2, p // 2) if p % 2 == 0 else (p // 2, p // 2 + 1)
s1 = torch.tensor([np.cos(2 * np.pi * i * t) for i in range(p1)]).float() # H/2-1
s2 = torch.tensor([np.sin(2 * np.pi * i * t) for i in range(p2)]).float()
S = torch.cat([s1, s2])
return thetas.mm(S.to(device))
def trend_model(thetas, t, device):
p = thetas.size()[-1]
assert p <= 4, 'thetas_dim is too big.'
T = torch.tensor([t ** i for i in range(p)]).float()
return thetas.mm(T.to(device))
def linspace(backcast_length, forecast_length):
lin_space = np.linspace(-backcast_length, forecast_length, backcast_length + forecast_length)
b_ls = lin_space[:backcast_length]
f_ls = lin_space[backcast_length:]
return b_ls, f_ls
class Block(nn.Module):
def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5, share_thetas=False):
super(Block, self).__init__()
self.units = units
self.thetas_dim = thetas_dim
self.backcast_length = backcast_length
self.forecast_length = forecast_length
self.share_thetas = share_thetas
self.fc1 = nn.Linear(backcast_length, units)
self.fc2 = nn.Linear(units, units)
self.fc3 = nn.Linear(units, units)
self.fc4 = nn.Linear(units, units)
self.device = device
self.backcast_linspace, self.forecast_linspace = linspace(backcast_length, forecast_length)
if share_thetas:
self.theta_f_fc = self.theta_b_fc = nn.Linear(units, thetas_dim)
else:
self.theta_b_fc = nn.Linear(units, thetas_dim)
self.theta_f_fc = nn.Linear(units, thetas_dim)
def forward(self, x):
x = F.relu(self.fc1(x.to(self.device)))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
return x
def __str__(self):
block_type = type(self).__name__
return f'{block_type}(units={self.units}, thetas_dim={self.thetas_dim}, ' \
f'backcast_length={self.backcast_length}, forecast_length={self.forecast_length}, ' \
f'share_thetas={self.share_thetas}) at @{id(self)}'
class SeasonalityBlock(Block):
def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5):
super(SeasonalityBlock, self).__init__(units, thetas_dim, device, backcast_length,
forecast_length, share_thetas=True)
def forward(self, x):
x = super(SeasonalityBlock, self).forward(x)
backcast = seasonality_model(self.theta_b_fc(x), self.backcast_linspace, self.device)
forecast = seasonality_model(self.theta_f_fc(x), self.forecast_linspace, self.device)
return backcast, forecast
class TrendBlock(Block):
def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5):
super(TrendBlock, self).__init__(units, thetas_dim, device, backcast_length,
forecast_length, share_thetas=True)
def forward(self, x):
x = super(TrendBlock, self).forward(x)
backcast = trend_model(self.theta_b_fc(x), self.backcast_linspace, self.device)
forecast = trend_model(self.theta_f_fc(x), self.forecast_linspace, self.device)
return backcast, forecast
class GenericBlock(Block):
def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5):
super(GenericBlock, self).__init__(units, thetas_dim, device, backcast_length, forecast_length)
self.backcast_fc = nn.Linear(thetas_dim, backcast_length)
self.forecast_fc = nn.Linear(thetas_dim, forecast_length)
def forward(self, x):
# no constraint for generic arch.
x = super(GenericBlock, self).forward(x)
theta_b = F.relu(self.theta_b_fc(x))
theta_f = F.relu(self.theta_f_fc(x))
backcast = self.backcast_fc(theta_b) # generic. 3.3.
forecast = self.forecast_fc(theta_f) # generic. 3.3.
return backcast, forecast
class NBeatsNet(nn.Module):
SEASONALITY_BLOCK = 'seasonality'
TREND_BLOCK = 'trend'
GENERIC_BLOCK = 'generic'
def __init__(self,
device,
stack_types=[TREND_BLOCK, SEASONALITY_BLOCK],
nb_blocks_per_stack=3,
forecast_length=5,
backcast_length=10,
thetas_dims=[4, 8],
share_weights_in_stack=False,
hidden_layer_units=256, ):
super(NBeatsNet, self).__init__()
self.forecast_length = forecast_length
self.backcast_length = backcast_length
self.hidden_layer_units = hidden_layer_units
self.nb_blocks_per_stack = nb_blocks_per_stack
self.share_weights_in_stack = share_weights_in_stack
self.stack_types = stack_types
self.stacks = []
self.thetas_dim = thetas_dims
self.parameters = []
self.device = device
print(f'| N-Beats')
for stack_id in range(len(self.stack_types)):
self.stacks.append(self.create_stack(stack_id))
self.parameters = nn.ParameterList(self.parameters)
self.to(self.device)
def create_stack(self, stack_id):
stack_type = self.stack_types[stack_id]
print(f'| -- Stack {stack_type.title()} (#{stack_id}) (share_weights_in_stack={self.share_weights_in_stack})')
blocks = []
for block_id in range(self.nb_blocks_per_stack):
block_init = NBeatsNet.select_block(stack_type)
if self.share_weights_in_stack and block_id != 0:
                block = blocks[-1]  # reuse the previous block so weights are shared within the stack
else:
block = block_init(self.hidden_layer_units, self.thetas_dim[stack_id],
self.device, self.backcast_length, self.forecast_length)
self.parameters.extend(block.parameters())
print(f' | -- {block}')
blocks.append(block)
return blocks
@staticmethod
def select_block(block_type):
if block_type == NBeatsNet.SEASONALITY_BLOCK:
return SeasonalityBlock
elif block_type == NBeatsNet.TREND_BLOCK:
return TrendBlock
else:
return GenericBlock
def forward(self, backcast):
forecast = torch.zeros(size=(backcast.size()[0], self.forecast_length,)) # maybe batch size here.
for stack_id in range(len(self.stacks)):
for block_id in range(len(self.stacks[stack_id])):
b, f = self.stacks[stack_id][block_id](backcast)
backcast = backcast.to(self.device) - b
forecast = forecast.to(self.device) + f
return backcast, forecast
| import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def seasonality_model(thetas, t, device):
p = thetas.size()[-1]
assert p < 10, 'thetas_dim is too big.'
p1, p2 = (p // 2, p // 2) if p % 2 == 0 else (p // 2, p // 2 + 1)
s1 = torch.tensor([np.cos(2 * np.pi * i * t) for i in range(p1)]).float() # H/2-1
s2 = torch.tensor([np.sin(2 * np.pi * i * t) for i in range(p2)]).float()
S = torch.cat([s1, s2])
return thetas.mm(S.to(device))
def trend_model(thetas, t, device):
p = thetas.size()[-1]
assert p <= 4, 'thetas_dim is too big.'
T = torch.tensor([t ** i for i in range(p)]).float()
return thetas.mm(T.to(device))
def linspace(backcast_length, forecast_length):
lin_space = np.linspace(-backcast_length, forecast_length, backcast_length + forecast_length)
b_ls = lin_space[:backcast_length]
f_ls = lin_space[backcast_length:]
return b_ls, f_ls
class Block(nn.Module):
def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5, share_thetas=False):
super(Block, self).__init__()
self.units = units
self.thetas_dim = thetas_dim
self.backcast_length = backcast_length
self.forecast_length = forecast_length
self.share_thetas = share_thetas
self.fc1 = nn.Linear(backcast_length, units)
self.fc2 = nn.Linear(units, units)
self.fc3 = nn.Linear(units, units)
self.fc4 = nn.Linear(units, units)
self.device = device
self.backcast_linspace, self.forecast_linspace = linspace(backcast_length, forecast_length)
if share_thetas:
self.theta_f_fc = self.theta_b_fc = nn.Linear(units, thetas_dim)
else:
self.theta_b_fc = nn.Linear(units, thetas_dim)
self.theta_f_fc = nn.Linear(units, thetas_dim)
def forward(self, x):
x = F.relu(self.fc1(x.to(self.device)))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
return x
def __str__(self):
block_type = type(self).__name__
return f'{block_type}(units={self.units}, thetas_dim={self.thetas_dim}, ' \
f'backcast_length={self.backcast_length}, forecast_length={self.forecast_length}, ' \
f'share_thetas={self.share_thetas}) at @{id(self)}'
class SeasonalityBlock(Block):
def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5):
super(SeasonalityBlock, self).__init__(units, thetas_dim, device, backcast_length,
forecast_length, share_thetas=True)
def forward(self, x):
x = super(SeasonalityBlock, self).forward(x)
backcast = seasonality_model(self.theta_b_fc(x), self.backcast_linspace, self.device)
forecast = seasonality_model(self.theta_f_fc(x), self.forecast_linspace, self.device)
return backcast, forecast
class TrendBlock(Block):
def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5):
super(TrendBlock, self).__init__(units, thetas_dim, device, backcast_length,
forecast_length, share_thetas=True)
def forward(self, x):
x = super(TrendBlock, self).forward(x)
backcast = trend_model(self.theta_b_fc(x), self.backcast_linspace, self.device)
forecast = trend_model(self.theta_f_fc(x), self.forecast_linspace, self.device)
return backcast, forecast
class GenericBlock(Block):
def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5):
super(GenericBlock, self).__init__(units, thetas_dim, device, backcast_length, forecast_length)
self.backcast_fc = nn.Linear(thetas_dim, backcast_length)
self.forecast_fc = nn.Linear(thetas_dim, forecast_length)
def forward(self, x):
# no constraint for generic arch.
x = super(GenericBlock, self).forward(x)
theta_b = F.relu(self.theta_b_fc(x))
theta_f = F.relu(self.theta_f_fc(x))
backcast = self.backcast_fc(theta_b) # generic. 3.3.
forecast = self.forecast_fc(theta_f) # generic. 3.3.
return backcast, forecast
class NBeatsNet(nn.Module):
SEASONALITY_BLOCK = 'seasonality'
TREND_BLOCK = 'trend'
GENERIC_BLOCK = 'generic'
def __init__(self,
device,
stack_types=[TREND_BLOCK, SEASONALITY_BLOCK],
nb_blocks_per_stack=3,
forecast_length=5,
backcast_length=10,
thetas_dims=[4, 8],
share_weights_in_stack=False,
hidden_layer_units=256, ):
super(NBeatsNet, self).__init__()
self.forecast_length = forecast_length
self.backcast_length = backcast_length
self.hidden_layer_units = hidden_layer_units
self.nb_blocks_per_stack = nb_blocks_per_stack
self.share_weights_in_stack = share_weights_in_stack
self.stack_types = stack_types
self.stacks = []
self.thetas_dim = thetas_dims
self.parameters = []
self.device = device
print(f'| N-Beats')
for stack_id in range(len(self.stack_types)):
self.stacks.append(self.create_stack(stack_id))
self.parameters = nn.ParameterList(self.parameters)
self.to(self.device)
def create_stack(self, stack_id):
stack_type = self.stack_types[stack_id]
print(f'| -- Stack {stack_type.title()} (#{stack_id}) (share_weights_in_stack={self.share_weights_in_stack})')
blocks = []
for block_id in range(self.nb_blocks_per_stack):
block_init = NBeatsNet.select_block(stack_type)
            if self.share_weights_in_stack and block_id != 0:
                block = blocks[-1]  # reuse the previous block so all blocks in the stack share weights
else:
block = block_init(self.hidden_layer_units, self.thetas_dim[stack_id],
self.device, self.backcast_length, self.forecast_length)
self.parameters.extend(block.parameters())
print(f' | -- {block}')
blocks.append(block)
return blocks
@staticmethod
def select_block(block_type):
if block_type == NBeatsNet.SEASONALITY_BLOCK:
return SeasonalityBlock
elif block_type == NBeatsNet.TREND_BLOCK:
return TrendBlock
else:
return GenericBlock
    def forward(self, backcast):
        backcast = backcast.to(self.device)
        # Running sum of every block's partial forecast; shape (batch_size, forecast_length).
        forecast = torch.zeros(size=(backcast.size()[0], self.forecast_length), device=self.device)
        for stack_id in range(len(self.stacks)):
            for block_id in range(len(self.stacks[stack_id])):
                b, f = self.stacks[stack_id][block_id](backcast)
                backcast = backcast - b  # doubly residual stacking: subtract what this block explained
                forecast = forecast + f
        return backcast, forecast
| en | 0.78097 | # H/2-1 # no constraint for generic arch. # generic. 3.3. # generic. 3.3. #{stack_id}) (share_weights_in_stack={self.share_weights_in_stack})') # pick up the last one to make the # maybe batch size here. | 2.439188 | 2 |
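A minimal usage sketch for the NBeatsNet class above (not part of the original file). It assumes the module's helper functions referenced by the blocks (linspace, trend_model, seasonality_model) are defined earlier in the same file; the synthetic data and hyperparameters are placeholders.

import torch
import torch.nn.functional as F

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = NBeatsNet(device, forecast_length=5, backcast_length=10)
# net.parameters is the manually built ParameterList, not nn.Module.parameters()
optimizer = torch.optim.Adam(net.parameters, lr=1e-3)

x = torch.randn(32, 10)  # (batch, backcast_length)
y = torch.randn(32, 5)   # (batch, forecast_length)

optimizer.zero_grad()
backcast, forecast = net(x)
loss = F.mse_loss(forecast, y.to(device))
loss.backward()
optimizer.step()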
BACKPROPAGATION/Backprop.py | chaya-v/AI-ML-Lab-Programs | 2 | 7493 | from math import exp
from random import seed
from random import random
# Build a fully connected network with one hidden layer; each neuron stores its
# weights as a list whose final element is the bias.
def initialize_network(n_inputs, n_hidden, n_outputs):
network = list()
hidden_layer = [{'weights':[random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]
network.append(hidden_layer)
output_layer = [{'weights':[random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]
network.append(output_layer)
return network
def activate(weights, inputs):
activation = weights[-1]
for i in range(len(weights)-1):
activation += weights[i] * inputs[i]
return activation
# Sigmoid activation.
def transfer(activation):
    return 1.0 / (1.0 + exp(-activation))
def forward_propagate(network, row):
inputs = row
for layer in network:
new_inputs = []
for neuron in layer:
activation = activate(neuron['weights'], inputs)
neuron['output'] = transfer(activation)
new_inputs.append(neuron['output'])
inputs = new_inputs
return inputs
def transfer_derivative(output):
return output * (1.0 - output)
# Walk the layers in reverse, computing each neuron's error signal ('delta').
def backward_propagate_error(network, expected):
for i in reversed(range(len(network))):
layer = network[i]
errors = list()
if i != len(network)-1:
for j in range(len(layer)):
error = 0.0
for neuron in network[i + 1]:
error += (neuron['weights'][j] * neuron['delta'])
errors.append(error)
else:
for j in range(len(layer)):
neuron = layer[j]
errors.append(expected[j] - neuron['output'])
for j in range(len(layer)):
neuron = layer[j]
neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])
def update_weights(network, row, l_rate):
for i in range(len(network)):
inputs = row[:-1]
if i != 0:
inputs = [neuron['output'] for neuron in network[i - 1]]
for neuron in network[i]:
for j in range(len(inputs)):
neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
neuron['weights'][-1] += l_rate * neuron['delta']
def train_network(network, train, l_rate, n_epoch, n_outputs):
for epoch in range(n_epoch):
sum_error = 0
for row in train:
outputs = forward_propagate(network, row)
expected = [0 for i in range(n_outputs)]
expected[row[-1]] = 1
sum_error += sum([(expected[i]-outputs[i])**2 for i in range(len(expected))])
backward_propagate_error(network, expected)
update_weights(network, row, l_rate)
print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, sum_error))
seed(1)
dataset = [[2.7810836,2.550537003,0],
[1.465489372,2.362125076,0],
[3.396561688,4.400293529,0],
[1.38807019,1.850220317,0],
[3.06407232,3.005305973,0],
[7.627531214,2.759262235,1],
[5.332441248,2.088626775,1],
[6.922596716,1.77106367,1],
[8.675418651,-0.242068655,1],
[7.673756466,3.508563011,1]]
n_inputs = len(dataset[0]) - 1
n_outputs = len(set([row[-1] for row in dataset]))
network = initialize_network(n_inputs, 2, n_outputs)
train_network(network, dataset, 0.5, 30, n_outputs)
for layer in network:
print(layer)
| none | 1 | 3.356484 | 3
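A small follow-up sketch, continuing the script above (not in the original file): the trained network classifies a row by taking the arg-max of the forward-pass outputs.

def predict(network, row):
    outputs = forward_propagate(network, row)
    return outputs.index(max(outputs))

for row in dataset:
    print('expected=%d, predicted=%d' % (row[-1], predict(network, row)))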
|
tests/multi_design_test.py | benoitc/hypercouch | 3 | 7494 | """\
Copyright (c) 2009 <NAME> <<EMAIL>>
This file is part of hypercouch which is released uner the MIT license.
"""
import time
import unittest
import couchdb
COUCHURI = "http://127.0.0.1:5984/"
TESTDB = "hyper_tests"
class MultiDesignTest(unittest.TestCase):
def setUp(self):
self.srv = couchdb.Server(COUCHURI)
if TESTDB in self.srv:
del self.srv[TESTDB]
self.db = self.srv.create(TESTDB)
self.db["_design/test1"] = {
"ft_index": """\
function(doc) {
if(doc.body) index(doc.body);
if(doc.foo != undefined) property("foo", doc.foo);
}
"""
}
self.db["_design/test2"] = {
"ft_index": """\
function(doc) {
if(doc.bar) property("bar", doc.bar)
}
"""
}
self._wait()
def tearDown(self):
del self.srv[TESTDB]
def _query(self, **kwargs):
resp, data = self.db.resource.get("_fti", **kwargs)
return data
    def _wait(self, expect=0, retries=10):
        data = self._query(q="*.**")
        while retries > 0 and len(data["rows"]) != expect:
            retries -= 1
            time.sleep(0.2)
            data = self._query(q="*.**")
        if len(data["rows"]) != expect:
            # re-check the data rather than the retry counter, so a match found
            # on the final retry is not reported as a failure
            raise RuntimeError("Failed to find expected index state.")
def test_attr(self):
docs = [{"_id": str(i), "body": "This is document %d" % i, "foo": i, "bar": str(i*i)} for i in range(10)]
self.db.update(docs)
self._wait(expect=10)
data = self._query(q="*.**", foo="NUMEQ 3", bar="NUMEQ 9")
self.assertEqual(data["total_rows"], 1)
self.assertEqual(data["rows"][0]["id"], "3")
data = self._query(q="*.**")
self.assertEqual(len(data["rows"]), 10)
for row in data["rows"]:
self.assertEqual(int(row["foo"]) ** 2, int(row["bar"]))
| """\
Copyright (c) 2009 <NAME> <<EMAIL>>
This file is part of hypercouch which is released uner the MIT license.
"""
import time
import unittest
import couchdb
COUCHURI = "http://127.0.0.1:5984/"
TESTDB = "hyper_tests"
class MultiDesignTest(unittest.TestCase):
def setUp(self):
self.srv = couchdb.Server(COUCHURI)
if TESTDB in self.srv:
del self.srv[TESTDB]
self.db = self.srv.create(TESTDB)
self.db["_design/test1"] = {
"ft_index": """\
function(doc) {
if(doc.body) index(doc.body);
if(doc.foo != undefined) property("foo", doc.foo);
}
"""
}
self.db["_design/test2"] = {
"ft_index": """\
function(doc) {
if(doc.bar) property("bar", doc.bar)
}
"""
}
self._wait()
def tearDown(self):
del self.srv[TESTDB]
def _query(self, **kwargs):
resp, data = self.db.resource.get("_fti", **kwargs)
return data
def _wait(self, expect=0, retries=10):
data = self._query(q="*.**")
while retries > 0 and len(data["rows"]) != expect:
retries -= 1
time.sleep(0.2)
data = self._query(q="*.**")
if retries < 1:
raise RuntimeError("Failed to find expected index state.")
def test_attr(self):
docs = [{"_id": str(i), "body": "This is document %d" % i, "foo": i, "bar": str(i*i)} for i in range(10)]
self.db.update(docs)
self._wait(expect=10)
data = self._query(q="*.**", foo="NUMEQ 3", bar="NUMEQ 9")
self.assertEqual(data["total_rows"], 1)
self.assertEqual(data["rows"][0]["id"], "3")
data = self._query(q="*.**")
self.assertEqual(len(data["rows"]), 10)
for row in data["rows"]:
self.assertEqual(int(row["foo"]) ** 2, int(row["bar"]))
| en | 0.327863 | \ Copyright (c) 2009 <NAME> <<EMAIL>> This file is part of hypercouch which is released uner the MIT license. \ function(doc) { if(doc.body) index(doc.body); if(doc.foo != undefined) property("foo", doc.foo); } \ function(doc) { if(doc.bar) property("bar", doc.bar) } | 2.367818 | 2 |
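For illustration only (the URL layout and query parameters are inferred from the test above, not from hypercouch documentation), the _query helper amounts to a plain HTTP GET like this:

import requests

resp = requests.get(COUCHURI + TESTDB + "/_fti",
                    params={"q": "*.**", "foo": "NUMEQ 3", "bar": "NUMEQ 9"})
data = resp.json()
print(data["total_rows"], [row["id"] for row in data["rows"]])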
xled_plus/samples/colmeander.py | DanieleMancini/xled_plus | 0 | 7495 | <filename>xled_plus/samples/colmeander.py<gh_stars>0
from .sample_setup import *
ctr = setup_control()
eff = ColorMeanderEffect(ctr, "solid")
eff.launch_rt()
input()
eff.stop_rt()
ctr.turn_off()
| none | 1 | 1.428189 | 1 |
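A hypothetical variant of the sample above (it assumes the same setup helpers, imported here by an assumed absolute path): run the effect for a fixed duration instead of blocking on keyboard input.

import time
from xled_plus.samples.sample_setup import *

ctr = setup_control()
eff = ColorMeanderEffect(ctr, "solid")
eff.launch_rt()
time.sleep(60)  # let the color meander for one minute
eff.stop_rt()
ctr.turn_off()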
|
python/dgl/nn/pytorch/sparse_emb.py | wcyjames/dgl | 0 | 7496 | <filename>python/dgl/nn/pytorch/sparse_emb.py
"""Torch NodeEmbedding."""
from datetime import timedelta
import torch as th
from ...backend import pytorch as F
from ...utils import get_shared_mem_array, create_shared_mem_array
_STORE = None
class NodeEmbedding:
'''Class for storing node embeddings.
The class is optimized for training large-scale node embeddings. It updates the embedding in
a sparse way and can scale to graphs with millions of nodes. It also supports partitioning
to multiple GPUs (on a single machine) for more acceleration. It does not support partitioning
across machines.
Currently, DGL provides two optimizers that work with this NodeEmbedding
class: ``SparseAdagrad`` and ``SparseAdam``.
    The implementation is based on the torch.distributed package. It depends on the pytorch
    default distributed process group to collect multi-process information and uses
    ``torch.distributed.TCPStore`` to share meta-data information across multiple gpu processes.
    It uses the local address '127.0.0.1:12346' to initialize the TCPStore.
Parameters
----------
num_embeddings : int
The number of embeddings. Currently, the number of embeddings has to be the same as
the number of nodes.
embedding_dim : int
The dimension size of embeddings.
name : str
The name of the embeddings. The name should uniquely identify the embeddings in the system.
init_func : callable, optional
The function to create the initial data. If the init function is not provided,
the values of the embeddings are initialized to zero.
Examples
--------
Before launching multiple gpu processes
>>> def initializer(emb):
th.nn.init.xavier_uniform_(emb)
return emb
In each training process
>>> emb = dgl.nn.NodeEmbedding(g.number_of_nodes(), 10, 'emb', init_func=initializer)
>>> optimizer = dgl.optim.SparseAdam([emb], lr=0.001)
>>> for blocks in dataloader:
... ...
... feats = emb(nids, gpu_0)
... loss = F.sum(feats + 1, 0)
... loss.backward()
... optimizer.step()
'''
def __init__(self, num_embeddings, embedding_dim, name,
init_func=None):
global _STORE
# Check whether it is multi-gpu training or not.
if th.distributed.is_initialized():
rank = th.distributed.get_rank()
world_size = th.distributed.get_world_size()
else:
rank = -1
world_size = 0
self._rank = rank
self._world_size = world_size
host_name = '127.0.0.1'
port = 12346
if rank <= 0:
emb = create_shared_mem_array(name, (num_embeddings, embedding_dim), th.float32)
if init_func is not None:
emb = init_func(emb)
if rank == 0:
if world_size > 1:
                    # for multi-gpu training, set up a TCPStore for
                    # embedding status synchronization across GPU processes
if _STORE is None:
_STORE = th.distributed.TCPStore(
host_name, port, world_size, True, timedelta(seconds=30))
for _ in range(1, world_size):
# send embs
_STORE.set(name, name)
        elif rank > 0:
            # non-zero ranks wait until rank 0 has created the shared-memory array
if _STORE is None:
_STORE = th.distributed.TCPStore(
host_name, port, world_size, False, timedelta(seconds=30))
_STORE.wait([name])
emb = get_shared_mem_array(name, (num_embeddings, embedding_dim), th.float32)
self._store = _STORE
self._tensor = emb
self._num_embeddings = num_embeddings
self._embedding_dim = embedding_dim
self._name = name
self._optm_state = None # track optimizer state
self._trace = [] # track minibatch
def __call__(self, node_ids, device=th.device('cpu')):
"""
node_ids : th.tensor
Index of the embeddings to collect.
device : th.device
Target device to put the collected embeddings.
"""
emb = self._tensor[node_ids].to(device)
if F.is_recording():
emb = F.attach_grad(emb)
self._trace.append((node_ids.to(device, non_blocking=True), emb))
return emb
@property
def store(self):
"""Return torch.distributed.TCPStore for
meta data sharing across processes.
Returns
-------
torch.distributed.TCPStore
KVStore used for meta data sharing.
"""
return self._store
@property
def rank(self):
"""Return rank of current process.
Returns
-------
int
The rank of current process.
"""
return self._rank
@property
def world_size(self):
"""Return world size of the pytorch distributed training env.
Returns
-------
int
The world size of the pytorch distributed training env.
"""
return self._world_size
@property
def name(self):
"""Return the name of NodeEmbedding.
Returns
-------
str
The name of NodeEmbedding.
"""
return self._name
@property
def num_embeddings(self):
"""Return the number of embeddings.
Returns
-------
int
The number of embeddings.
"""
return self._num_embeddings
def set_optm_state(self, state):
"""Store the optimizer related state tensor.
Parameters
----------
state : tuple of torch.Tensor
Optimizer related state.
"""
self._optm_state = state
@property
def optm_state(self):
"""Return the optimizer related state tensor.
Returns
-------
tuple of torch.Tensor
The optimizer related state.
"""
return self._optm_state
@property
def trace(self):
"""Return a trace of the indices of embeddings
used in the training step(s).
Returns
-------
[torch.Tensor]
The indices of embeddings used in the training step(s).
"""
return self._trace
def reset_trace(self):
"""Clean up the trace of the indices of embeddings
used in the training step(s).
"""
self._trace = []
@property
def emb_tensor(self):
"""Return the tensor storing the node embeddings
Returns
-------
torch.Tensor
The tensor storing the node embeddings
"""
return self._tensor
| en | 0.657158 | Torch NodeEmbedding. # NodeEmbedding Class for storing node embeddings. The class is optimized for training large-scale node embeddings. It updates the embedding in a sparse way and can scale to graphs with millions of nodes. It also supports partitioning to multiple GPUs (on a single machine) for more acceleration. It does not support partitioning across machines. Currently, DGL provides two optimizers that work with this NodeEmbedding class: ``SparseAdagrad`` and ``SparseAdam``. The implementation is based on torch.distributed package. It depends on the pytorch default distributed process group to collect multi-process information and uses ``torch.distributed.TCPStore`` to share meta-data information across multiple gpu processes. It use the local address of '127.0.0.1:12346' to initialize the TCPStore. Parameters ---------- num_embeddings : int The number of embeddings. Currently, the number of embeddings has to be the same as the number of nodes. embedding_dim : int The dimension size of embeddings. name : str The name of the embeddings. The name should uniquely identify the embeddings in the system. init_func : callable, optional The function to create the initial data. If the init function is not provided, the values of the embeddings are initialized to zero. Examples -------- Before launching multiple gpu processes >>> def initializer(emb): th.nn.init.xavier_uniform_(emb) return emb In each training process >>> emb = dgl.nn.NodeEmbedding(g.number_of_nodes(), 10, 'emb', init_func=initializer) >>> optimizer = dgl.optim.SparseAdam([emb], lr=0.001) >>> for blocks in dataloader: ... ... ... feats = emb(nids, gpu_0) ... loss = F.sum(feats + 1, 0) ... loss.backward() ... optimizer.step() # Check whether it is multi-gpu training or not. # for multi-gpu training, setup a TCPStore for # embeding status synchronization across GPU processes # send embs # receive # track optimizer state # track minibatch node_ids : th.tensor Index of the embeddings to collect. device : th.device Target device to put the collected embeddings. Return torch.distributed.TCPStore for meta data sharing across processes. Returns ------- torch.distributed.TCPStore KVStore used for meta data sharing. Return rank of current process. Returns ------- int The rank of current process. Return world size of the pytorch distributed training env. Returns ------- int The world size of the pytorch distributed training env. Return the name of NodeEmbedding. Returns ------- str The name of NodeEmbedding. Return the number of embeddings. Returns ------- int The number of embeddings. Store the optimizer related state tensor. Parameters ---------- state : tuple of torch.Tensor Optimizer related state. Return the optimizer related state tensor. Returns ------- tuple of torch.Tensor The optimizer related state. Return a trace of the indices of embeddings used in the training step(s). Returns ------- [torch.Tensor] The indices of embeddings used in the training step(s). Clean up the trace of the indices of embeddings used in the training step(s). Return the tensor storing the node embeddings Returns ------- torch.Tensor The tensor storing the node embeddings | 2.842093 | 3 |
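A runnable single-process sketch adapted from the class docstring above; the toy graph, embedding size, and learning rate are illustrative.

import torch as th
import dgl

g = dgl.rand_graph(100, 500)  # toy graph: 100 nodes, 500 edges

def initializer(emb):
    th.nn.init.xavier_uniform_(emb)
    return emb

emb = dgl.nn.NodeEmbedding(g.number_of_nodes(), 10, 'emb', init_func=initializer)
optimizer = dgl.optim.SparseAdam([emb], lr=0.001)

nids = th.arange(g.number_of_nodes())
feats = emb(nids)         # gather embeddings (CPU by default)
loss = (feats + 1).sum()  # dummy objective
loss.backward()
optimizer.step()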
tests/sentry/web/frontend/test_create_team.py | seukjung/sentry-custom | 20 | 7497 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import OrganizationMember, OrganizationMemberTeam, Team
from sentry.testutils import TestCase, PermissionTestCase
class CreateTeamPermissionTest(PermissionTestCase):
def setUp(self):
super(CreateTeamPermissionTest, self).setUp()
self.path = reverse('sentry-create-team', args=[self.organization.slug])
def test_teamless_admin_can_load(self):
self.assert_teamless_admin_can_access(self.path)
def test_team_admin_can_load(self):
self.assert_team_admin_can_access(self.path)
def test_member_cannot_load(self):
self.assert_member_cannot_access(self.path)
def test_owner_can_load(self):
self.assert_owner_can_access(self.path)
class CreateTeamTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization()
path = reverse('sentry-create-team', args=[organization.slug])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/create-team.html')
assert resp.context['organization'] == organization
assert resp.context['form']
def test_submission(self):
organization = self.create_organization()
path = reverse('sentry-create-team', args=[organization.slug])
self.login_as(self.user)
resp = self.client.post(path, {
'name': 'bar',
})
assert resp.status_code == 302, resp.context['form'].errors
team = Team.objects.get(organization=organization, name='bar')
member = OrganizationMember.objects.get(
user=self.user,
organization=organization,
)
assert OrganizationMemberTeam.objects.filter(
organizationmember=member,
team=team,
is_active=True,
).exists()
redirect_uri = reverse('sentry-create-project', args=[organization.slug])
assert resp['Location'] == 'http://testserver%s?team=%s' % (
redirect_uri, team.slug)
def test_admin_can_create_team(self):
organization = self.create_organization()
path = reverse('sentry-create-team', args=[organization.slug])
admin = self.create_user('<EMAIL>')
self.create_member(
organization=organization,
user=admin,
role='admin',
teams=[],
)
self.login_as(admin)
resp = self.client.post(path, {
'name': 'bar',
})
assert resp.status_code == 302, resp.context['form'].errors
assert Team.objects.filter(
organization=organization,
name='bar',
).exists()
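    # Hypothetical additional case (not in the original file), written in the same
    # style as the tests above; the e-mail address is illustrative.
    def test_member_cannot_create_team(self):
        organization = self.create_organization()
        path = reverse('sentry-create-team', args=[organization.slug])
        member = self.create_user('member@example.com')
        self.create_member(
            organization=organization,
            user=member,
            role='member',
            teams=[],
        )
        self.login_as(member)
        self.client.post(path, {
            'name': 'bar',
        })
        assert not Team.objects.filter(
            organization=organization,
            name='bar',
        ).exists()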
| none | 1 | 2.134494 | 2 |
|
agendamentos/migrations/0011_alter_agendamentosfuncionarios_table.py | afnmachado/univesp_pi_1 | 0 | 7498 | # Generated by Django 3.2.8 on 2021-11-29 05:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agendamentos', '0010_agendamentosfuncionarios'),
]
operations = [
migrations.AlterModelTable(
name='agendamentosfuncionarios',
table='agendamento_funcionario',
),
]
| # Generated by Django 3.2.8 on 2021-11-29 05:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agendamentos', '0010_agendamentosfuncionarios'),
]
operations = [
migrations.AlterModelTable(
name='agendamentosfuncionarios',
table='agendamento_funcionario',
),
]
| en | 0.855995 | # Generated by Django 3.2.8 on 2021-11-29 05:47 | 1.238852 | 1 |
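For context (inferred from the migration above, not taken from the app's source): AlterModelTable only renames the database table, so the corresponding model is expected to declare the matching name in its Meta, roughly like this.

from django.db import models

class AgendamentosFuncionarios(models.Model):
    class Meta:
        db_table = 'agendamento_funcionario'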
openstack/tests/unit/metric/v1/test_capabilities.py | teresa-ho/stx-openstacksdk | 43 | 7499 | <reponame>teresa-ho/stx-openstacksdk<gh_stars>10-100
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.metric.v1 import capabilities
BODY = {
'aggregation_methods': ['mean', 'max', 'avg'],
}
class TestCapabilities(testtools.TestCase):
def test_basic(self):
sot = capabilities.Capabilities()
self.assertEqual('/capabilities', sot.base_path)
self.assertEqual('metric', sot.service.service_type)
self.assertFalse(sot.allow_create)
self.assertTrue(sot.allow_get)
self.assertFalse(sot.allow_update)
self.assertFalse(sot.allow_delete)
self.assertFalse(sot.allow_list)
def test_make_it(self):
sot = capabilities.Capabilities(**BODY)
self.assertEqual(BODY['aggregation_methods'],
sot.aggregation_methods)
| en | 0.859654 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.955348 | 2
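A quick interactive check in the same vein (constructor usage mirrors the unit test above):

from openstack.metric.v1 import capabilities

cap = capabilities.Capabilities(aggregation_methods=['mean', 'max', 'avg'])
print(cap.aggregation_methods)  # ['mean', 'max', 'avg']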