repo_name (str, 5-92 chars) | path (str, 4-232 chars) | copies (str, 19 classes) | size (str, 4-7 chars) | content (str, 721-1.04M chars) | license (str, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
takashi-suehiro/rtmtools | rtc_handle_example/cin/cin.py | 1 | 5056 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
"""
\file cin.py
\brief an example RTC for rtc_handle
\date $Date$
"""
import sys
import time
sys.path.append(".")
# Import RTM module
import RTC
import OpenRTM_aist
# Import Service implementation class
# <rtc-template block="service_impl">
# </rtc-template>
# Import Service stub modules
# <rtc-template block="consumer_import">
# </rtc-template>
# This module's specification
# <rtc-template block="module_spec">
cin_spec = ["implementation_id", "cin",
"type_name", "cin",
"description", "an example RTC for rtc_handle",
"version", "1.0.0",
"vendor", "VenderName",
"category", "cin",
"activity_type", "STATIC",
"max_instance", "1",
"language", "Python",
"lang_type", "SCRIPT",
""]
# </rtc-template>
class cin(OpenRTM_aist.DataFlowComponentBase):
"""
\class cin
\brief an example RTC for rtc_handle
"""
def __init__(self, manager):
"""
\brief constructor
		\param manager Manager Object
"""
OpenRTM_aist.DataFlowComponentBase.__init__(self, manager)
self._d_str_out = RTC.TimedString(RTC.Time(0,0),0)
"""
"""
self._str_outOut = OpenRTM_aist.OutPort("str_out", self._d_str_out)
# initialize of configuration-data.
# <rtc-template block="init_conf_param">
# </rtc-template>
def onInitialize(self):
"""
The initialize action (on CREATED->ALIVE transition)
		former rtc_init_entry()
\return RTC::ReturnCode_t
"""
# Bind variables and configuration variable
# Set InPort buffers
# Set OutPort buffers
self.addOutPort("str_out",self._str_outOut)
# Set service provider to Ports
# Set service consumers to Ports
# Set CORBA Service Ports
return RTC.RTC_OK
#def onFinalize(self, ec_id):
# """
#
# The finalize action (on ALIVE->END transition)
	#	former rtc_exiting_entry()
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onStartup(self, ec_id):
# """
#
# The startup action when ExecutionContext startup
# former rtc_starting_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onShutdown(self, ec_id):
# """
#
# The shutdown action when ExecutionContext stop
# former rtc_stopping_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onActivated(self, ec_id):
# """
#
# The activated action (Active state entry action)
# former rtc_active_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onDeactivated(self, ec_id):
# """
#
# The deactivated action (Active state exit action)
# former rtc_active_exit()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
def onExecute(self, ec_id):
"""
The execution action that is invoked periodically
former rtc_active_do()
\param ec_id target ExecutionContext Id
\return RTC::ReturnCode_t
"""
a=raw_input("input data> ")
self._d_str_out.data=a
self._str_outOut.write()
return RTC.RTC_OK
#def onAborting(self, ec_id):
# """
#
# The aborting action when main logic error occurred.
# former rtc_aborting_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onError(self, ec_id):
# """
#
# The error action in ERROR state
# former rtc_error_do()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onReset(self, ec_id):
# """
#
# The reset action that is invoked resetting
# This is same but different the former rtc_init_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onStateUpdate(self, ec_id):
# """
#
# The state update action that is invoked after onExecute() action
# no corresponding operation exists in OpenRTm-aist-0.2.0
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onRateChanged(self, ec_id):
# """
#
# The action that is invoked when execution context's rate is changed
# no corresponding operation exists in OpenRTm-aist-0.2.0
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
def cinInit(manager):
profile = OpenRTM_aist.Properties(defaults_str=cin_spec)
manager.registerFactory(profile,
cin,
OpenRTM_aist.Delete)
def MyModuleInit(manager):
cinInit(manager)
# Create a component
comp = manager.createComponent("cin")
def main():
mgr = OpenRTM_aist.Manager.init(sys.argv)
mgr.setModuleInitProc(MyModuleInit)
mgr.activateManager()
mgr.runManager()
if __name__ == "__main__":
main()
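# Usage sketch (not part of the generated template): running this module
# directly starts an OpenRTM manager and registers one "cin" component; once
# the component is activated, each onExecute() reads a line from stdin and
# publishes it on the "str_out" OutPort.
#
#   $ python cin.py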
| mit | 8,142,501,798,067,403,000 | 17.252708 | 70 | 0.630736 | false |
jregalad-o/scripts | extract_kmers.py | 1 | 2494 | ###############################################################################
# Julian Regaldo
# [email protected]
#
# Extract all kmers of length K from a reference sequence
###############################################################################
import sys
import random
import hashlib
def error(errstr,errcode):
sys.stderr.write(errstr)
sys.exit(errcode)
def usage():
    sys.stderr.write("Usage: python extract_kmers.py <int K_length>"\
" <str input Seq>\n")
def write(outstr):
sys.stdout.write(outstr)
def readseq(inseq, startflag = 1):
"""
    Read sequence in chunks of roughly buff_size bp per call
"""
buff = ''
buff_size = 10000
if startflag:
header = inseq.readline().rstrip()
if header[0] != '>':
usage()
error("ERROR: Incorrect header format, got:\n\t" + header + '\n',1)
else:
sys.stderr.write("Procesing:\n\t" + header + '\n')
while True:
seq = inseq.readline().rstrip()
if seq == '':
if buff == '':
return 'EOF',0
else:
return buff,0
else:
buff += seq
if len(buff) > buff_size:
startflag = 0
break
return buff,startflag
def hashmer(seq,klen,merdict,hashalg):
start = 0
end = klen
for i in xrange(0,len(seq)-klen+1):
mer = seq[start:end]
hashed = hashalg(mer)
try:
merdict[hashed.digest()] += 1
except:
merdict[hashed.digest()] = 1
start += 1
end += 1
return merdict,start
def main():
if len(sys.argv) < 3:
usage()
error("\tERROR: Incorrect number of parameters\n",1)
try:
klen = int(sys.argv[1])
inseq = open(sys.argv[2],'r')
except:
usage()
error("\tERROR: Parameters not in correct format\n",1)
    digester = hashlib.sha1
merdict = dict()
mernum = 0
startflag = 1
last18mer = ''
while True:
newseq,startflag = readseq(inseq,startflag = startflag)
if newseq == 'EOF':
break
# TODO: Figure out better variable names
inbatch = last18mer + newseq
        merdict,start = hashmer(inbatch,klen,merdict,digester)
        last18mer = newseq[-(klen-1):]
mernum += start
for digest in merdict:
write(str(merdict[digest])+'\n')
if __name__ == "__main__":
main()
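# Usage sketch (illustrative file name): count 19-mers in a single-record
# FASTA file and write one count per line to stdout.
#
#   $ python extract_kmers.py 19 reference.fa > kmer_counts.txt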
| mit | 566,555,367,325,854,100 | 24.44898 | 79 | 0.497594 | false |
globz-eu/formalign | functional_tests/test_implementation/steps/alignment_rendering.py | 1 | 4176 | from behave import then, use_step_matcher
from formalign.settings import TEST
from helper_funcs.helpers_test import file_to_string
from lxml import html
from io import StringIO
from functional_tests.test_implementation.alignment_rendering import alignment_formatting, get_displayed_seqs
use_step_matcher('re')
@then(r'the alignment is displayed with 80 characters per line in blocks of 10 with sequence IDs')
def check_alignment_formatting(context):
"""
tests that the alignments are displayed with the correct formatting
:param context: behave context
"""
tables = None
seqs_meta = file_to_string('spa_protein_alignment_meta.txt').splitlines()
if TEST == 'acceptance':
align_display_elem = context.browser.find_element_by_css_selector('.align_display')
align_display = context.browser.execute_script('return arguments[0].innerHTML', align_display_elem)
align_display_html = html.parse(StringIO(align_display)).getroot()
tables = align_display_html.find_class('align_table')
elif TEST == 'functional':
tables = context.display.find_class('align_table')
alignment_formatting(seqs_meta, tables)
@then(r'the expected alignments are displayed')
def check_alignment_sequences(context):
"""
tests that the expected sequences are displayed
:param context: behave context
"""
alignment = file_to_string('spa_protein_alignment_seqs.txt')
alignment_list = [[c for c in a] for a in alignment.split('\n')[:-1]]
elems = None
if TEST == 'acceptance':
align_display_elem = context.browser.find_element_by_css_selector('.align_display')
align_display = context.browser.execute_script('return arguments[0].innerHTML', align_display_elem)
align_display_html = html.parse(StringIO(align_display)).getroot()
elems = align_display_html.cssselect('tr')
elif TEST == 'functional':
elems = context.display.cssselect('tr')
re_seqs = get_displayed_seqs(elems, len(alignment_list))
for i, al_li in enumerate(alignment_list):
assert al_li == re_seqs[i], 'expected: %s\n got: %s' % (al_li, re_seqs[i])
@then(r'the expected consensus sequence is displayed')
def check_consensus_sequence(context):
"""
tests that the expected consensus sequence is displayed
:param context: behave context
"""
alignment = file_to_string('spa_protein_alignment_consens.txt')
alignment_list = [[c for c in a] for a in alignment.split('\n')[:-1]]
elems = None
if TEST == 'acceptance':
align_display_elem = context.browser.find_element_by_css_selector('.align_display')
align_display = context.browser.execute_script('return arguments[0].innerHTML', align_display_elem)
align_display_html = html.parse(StringIO(align_display)).getroot()
elems = align_display_html.cssselect('tr')
elif TEST == 'functional':
elems = context.display.cssselect('tr')
cat_re_seq = get_displayed_seqs(elems, len(alignment_list), cons=True)
cons_li = alignment_list[-1]
assert cons_li == cat_re_seq, cat_re_seq
@then(r'the sequence elements have the expected color classes')
def check_alignment_sequences_annotation(context):
"""
tests that the sequence elements (residues or bases) have the expected
color classes
:param context: behave context
"""
alignment = file_to_string('spa_protein_alignment_seqs_annot.txt')
alignment_list = [['residue S%s' % a for a in al] for al in alignment.split('\n')[:-1]]
elems = None
if TEST == 'acceptance':
align_display_elem = context.browser.find_element_by_css_selector('.align_display')
align_display = context.browser.execute_script('return arguments[0].innerHTML', align_display_elem)
align_display_html = html.parse(StringIO(align_display)).getroot()
elems = align_display_html.cssselect('tr')
elif TEST == 'functional':
elems = context.display.cssselect('tr')
re_seqs = get_displayed_seqs(elems, len(alignment_list), annot=True)
for i, al in enumerate(alignment_list):
assert al == re_seqs[i], 'expected: %s\n got: %s' % (al, re_seqs[i])
| gpl-3.0 | -8,391,172,811,090,104,000 | 41.612245 | 109 | 0.688697 | false |
xiaohutushen30/seentao-xblock-sdk | workbench/test/test_problems.py | 1 | 3519 | """Test that problems and problem submission works well."""
import time
from selenium.common.exceptions import StaleElementReferenceException
from workbench import scenarios
from workbench.test.selenium_test import SeleniumTest
from bok_choy.query import BrowserQuery
class ProblemInteractionTest(SeleniumTest):
"""
A browser-based test of answering problems right and wrong.
"""
def setUp(self):
super(ProblemInteractionTest, self).setUp()
one_problem = """
<problem_demo>
<html_demo><p class="the_numbers">$a $b</p></html_demo>
<textinput_demo name="sum_input" input_type="int" />
<equality_demo name="sum_checker" left="./sum_input/@student_input" right="$c" />
<script>
import random
a = random.randint(1, 1000000)
b = random.randint(1, 1000000)
c = a + b
</script>
</problem_demo>
"""
self.num_problems = 3
scenarios.add_xml_scenario(
"test_many_problems", "Many problems",
"<vertical_demo>" + one_problem * self.num_problems + "</vertical_demo>"
)
self.addCleanup(scenarios.remove_scenario, "test_many_problems")
def test_many_problems(self):
# Test that problems work properly.
self.browser.get(self.live_server_url + "/scenario/test_many_problems")
header1 = BrowserQuery(self.browser, css="h1")
self.assertEqual(header1.text[0], "XBlock: Many problems")
# Find the numbers on the page.
nums = self.browser.find_elements_by_css_selector("p.the_numbers")
num_pairs = [tuple(int(n) for n in num.text.split()) for num in nums]
# They should be all different.
self.assertEqual(len(set(num_pairs)), self.num_problems)
text_ctrls_xpath = '//div[@data-block-type="textinput_demo"][@data-name="sum_input"]/input'
text_ctrls = self.browser.find_elements_by_xpath(text_ctrls_xpath)
check_btns = BrowserQuery(self.browser, css='input.check')
check_indicators = 'span.indicator'
def assert_image(right_wrong_idx, expected_icon):
"""Assert that the img src text includes `expected_icon`"""
for _ in range(3):
try:
sources = BrowserQuery(self.browser, css='{} img'.format(check_indicators)).nth(right_wrong_idx).attrs('src')
if sources and expected_icon in sources[0]:
break
else:
time.sleep(.25)
except StaleElementReferenceException as exc:
print exc
self.assertIn(expected_icon, sources[0])
for i in range(self.num_problems):
# Before answering, the indicator says Not Attempted.
self.assertIn("Not attempted", BrowserQuery(self.browser, css=check_indicators).nth(i).text[0])
answer = sum(num_pairs[i])
for _ in range(2):
# Answer right.
text_ctrls[i].clear()
text_ctrls[i].send_keys(str(answer))
check_btns[i].click()
assert_image(i, "/correct-icon.png")
# Answer wrong.
text_ctrls[i].clear()
text_ctrls[i].send_keys(str(answer + 1))
check_btns[i].click()
assert_image(i, "/incorrect-icon.png")
| agpl-3.0 | 2,430,540,823,265,295,400 | 39.918605 | 129 | 0.56948 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/ubiquity/plugins/ubi-wireless.py | 1 | 5701 | # -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright (C) 2010 Canonical Ltd.
# Written by Evan Dandrea <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from ubiquity import plugin, misc
import os
NAME = 'wireless'
#after prepare for default install, but language for oem install
AFTER = ['prepare', 'language']
WEIGHT = 12
class PageGtk(plugin.PluginUI):
plugin_title = 'ubiquity/text/wireless_heading_label'
def __init__(self, controller, *args, **kwargs):
from ubiquity import nm
from gi.repository import Gtk
if ('UBIQUITY_AUTOMATIC' in os.environ
or not nm.wireless_hardware_present() or misc.has_connection()):
self.page = None
return
self.controller = controller
builder = Gtk.Builder()
self.controller.add_builder(builder)
builder.add_from_file(os.path.join(os.environ['UBIQUITY_GLADE'], 'stepWireless.ui'))
builder.connect_signals(self)
self.page = builder.get_object('stepWireless')
self.nmwidget = builder.get_object('nmwidget')
self.nmwidget.connect('connection', self.state_changed)
self.nmwidget.connect('selection_changed', self.selection_changed)
self.use_wireless = builder.get_object('use_wireless')
self.use_wireless.connect('toggled', self.wireless_toggled)
self.plugin_widgets = self.page
self.have_selection = False
self.state = self.nmwidget.get_state()
self.next_normal = True
self.back_normal = True
self.connect_text = None
self.stop_text = None
def plugin_translate(self, lang):
get_s = self.controller.get_string
self.connect_text = get_s('ubiquity/text/connect', lang)
self.stop_text = get_s('ubiquity/text/stop', lang)
frontend = self.controller._wizard
if not self.next_normal:
frontend.next.set_label(self.connect_text)
if not self.back_normal:
frontend.back.set_label(self.stop_text)
def selection_changed(self, unused):
from ubiquity import nm
self.have_selection = True
self.use_wireless.set_active(True)
assert self.state is not None
frontend = self.controller._wizard
if self.state == nm.NM_STATE_CONNECTING:
frontend.translate_widget(frontend.next)
self.next_normal = True
else:
if (not self.nmwidget.is_row_an_ap()) or self.nmwidget.is_row_connected():
frontend.translate_widget(frontend.next)
self.next_normal = True
else:
frontend.next.set_label(self.connect_text)
self.next_normal = False
def wireless_toggled(self, unused):
frontend = self.controller._wizard
if self.use_wireless.get_active():
if not self.have_selection:
self.nmwidget.select_usable_row()
self.state_changed(None, self.state)
else:
frontend.connecting_spinner.hide()
frontend.connecting_spinner.stop()
frontend.connecting_label.hide()
frontend.translate_widget(frontend.next)
self.next_normal = True
self.controller.allow_go_forward(True)
def plugin_on_back_clicked(self):
frontend = self.controller._wizard
if frontend.back.get_label() == self.stop_text:
self.nmwidget.disconnect_from_ap()
return True
else:
frontend.connecting_spinner.hide()
frontend.connecting_spinner.stop()
frontend.connecting_label.hide()
return False
def plugin_on_next_clicked(self):
frontend = self.controller._wizard
if frontend.next.get_label() == self.connect_text:
self.nmwidget.connect_to_ap()
return True
else:
frontend.connecting_spinner.hide()
frontend.connecting_spinner.stop()
frontend.connecting_label.hide()
return False
def state_changed(self, unused, state):
from ubiquity import nm
self.state = state
frontend = self.controller._wizard
if not self.use_wireless.get_active():
return
if state != nm.NM_STATE_CONNECTING:
frontend.connecting_spinner.hide()
frontend.connecting_spinner.stop()
frontend.connecting_label.hide()
self.controller.allow_go_forward(True)
frontend.translate_widget(frontend.back)
self.back_normal = False
frontend.back.set_sensitive(True)
else:
frontend.connecting_spinner.show()
frontend.connecting_spinner.start()
frontend.connecting_label.show()
self.next_normal = True
frontend.back.set_label(self.stop_text)
self.back_normal = False
frontend.back.set_sensitive(True)
self.selection_changed(None)
| gpl-3.0 | 2,619,777,803,315,187,000 | 38.317241 | 92 | 0.632871 | false |
pepincho/Cinema-Reservation-System | add_records_to_cinema_database.py | 1 | 1311 | from create_cinema_database import CreateCinemaDatabase
class AddRecordsToCinemaDatabase:
@staticmethod
def add_movies(db):
db.add_movie("The Hunger Games: Catching Fire", 7.9)
db.add_movie("Wreck-It Ralph", 7.8)
db.add_movie("Her", 8.3)
db.add_movie("Spiderman", 9)
@staticmethod
def add_projections(db):
db.add_projection(1, "3D", "2014-04-01", "19:10")
db.add_projection(1, "2D", "2014-04-01", "19:00")
db.add_projection(1, "4DX", "2014-04-02", "21:00")
db.add_projection(3, "2D", "2014-04-05", "20:20")
db.add_projection(2, "3D", "2014-04-02", "22:00")
db.add_projection(2, "2D", "2014-04-02", "19:30")
db.add_projection(4, "4DX", "2014-09-19", "19:30")
db.add_projection(4, "3D", "2014-05-14", "19:30")
db.add_projection(4, "3D", "2014-05-14", "22:30")
db.add_projection(4, "5D", "2015-05-14", "19:30")
@staticmethod
def add_reservations(db):
db.add_reservation("RadoRado", 1, 2, 1)
db.add_reservation("RadoRado", 1, 3, 5)
db.add_reservation("RadoRado", 1, 7, 8)
db.add_reservation("Ivo", 3, 1, 1)
db.add_reservation("Ivo", 3, 1, 2)
db.add_reservation("Pesho", 5, 2, 3)
db.add_reservation("Pesho", 5, 2, 4)
| mit | 3,003,731,788,179,111,000 | 35.416667 | 60 | 0.56598 | false |
landier/imdb-crawler | crawler/libs/sqlalchemy/dialects/mssql/zxjdbc.py | 1 | 2550 | # mssql/zxjdbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Microsoft SQL Server database via the zxjdbc JDBC
connector.
JDBC Driver
-----------
Requires the jTDS driver, available from: http://jtds.sourceforge.net/
Connecting
----------
URLs are of the standard form of
``mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]``.
Additional arguments which may be specified either as query string
arguments on the URL, or as keyword arguments to
:func:`~sqlalchemy.create_engine()` will be passed as Connection
properties to the underlying JDBC driver.
"""
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mssql.base import MSDialect, MSExecutionContext
from sqlalchemy.engine import base
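# Connection sketch (hypothetical host and credentials), assuming the jTDS jar
# is on the Jython CLASSPATH; the URL follows the form described in the module
# docstring above:
#
#   from sqlalchemy import create_engine
#   engine = create_engine("mssql+zxjdbc://user:pass@host:1433/dbname")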
class MSExecutionContext_zxjdbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
super(MSExecutionContext_zxjdbc, self).pre_exec()
# scope_identity after the fact returns null in jTDS so we must
# embed it
if self._select_lastrowid and self.dialect.use_scope_identity:
self._embedded_scope_identity = True
self.statement += "; SELECT scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
while True:
try:
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error as e:
self.cursor.nextset()
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and \
self.compiled.returning:
self._result_proxy = base.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
table = self.dialect.identifier_preparer.format_table(
self.compiled.statement.table)
self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
jdbc_db_name = 'jtds:sqlserver'
jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'
execution_ctx_cls = MSExecutionContext_zxjdbc
def _get_server_version_info(self, connection):
return tuple(
int(x)
for x in connection.connection.dbversion.split('.')
)
dialect = MSDialect_zxjdbc
| gpl-3.0 | 7,793,973,673,749,884,000 | 33 | 84 | 0.658824 | false |
Who8MyLunch/Python-Embed-Demo | setup.py | 1 | 1048 |
import time
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Distutils import build_ext
# Cython extension.
source_files = ['wrapper_inner.pyx']
include_dirs = ['C:/Python27/include',
'C:/Python27/Lib/site-packages/numpy/core/include']
extra_compile_args = ['/openmp', '/EHsc']
ext = Extension('wrapper_inner', source_files,
language='c++',
include_dirs=include_dirs,
extra_compile_args=extra_compile_args)
# Time stamp.
localtime = time.localtime()
date_stamp = '%4i.%02i.%02i' % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday)
# Do it.
setup(name='wrapper_inner',
packages=find_packages(),
cmdclass={'build_ext':build_ext},
ext_modules=[ ext ],
# Metadata
version=date_stamp,
author='Pierre V. Villeneuve',
author_email='[email protected]',
description='Demonstration of Python/Cython embedded within C++',
)
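# Build sketch: compile the Cython extension in place. The /openmp and /EHsc
# flags above assume the MSVC toolchain on Windows.
#
#   python setup.py build_ext --inplace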
| mit | -682,183,025,323,505,700 | 25.871795 | 87 | 0.666031 | false |
rocian/AdventOfCode2016 | 08/solutions.py | 1 | 5519 | #!/usr/bin/env python3
"""
https://adventofcode.com/2016
--- Day 8: Two-Factor Authentication ---
You come across a door implementing what you can only assume is an
implementation of two-factor authentication after a long game of requirements
telephone.
To get past the door, you first swipe a keycard (no problem; there was one on a
nearby desk). Then, it displays a code on a little screen, and you type that
code on a keypad. Then, presumably, the door unlocks.
Unfortunately, the screen has been smashed. After a few minutes, you've taken
everything apart and figured out how it works. Now you just have to work out
what the screen would have displayed.
The magnetic strip on the card you swiped encodes a series of instructions for
the screen; these instructions are your puzzle input. The screen is 50 pixels
wide and 6 pixels tall, all of which start off, and is capable of three somewhat
peculiar operations:
rect AxB turns on all of the pixels in a rectangle at the top-left of the
screen which is A wide and B tall.
rotate row y=A by B shifts all of the pixels in row A (0 is the top row)
right by B pixels. Pixels that would fall off the right end appear at the
left end of the row.
rotate column x=A by B shifts all of the pixels in column A (0 is the left
column) down by B pixels. Pixels that would fall off the bottom appear at
the top of the column.
For example, here is a simple sequence on a smaller screen:
rect 3x2 creates a small rectangle in the top-left corner:
###....
###....
.......
rotate column x=1 by 1 rotates the second column down by one pixel:
#.#....
###....
.#.....
rotate row y=0 by 4 rotates the top row right by four pixels:
....#.#
###....
.#.....
rotate column x=1 by 1 again rotates the second column down by one pixel,
causing the bottom pixel to wrap back to the top:
.#..#.#
#.#....
.#.....
As you can see, this display technology is extremely powerful, and will soon
dominate the tiny-code-displaying-screen market. That's what the advertisement
on the back of the display tries to convince you, anyway.
There seems to be an intermediate check of the voltage used by the display:
after you swipe your card, if the screen did work, how many pixels should be
lit?
--- Part Two ---
You notice that the screen is only capable of displaying capital letters; in the
font it uses, each letter is 5 pixels wide and 6 tall.
After you swipe your card, what code is the screen trying to display?
"""
import re
import numpy as np
def read_input():
    """ This function reads the instructions from the input file and
    returns a clean list of instructions."""
f = open('input', 'r')
string = f.read()
lstring = string.split("\n")
# we remove the last void instruction
# this could be made in a safer way
lstring = lstring[:-1]
return(lstring)
def array_roll(a, index, by, axis):
    "Roll an array row (axis=0) or column (axis=1) by the specified amount."
if axis:
# if move by columns axis = 1, transpose array
a = np.transpose(a)
# roll row of `by` position
a[index] = np.roll(a[index], by)
if axis:
# if move by columns axis = 1, transpose again array
a = np.transpose(a)
return(a)
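# Illustration (not from the puzzle input): array_roll(a, 0, 1, 0) rolls row 0
# one position to the right, wrapping the last element to the front, e.g.
#   array_roll(np.array([[1, 2, 3], [4, 5, 6]]), 0, 1, 0) -> [[3, 1, 2], [4, 5, 6]]
# With axis=1 the same shift is applied downwards to column `index`.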
def process(monitor, instruction):
"""Process the instructions on the monitor and return the final monitor state."""
# create the opportune regex to capture instruction of operation
rect = re.compile(r"(\d+)x(\d+)")
rowr = re.compile(r"y=(\d+) by (\d+)")
colr = re.compile(r"x=(\d+) by (\d+)")
for operation in instruction:
if operation.startswith("rect"):
# fill rect dx x dy with 1
dx, dy = re.findall(rect, operation)[0]
monitor[0:int(dy), 0:int(dx)] = 1
elif operation.startswith("rotate column"):
# roll column `index` by `dy`
index, dy = re.findall(colr, operation)[0]
monitor = array_roll(monitor, int(index), int(dy), 1)
elif operation.startswith("rotate row"):
# roll row `index` by `dx`
index, dx = re.findall(rowr, operation)[0]
monitor = array_roll(monitor, int(index), int(dx), 0)
return(monitor)
def to_shape(monitor, nrow, ncol, by, voidc, fillc):
"Create shape letters from array"
# add 0 filled column to space letters
for c in range(ncol - by, 0, -by):
monitor = np.insert(monitor, c, 0, axis=1)
# replace 0 by `voidc` and 1 by `fillc`
    # to iterate, transform into a list and then back into an ndarray
monitor = [fillc if i else voidc for i in np.nditer(monitor, op_flags=['readwrite'])]
monitor = np.array(monitor).reshape(nrow, len(monitor) // nrow)
# create a string from array
string = "\n\n\t"
for row in monitor:
string += ''.join(row)
string += "\n\t"
return(string)
# number of rows and columns in monitor
nrow = 6
ncol = 50
# number of columns in a letter block
nby = 5
# chars for void and fill in a letter block
voidc = ' '
fillc = '█' # Unicode FULL BLOCK
# create the monitor as array
monitor = [0] * (nrow * ncol)
monitor = np.array(monitor).reshape(nrow, ncol)
# process instructions
monitor = process(monitor, read_input())
print("Day 8. Solution of part 1: {}".format(sum(sum(monitor))))
print("Day 8. Solution of part 2: {}".format(to_shape(monitor, nrow, ncol,
                                                      nby, voidc, fillc)))
| gpl-3.0 | -1,507,390,072,424,425,200 | 29.65 | 89 | 0.65126 | false |
davipeterlini/routeflow_tcc | rflib/ipc/MongoIpc.py | 1 | 11834 | import rflib.ipc.RFProtocol as RFProtocol
import bson
import threading
import pymongo as mongo
import time
import sys
from rflib.ipc.Ipc import Ipc
from rflib.ipc.MongoUtils import MongoFactory
from rflib.defs import *
FIELD_NAME_ID = "_id"
FIELD_NAME_FROM = "from"
FIELD_NAME_TO = "to"
FIELD_NAME_TYPE = "type"
FIELD_NAME_READ = "read"
FIELD_NAME_CONTENT = "content"
# 1 MB for the capped collection
CC_SIZE = 1048576
class MongoIpc(Ipc):
def __init__(self, user_id, channel_id):
self._mf = MongoFactory()
self._producer_connection = self._mf.create_connection()
self._user_id = user_id
self._channel_id = channel_id
self._db_name = MONGO_DB_NAME
db = self._producer_connection[self._db_name]
try:
collection = mongo.collection.Collection(db, self._channel_id, True, capped=True, size=CC_SIZE)
collection.ensure_index([("_id", mongo.ASCENDING)])
collection.ensure_index([(FIELD_NAME_TO, mongo.ASCENDING)])
except:
print "channel already exists"
def listen(self, message_processor):
#self._producer_connection = self._mf.create_connection()
while True:
# tries to get unread messages
for i in xrange(0, MONGO_MAX_RETRIES):
try:
collection = self._producer_connection[self._db_name][self._channel_id]
cursor = collection.find(
{FIELD_NAME_TO: self._user_id, FIELD_NAME_READ: False},
tailable=True
)
#cursor OK, break for
break
except:
if (i + 1) == MONGO_MAX_RETRIES:
print "[ERROR]MongoIPC: Could not get unread messages. Error: (", sys.exc_info(), ")"
return
print "[RECOVERING]MongoIPC: Could not get unread messages. Trying again in ", MONGO_RETRY_INTERVAL, " seconds. [", (i+1), "]"
time.sleep(MONGO_RETRY_INTERVAL)
while cursor.alive:
try:
envelope = next(cursor, None)
if envelope == None:
break;
except StopIteration:
time.sleep(1)
continue
except:
#print "[RECOVERING]MongoIPC: Fail to reach messages. Err:",sys.exc_info()
break;
ipc_message = MongoIpcMessageFactory.fromMongoMessageType(envelope)
message_processor.process(ipc_message);
# tries to mark message as read
for j in xrange(0, MONGO_MAX_RETRIES):
try:
collection = self._producer_connection[self._db_name][self._channel_id]
collection.update({"_id": envelope["_id"]},
{"$set": {FIELD_NAME_READ: True}})
# update done, break for
break
except:
if (j + 1) == MONGO_MAX_RETRIES:
print "[ERROR]MongoIPC: The Message (id: ",
print envelope["_id"],
print ") could not be marked as read. ",
print "Error: (", sys.exc_info, ")"
sys.exit(1)
print "[RECOVERING]MongoIPC: Could not mark message ",
print "as read. Trying again in ",
print MONGO_RETRY_INTERVAL, " seconds. [", (j+1), "]"
time.sleep(MONGO_RETRY_INTERVAL)
print "[OK]MongoIPC: Message (id: ", envelope["_id"], ") was marked as Read."
time.sleep(0.05)
def parallel_listen(self, message_processor):
worker = threading.Thread(target=self.listen, args=(message_processor,))
worker.start()
def send(self, ipc_message):
#self._producer_connection = self._mf.create_connection()
mongo_message = MongoIpcMessageFactory.fromMessageType(ipc_message)
for i in xrange(0, MONGO_MAX_RETRIES):
try:
collection = self._producer_connection[self._db_name][self._channel_id]
collection.insert(mongo_message)
break;
except:
if (i + 1) == MONGO_MAX_RETRIES:
print "[ERROR]MongoIPC: Message could not be sent. ",
print "Error: (", sys.exc_info(), ")"
sys.exit(1)
print "[RECOVERING]MongoIPC: Message not sent. ",
print "Trying again in ", MONGO_RETRY_INTERVAL, " seconds. ",
print "[", (i+1), "]"
time.sleep(MONGO_RETRY_INTERVAL)
print "[OK]MongoIPC: Message sent"
return True
class MongoIpcMessageFactory:
    """This class implements a factory to build an IPC message object from a BSON object and vice versa"""
@staticmethod
    def fromMongoMessageType(mongo_obj):
        """Receive a mongo BSONObj and build an
        IPC message object, based on message type"""
#message = bson.BSON.decode(mongo_obj)
message = mongo_obj
message_content = message[FIELD_NAME_CONTENT]
ipc_message = None
if int(message[FIELD_NAME_TYPE]) == RFProtocol.PORT_REGISTER:
ipc_message = RFProtocol.PortRegister()
ipc_message.set_vm_id(message_content["vm_id"])
ipc_message.set_vm_port(message_content["vm_port"])
ipc_message.set_hwaddress(message_content["hwaddress"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.PORT_CONFIG:
ipc_message = RFProtocol.PortConfig()
ipc_message.set_vm_id(message_content["vm_id"])
ipc_message.set_vm_port(message_content["vm_port"])
ipc_message.set_operation_id(message_content["operation_id"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.DATAPATH_PORT_REGISTER:
ipc_message = RFProtocol.DatapathPortRegister()
ipc_message.set_ct_id(message_content["ct_id"])
ipc_message.set_dp_id(message_content["dp_id"])
ipc_message.set_dp_port(message_content["dp_port"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.DATAPATH_DOWN:
ipc_message = RFProtocol.DatapathDown()
ipc_message.set_ct_id(message_content["ct_id"])
ipc_message.set_dp_id(message_content["dp_id"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.VIRTUAL_PLANE_MAP:
ipc_message = RFProtocol.VirtualPlaneMap()
ipc_message.set_vm_id(message_content["vm_id"])
ipc_message.set_vm_port(message_content["vm_port"])
ipc_message.set_vs_id(message_content["vs_id"])
ipc_message.set_vs_port(message_content["vs_port"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.DATA_PLANE_MAP:
ipc_message = RFProtocol.DataPlaneMap()
ipc_message.set_ct_id(message_content["ct_id"])
ipc_message.set_dp_id(message_content["dp_id"])
ipc_message.set_dp_port(message_content["dp_port"])
ipc_message.set_vs_id(message_content["vs_id"])
ipc_message.set_vs_port(message_content["vs_port"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.ROUTE_MOD:
ipc_message = RFProtocol.RouteMod()
ipc_message.set_mod(message_content["mod"])
ipc_message.set_id(message_content["id"])
ipc_message.set_matches(message_content["matches"])
ipc_message.set_actions(message_content["actions"])
ipc_message.set_options(message_content["options"])
else:
return None
ipc_message.set_message_id(message[FIELD_NAME_ID])
ipc_message.set_to(message[FIELD_NAME_TO])
ipc_message.set_from(message[FIELD_NAME_FROM])
ipc_message.set_read(message[FIELD_NAME_READ])
return ipc_message
@staticmethod
    def fromMessageType(ipc_message):
        """Receive the IPC message object and build a mongo BSON object,
        based on message type"""
mongo_message = {}
mongo_message[FIELD_NAME_ID] = bson.objectid.ObjectId(ipc_message.get_message_id())
mongo_message[FIELD_NAME_TO] = str(ipc_message.get_to())
mongo_message[FIELD_NAME_FROM] = str(ipc_message.get_from())
mongo_message[FIELD_NAME_READ] = ipc_message.is_read()
mongo_message[FIELD_NAME_TYPE] = ipc_message.get_type()
message_content = {}
if int(ipc_message.get_type()) == RFProtocol.PORT_REGISTER:
message_content["vm_id"] = str(ipc_message.get_vm_id())
message_content["vm_port"] = str(ipc_message.get_vm_port())
message_content["hwaddress"] = str(ipc_message.get_hwaddress())
elif int(ipc_message.get_type()) == RFProtocol.PORT_CONFIG:
message_content["vm_id"] = str(ipc_message.get_vm_id())
message_content["vm_port"] = str(ipc_message.get_vm_port())
message_content["operation_id"] = str(ipc_message.get_operation_id())
elif int(ipc_message.get_type()) == RFProtocol.DATAPATH_PORT_REGISTER:
message_content["ct_id"] = str(ipc_message.get_ct_id())
message_content["dp_id"] = str(ipc_message.get_dp_id())
message_content["dp_port"] = str(ipc_message.get_dp_port())
elif int(ipc_message.get_type()) == RFProtocol.DATAPATH_DOWN:
message_content["ct_id"] = str(ipc_message.get_ct_id())
message_content["dp_id"] = str(ipc_message.get_dp_id())
elif int(ipc_message.get_type()) == RFProtocol.VIRTUAL_PLANE_MAP:
message_content["vm_id"] = str(ipc_message.get_vm_id())
message_content["vm_port"] = str(ipc_message.get_vm_port())
message_content["vs_id"] = str(ipc_message.get_vs_id())
message_content["vs_port"] = str(ipc_message.get_vs_port())
elif int(ipc_message.get_type()) == RFProtocol.DATA_PLANE_MAP:
message_content["ct_id"] = str(ipc_message.get_ct_id())
message_content["dp_id"] = str(ipc_message.get_dp_id())
message_content["dp_port"] = str(ipc_message.get_dp_port())
message_content["vs_id"] = str(ipc_message.get_vs_id())
message_content["vs_port"] = str(ipc_message.get_vs_port())
elif int(ipc_message.get_type()) == RFProtocol.ROUTE_MOD:
message_content["mod"] = str(ipc_message.get_mod())
message_content["id"] = str(ipc_message.get_id())
message_content["matches"] = ipc_message.get_matches()
message_content["actions"] = ipc_message.get_actions()
message_content["options"] = ipc_message.get_options()
else:
return None
mongo_message[FIELD_NAME_CONTENT] = message_content
return mongo_message
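# Usage sketch (hypothetical channel/user ids; assumes a reachable MongoDB as
# configured through MongoFactory and rflib.defs). One side sends, the other
# listens with a processor object whose process() method receives the decoded
# IPC messages:
#
#   # sender side (e.g. a VM client)
#   msg = RFProtocol.PortRegister()
#   msg.set_vm_id(1); msg.set_vm_port(2); msg.set_hwaddress("00:00:00:00:00:01")
#   msg.set_from("vm1"); msg.set_to("rfserver")
#   MongoIpc("vm1", "rf_channel").send(msg)
#
#   # receiver side: processor.process(ipc_message) is invoked for each message
#   MongoIpc("rfserver", "rf_channel").parallel_listen(processor)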
| apache-2.0 | -1,613,797,404,470,370,000 | 43.656604 | 164 | 0.532787 | false |
bkolada/koalocleaner | uis/compiled/mainwindow_ui.py | 1 | 11113 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_TEM.ui'
#
# Created: Sun Apr 06 23:25:46 2014
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1043, 608)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.widget_8 = QtGui.QWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_8.sizePolicy().hasHeightForWidth())
self.widget_8.setSizePolicy(sizePolicy)
self.widget_8.setObjectName(_fromUtf8("widget_8"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.widget_8)
self.verticalLayout_4.setMargin(0)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.widget_5 = QtGui.QWidget(self.widget_8)
self.widget_5.setObjectName(_fromUtf8("widget_5"))
self.gridLayout = QtGui.QGridLayout(self.widget_5)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.open_epub = QtGui.QPushButton(self.widget_5)
self.open_epub.setObjectName(_fromUtf8("open_epub"))
self.gridLayout.addWidget(self.open_epub, 0, 1, 1, 1)
self.open_annot = QtGui.QPushButton(self.widget_5)
self.open_annot.setObjectName(_fromUtf8("open_annot"))
self.gridLayout.addWidget(self.open_annot, 0, 0, 1, 1)
self.save_epub = QtGui.QPushButton(self.widget_5)
self.save_epub.setObjectName(_fromUtf8("save_epub"))
self.gridLayout.addWidget(self.save_epub, 0, 2, 1, 1)
self.verticalLayout_4.addWidget(self.widget_5)
self.widget_2 = QtGui.QWidget(self.widget_8)
self.widget_2.setObjectName(_fromUtf8("widget_2"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.widget_2)
self.horizontalLayout_2.setMargin(0)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.widget_6 = QtGui.QWidget(self.widget_2)
self.widget_6.setObjectName(_fromUtf8("widget_6"))
self.gridLayout_2 = QtGui.QGridLayout(self.widget_6)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_4 = QtGui.QLabel(self.widget_6)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_2.addWidget(self.label_4, 0, 0, 1, 1)
self.annot_table = QtGui.QTableWidget(self.widget_6)
self.annot_table.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.annot_table.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.annot_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.annot_table.setObjectName(_fromUtf8("annot_table"))
self.annot_table.setColumnCount(6)
self.annot_table.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.annot_table.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.annot_table.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.annot_table.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.annot_table.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.annot_table.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.annot_table.setHorizontalHeaderItem(5, item)
self.annot_table.horizontalHeader().setStretchLastSection(True)
self.gridLayout_2.addWidget(self.annot_table, 1, 0, 1, 1)
self.horizontalLayout_2.addWidget(self.widget_6)
self.verticalLayout_4.addWidget(self.widget_2)
self.load = QtGui.QPushButton(self.widget_8)
self.load.setObjectName(_fromUtf8("load"))
self.verticalLayout_4.addWidget(self.load)
self.widget_4 = QtGui.QWidget(self.widget_8)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_4.sizePolicy().hasHeightForWidth())
self.widget_4.setSizePolicy(sizePolicy)
self.widget_4.setObjectName(_fromUtf8("widget_4"))
self.horizontalLayout = QtGui.QHBoxLayout(self.widget_4)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.widget = QtGui.QWidget(self.widget_4)
self.widget.setObjectName(_fromUtf8("widget"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.widget)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.label = QtGui.QLabel(self.widget)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout_2.addWidget(self.label)
self.before = QtGui.QTextEdit(self.widget)
self.before.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(10)
self.before.setFont(font)
self.before.setObjectName(_fromUtf8("before"))
self.verticalLayout_2.addWidget(self.before)
self.horizontalLayout.addWidget(self.widget)
self.widget_3 = QtGui.QWidget(self.widget_4)
self.widget_3.setObjectName(_fromUtf8("widget_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.widget_3)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.label_3 = QtGui.QLabel(self.widget_3)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout_3.addWidget(self.label_3)
self.after = QtGui.QTextEdit(self.widget_3)
font = QtGui.QFont()
font.setPointSize(10)
self.after.setFont(font)
self.after.setObjectName(_fromUtf8("after"))
self.verticalLayout_3.addWidget(self.after)
self.horizontalLayout.addWidget(self.widget_3)
self.verticalLayout_4.addWidget(self.widget_4)
self.widget_7 = QtGui.QWidget(self.widget_8)
self.widget_7.setObjectName(_fromUtf8("widget_7"))
self.horizontalLayout_4 = QtGui.QHBoxLayout(self.widget_7)
self.horizontalLayout_4.setMargin(0)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem)
self.save_paragraph_changes = QtGui.QPushButton(self.widget_7)
self.save_paragraph_changes.setObjectName(_fromUtf8("save_paragraph_changes"))
self.horizontalLayout_4.addWidget(self.save_paragraph_changes)
self.verticalLayout_4.addWidget(self.widget_7)
self.horizontalLayout_3.addWidget(self.widget_8)
self.groupBox = QtGui.QGroupBox(self.centralwidget)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.plainTextEdit_3 = QtGui.QPlainTextEdit(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plainTextEdit_3.sizePolicy().hasHeightForWidth())
self.plainTextEdit_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lucida Console"))
font.setPointSize(7)
self.plainTextEdit_3.setFont(font)
self.plainTextEdit_3.setObjectName(_fromUtf8("plainTextEdit_3"))
self.verticalLayout.addWidget(self.plainTextEdit_3)
self.horizontalLayout_3.addWidget(self.groupBox)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Th3 3p4b M4s73r", None, QtGui.QApplication.UnicodeUTF8))
self.open_epub.setText(QtGui.QApplication.translate("MainWindow", "Open epub", None, QtGui.QApplication.UnicodeUTF8))
self.open_annot.setText(QtGui.QApplication.translate("MainWindow", "Open annotations", None, QtGui.QApplication.UnicodeUTF8))
self.save_epub.setText(QtGui.QApplication.translate("MainWindow", "Save changes", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "Available annotations", None, QtGui.QApplication.UnicodeUTF8))
self.annot_table.setSortingEnabled(True)
self.annot_table.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("MainWindow", "Annotation", None, QtGui.QApplication.UnicodeUTF8))
self.annot_table.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("MainWindow", "File", None, QtGui.QApplication.UnicodeUTF8))
self.annot_table.horizontalHeaderItem(2).setText(QtGui.QApplication.translate("MainWindow", "Start par", None, QtGui.QApplication.UnicodeUTF8))
self.annot_table.horizontalHeaderItem(3).setText(QtGui.QApplication.translate("MainWindow", "Start pos", None, QtGui.QApplication.UnicodeUTF8))
self.annot_table.horizontalHeaderItem(4).setText(QtGui.QApplication.translate("MainWindow", "End pos", None, QtGui.QApplication.UnicodeUTF8))
self.annot_table.horizontalHeaderItem(5).setText(QtGui.QApplication.translate("MainWindow", "Comment", None, QtGui.QApplication.UnicodeUTF8))
self.load.setText(QtGui.QApplication.translate("MainWindow", "Load", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Paragraph before", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Paragraph after", None, QtGui.QApplication.UnicodeUTF8))
self.save_paragraph_changes.setText(QtGui.QApplication.translate("MainWindow", "Save changes in paragraph", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("MainWindow", "Logger", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | -3,992,711,767,065,595,000 | 58.747312 | 154 | 0.714479 | false |
daviessm/heating | temp_sensor.py | 1 | 6139 | import struct, time, logging, threading
import dbus
from bluepy.btle import Scanner, DefaultDelegate, Peripheral, BTLEException
from bluepy import btle
logger = logging.getLogger('heating')
class TempSensor(object):
_scanning_lock = threading.Lock()
def __init__(self, peripheral):
self.mac = peripheral.addr
self.sent_alert = False
self.amb_temp = None
self.temp_job_id = None
self.peripheral = Peripheral(peripheral)
self.characteristics = {}
def connect(self):
self.tag.connect()
def disconnect(self):
self.tag.disconnect()
def get_ambient_temp(self):
pass
def _write_uuid(self, uuid, data):
try:
if not uuid in self.characteristics:
self.characteristics[uuid] = self.peripheral.getCharacteristics(uuid=uuid)[0]
#If there's still no characteristic, error
if not uuid in self.characteristics:
raise Exception('UUID ' + str(uuid) + ' not found on device ' + self.mac)
self.characteristics[uuid].write(data)
except BTLEException as e:
logger.warn(self.mac + ' disconnected. Try to reconnect.')
raise DisconnectedException(e.message)
def _read_uuid(self, uuid):
try:
if not uuid in self.characteristics:
self.characteristics[uuid] = self.peripheral.getCharacteristics(uuid=uuid)[0]
#If there's still no characteristic, error
if not uuid in self.characteristics:
raise Exception('UUID ' + str(uuid) + ' not found on device ' + self.mac)
return self.characteristics[uuid].read()
except BTLEException as e:
logger.warn(self.mac + ' disconnected. Try to reconnect.')
raise DisconnectedException(e.message)
@staticmethod
def find_temp_sensors(sensors):
TempSensor._scanning_lock.acquire()
logger.debug('Scanning for devices')
scanner = Scanner().withDelegate(ScanDelegate())
try:
devices = scanner.scan(10.0)
if sensors is None:
sensors = {}
for device in devices:
if device.addr in sensors:
continue
name = ''
if device.getValueText(9):
name = device.getValueText(9)
elif device.getValueText(8):
name = device.getValueText(8)
logger.debug('Device name: ' + name)
if 'SensorTag' in name:
logger.info('Found SensorTag with address: ' + device.addr)
sensors[device.addr] = SensorTag(device)
elif 'MetaWear' in name:
logger.info('Found MetaWear with address: ' + device.addr)
sensors[device.addr] = MetaWear(device)
logger.debug('Finished scanning for devices')
TempSensor._scanning_lock.release()
if len(sensors) == 0:
raise NoTagsFoundException('No sensors found!')
except BTLEException as e:
scanner.stop()
logger.warn('Got exception ' + e.message)
TempSensor._scanning_lock.release()
return sensors
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleDiscovery(self, dev, isNewDev, isNewData):
pass
class SensorTag(TempSensor):
def __init__(self, peripheral):
TempSensor.__init__(self, peripheral)
def get_ambient_temp(self):
tAmb = 0
failures = 0
while tAmb == 0 and failures < 4:
try:
#Turn red LED on
self._write_uuid('f000aa65-0451-4000-b000-000000000000', b'\x01')
self._write_uuid('f000aa66-0451-4000-b000-000000000000', b'\x01')
#Turn temperature sensor on
self._write_uuid('f000aa02-0451-4000-b000-000000000000', b'\x01')
time.sleep(0.1)
#Turn red LED off
self._write_uuid('f000aa65-0451-4000-b000-000000000000', b'\x00')
self._write_uuid('f000aa66-0451-4000-b000-000000000000', b'\x00')
#Wait for reading
count = 0
while tAmb == 0 and count < 8:
count += 1
time.sleep(0.2)
result = self._read_uuid('f000aa01-0451-4000-b000-000000000000')
(rawVobj, rawTamb) = struct.unpack('<hh', result)
tAmb = rawTamb / 128.0
#Turn temperature sensor off
self._write_uuid('f000aa02-0451-4000-b000-000000000000', b'\x00')
if count == 8:
failures += 1
else:
failures = 0
except DisconnectedException as e:
raise NoTemperatureException(e.message)
if tAmb == 0:
self.amb_temp = None
raise NoTemperatureException('Could not get temperature from ' + self.mac)
logger.info('Got temperature ' + str(tAmb) + ' from ' + self.mac)
self.amb_temp = tAmb
class MetaWear(TempSensor):
def __init__(self, peripheral):
TempSensor.__init__(self, peripheral)
def get_ambient_temp(self):
self.connect()
tAmb = 0
failures = 0
while tAmb == 0 and failures < 4:
try:
#Turn red LED on
self._write_uuid('326a9001-85cb-9195-d9dd-464cfbbae75a', b'\x02\x03\x01\x02\x1f\x1f\x00\x00\xd0\x07\x00\x00\xd0\x07\x00\x00\xff')
self._write_uuid('326a9001-85cb-9195-d9dd-464cfbbae75a', b'\x02\x01\x02')
#Turn temperature sensor on
self._write_uuid('326a9001-85cb-9195-d9dd-464cfbbae75a', b'\x04\x81\x01')
time.sleep(0.1)
#Turn red LED off
self._write_uuid('326a9001-85cb-9195-d9dd-464cfbbae75a', b'\x02\x02\x01')
#Wait for reading
count = 0
while tAmb == 0 and count < 8:
count += 1
time.sleep(0.2)
result = self._read_uuid('326a9006-85cb-9195-d9dd-464cfbbae75a')
(rawTamb,) = struct.unpack('<xxxh', str(result))
tAmb = rawTamb / 8.0
if count == 8:
failures += 1
else:
failures = 0
except DisconnectedException as e:
raise NoTemperatureException(e.message)
if tAmb == 0:
self.amb_temp = None
raise NoTemperatureException('Could not get temperature from ' + self.mac)
logger.info('Got temperature ' + str(tAmb) + ' from ' + self.mac)
self.amb_temp = tAmb
class NoTagsFoundException(Exception):
pass
class DisconnectedException(Exception):
pass
class NoTemperatureException(Exception):
pass
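# Usage sketch: scan for supported BLE sensors, then poll each one for its
# ambient temperature; NoTagsFoundException / NoTemperatureException signal
# scan or read failures.
#
#   sensors = TempSensor.find_temp_sensors(None)
#   for sensor in sensors.values():
#       sensor.get_ambient_temp()
#       print(sensor.mac, sensor.amb_temp)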
| gpl-3.0 | -601,084,842,485,565,200 | 30.005051 | 137 | 0.633654 | false |
timbalam/GroopM | groopm/tests/db_upgrade_data/run_db_upgrade.py | 1 | 2599 | #!/usr/bin/env python
###############################################################################
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public #
# License as published by the Free Software Foundation; either #
# version 3.0 of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library. #
# #
###############################################################################
__author__ = "Tim Lamberton"
__copyright__ = "Copyright 2015"
__credits__ = ["Tim Lamberton"]
__license__ = "GPL3"
__maintainer__ = "Tim Lamberton"
__email__ = "[email protected]"
###############################################################################
# system imports
import sys
# groopm imports
from groopm.data3 import DataManager
from groopm.groopmTimekeeper import TimeKeeper
###############################################################################
###############################################################################
###############################################################################
###############################################################################
if __name__ == '__main__':
try:
dbFileName = sys.argv[1]
except IndexError:
print "USAGE: %s DATABASE" % sys.argv[0]
sys.exit(1)
timer = TimeKeeper()
DataManager().checkAndUpgradeDB(dbFileName, timer, silent=False)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
| gpl-3.0 | 6,770,974,434,482,290,000 | 48.980769 | 79 | 0.333975 | false |
TitanEmbeds/Titan | webapp/titanembeds/database/guilds.py | 1 | 3023 | from titanembeds.database import db
class Guilds(db.Model):
__tablename__ = "guilds"
guild_id = db.Column(db.BigInteger, nullable=False, primary_key=True) # Discord guild id
unauth_users = db.Column(db.Boolean(), nullable=False, default=1) # If allowed unauth users
visitor_view = db.Column(db.Boolean(), nullable=False, default=0) # If users are automatically "signed in" and can view chat
webhook_messages = db.Column(db.Boolean(), nullable=False, default=0) # Use webhooks to send messages instead of the bot
guest_icon = db.Column(db.String(255), default=None) # Guest icon url, None if unset
chat_links = db.Column(db.Boolean(), nullable=False, default=1) # If users can post links
bracket_links = db.Column(db.Boolean(), nullable=False, default=1) # If appending brackets to links to prevent embed
unauth_captcha = db.Column(db.Boolean(), nullable=False, server_default="1")# Enforce captcha on guest users
mentions_limit = db.Column(db.Integer, nullable=False, default=11) # If there is a limit on the number of mentions in a msg
invite_link = db.Column(db.String(255)) # Custom Discord Invite Link
post_timeout = db.Column(db.Integer, nullable=False, server_default="5") # Seconds to elapse before another message can be posted from the widget
max_message_length = db.Column(db.Integer, nullable=False, server_default="300") # Chars length the message should be before being rejected by the server
banned_words_enabled = db.Column(db.Boolean(), nullable=False, server_default="0") # If banned words are enforced
banned_words_global_included = db.Column(db.Boolean(), nullable=False, server_default="0") # Add global banned words to the list
banned_words = db.Column(db.Text(), nullable=False, server_default="[]") # JSON list of strings to block from sending
autorole_unauth = db.Column(db.BigInteger, nullable=True, server_default=None) # Automatic Role inherit for unauthenticated users
autorole_discord = db.Column(db.BigInteger, nullable=True, server_default=None) # Automatic Role inherit for discord users
file_upload = db.Column(db.Boolean(), nullable=False, server_default="0") # Allow file uploading for server
send_rich_embed = db.Column(db.Boolean(), nullable=False, server_default="0") # Allow sending rich embed messages
def __init__(self, guild_id):
self.guild_id = guild_id
self.unauth_users = True # defaults to true
self.visitor_view = False
self.webhook_messages = False
self.guest_icon = None
self.chat_links = True
self.bracket_links = True
self.unauth_captcha = True
self.mentions_limit = -1 # -1 = unlimited mentions
def __repr__(self):
return '<Guilds {0}>'.format(self.guild_id)
def set_unauthUsersBool(self, value):
self.unauth_users = value
return self.unauth_users
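# Editor's sketch (not part of the original model): one way a Guilds row might be
# created and persisted with the Flask-SQLAlchemy `db` imported above. Assumes an
# active application context and that the usual `db.session` is available.
def example_register_guild(guild_id):
    guild = Guilds(guild_id)
    guild.set_unauthUsersBool(False)  # require login for this guild
    db.session.add(guild)
    db.session.commit()
    return guild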
| agpl-3.0 | 7,627,332,055,170,403,000 | 72.731707 | 157 | 0.677473 | false |
mayhem/led-chandelier | software/tbd/filter.py | 1 | 3645 | #!/usr/bin/python
import abc
import colorsys
import math
import common
from color import Color
class Filter(common.ChainLink):
def __init__(self):
self.next = None
super(Filter, self).__init__()
def call_filter(self, t, col):
if self.next:
return self.next.filter(t, col)
return col
@abc.abstractmethod
def filter(self, t, color):
pass
class FadeIn(Filter):
def __init__(self, duration = 1.0, offset = 0.0):
self.duration = duration
self.offset = offset
super(FadeIn, self).__init__()
def describe(self):
desc = common.make_function(common.FUNC_FADE_IN, (common.ARG_VALUE, common.ARG_VALUE))
desc += common.pack_fixed(self.duration)
desc += common.pack_fixed(self.offset)
#print "%s(%.3f, %.3f)" % (self.__class__.__name__, self.duration, self.offset)
return desc + self.describe_next()
def filter(self, t, color):
if t < self.offset:
return Color(0,0,0)
if t < self.offset + self.duration:
percent = (t - self.offset) / self.duration
return Color( int(color[0] * percent), int(color[1] * percent), int(color[2] * percent))
return self.call_next(t, color)
class FadeOut(Filter):
def __init__(self, duration = 1.0, offset = 0.0):
self.duration = duration
self.offset = offset
super(FadeOut, self).__init__()
def describe(self):
desc = common.make_function(common.FUNC_FADE_OUT, (common.ARG_VALUE, common.ARG_VALUE))
desc += common.pack_fixed(self.duration)
desc += common.pack_fixed(self.offset)
return desc + self.describe_next()
def filter(self, t, color):
if t >= self.offset + self.duration:
return Color(0,0,0)
if t >= self.offset:
percent = 1.0 - ((t - self.offset) / self.duration)
return Color( int(color[0] * percent), int(color[1] * percent), int(color[2] * percent))
return self.call_next(t, color)
class Brightness(Filter):
def __init__(self, gen):
self.gen = gen
super(Brightness, self).__init__()
def describe(self):
desc = common.make_function(common.FUNC_BRIGHTNESS, (common.ARG_FUNC,))
#print "%s("% self.__class__.__name__,
desc += self.gen.describe()
#print ")"
return desc + self.describe_next()
def filter(self, t, color):
percent = self.gen[t]
#print " %3.2f %3d,%3d,%3d" % (percent, color[0], color[1], color[2])
return self.call_next(t, Color(int(color[0] * percent), int(color[1] * percent), int(color[2] * percent)))
class ColorShift(Filter):
def __init__(self, h_shift, s_shift, v_shift):
self.h_shift = h_shift
self.s_shift = s_shift
self.v_shift = v_shift
super(ColorShift, self).__init__()
def describe(self):
desc = common.make_function(common.FUNC_COLOR_SHIFT, (common.ARG_VALUE,common.ARG_VALUE,common.ARG_VALUE))
desc += common.pack_fixed(self.h_shift)
desc += common.pack_fixed(self.s_shift)
desc += common.pack_fixed(self.v_shift)
return desc + self.describe_next()
def filter(self, t, color):
        h, s, v = colorsys.rgb_to_hsv(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)
        h = (h + self.h_shift) % 1.0
        s = max(0.0, min(1.0, s + self.s_shift))
        v = max(0.0, min(1.0, v + self.v_shift))
col = colorsys.hsv_to_rgb(h, s, v)
return self.call_next(t, Color(int(col[0] * 255), int(col[1] * 255), int(col[2] * 255)))
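# Editor's sketch (not part of the original module): evaluating a FadeIn filter on its
# own at a time inside the fade window, so no downstream chain link is required.
def example_fade_in():
    fade = FadeIn(duration=2.0, offset=0.0)
    # At t=1.0 the fade is halfway through, so the colour is scaled to ~50% brightness.
    return fade.filter(1.0, Color(255, 128, 0))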
| mit | -1,335,183,906,565,119,500 | 33.386792 | 122 | 0.576955 | false |
sirmmo/django-webdav | setup.py | 1 | 1864 | #!/usr/bin/env python
#
# Copyright (c) 2011, SmartFile <[email protected]>
# All rights reserved.
#
# This file is part of django-webdav.
#
# django-webdav is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# django-webdav is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with django-webdav. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
setup(
name='django-webdav',
version='0.1',
description=('A WebDAV server for Django.'),
long_description=(
"""
WebDAV implemented as a Django application. The motivation for this project is to
allow authentication of users against Django's contrib.auth system, while also
exporting different directories per user. Many Django tools and app can be combined
with this such as django-digest etc. to provide a powerful WebDAV server.
"""
),
author='SmartFile',
author_email='[email protected]',
url='http://code.google.com/p/django-webdav/',
packages=['django_webdav'],
#'django_webdav.samples'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
zip_safe=False,
)
| agpl-3.0 | -7,929,439,825,193,218,000 | 36.28 | 83 | 0.703326 | false |
lwalstad-pivotal/unified-demonstration-sentiment_01 | twitter-nlp/app.py | 1 | 3000 | """
twitter nlp
~~~~~~~~~~~
Serves the web application and sends tweets and tweet data using Server Side Events (SSE)
"""
import json
import os
import time
import redis
from flask import Flask, request, Response, render_template
from gevent import monkey, sleep, socket
from gevent.pywsgi import WSGIServer
import helper_functions
monkey.patch_all()
app = Flask(__name__)
redis.connection.socket = socket
# connect to redis for storing logging info
r = helper_functions.connect_redis_db()
# twitter compute and stats URL, from manifest
SENTIMENT_COMPUTE_URL=os.getenv('SENTIMENT_COMPUTE_URL',None)
SENTIMENT_STATS_URL=os.getenv('SENTIMENT_STATS_URL',None)
def gen_dashboard_tweets():
n = 5 # min number of seconds between each tweet
pubsub = r.pubsub()
pubsub.subscribe('tweet_msgs')
for message in pubsub.listen():
is_a_tweet = message['type'] == 'message'
is_tweet_message = message['channel'] == 'tweet_msgs'
if not is_a_tweet or not is_tweet_message:
continue
msg = json.loads(message['data'])
tweet_sent = {"data": json.dumps({"tweet": msg['text'],
"polarity": '{:1.2f}'.format(msg['polarity'])})}
yield (helper_functions.sse_pack(tweet_sent))
sleep(n-2) # new tweet won't get published for n seconds, let python rest
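# Editor's sketch (not part of the original app): a hypothetical producer feeding the
# 'tweet_msgs' channel that gen_dashboard_tweets() consumes. The payload keys
# ('text', 'polarity') mirror what the generator reads; the connection argument is
# whatever redis client the caller already has.
def example_publish_tweet(redis_conn, text, polarity):
    redis_conn.publish('tweet_msgs', json.dumps({'text': text, 'polarity': polarity}))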
def get_tweet_stats():
pubsub = r.pubsub()
pubsub.subscribe('tweet_stats')
for message in pubsub.listen():
time_start = time.time()
is_a_tweet = message['type'] == 'message'
is_a_tweet_stat = message['channel'] == 'tweet_stats'
if not is_a_tweet or not is_a_tweet_stat:
continue
tweet_stats = json.loads(message['data'])
time_start = time.time()
yield helper_functions.sse_pack({"data": json.dumps({"tweetRate": tweet_stats['tweet_rate'],
"avgPolarity": tweet_stats['avg_polarity']})})
@app.route('/live_tweets')
def live_tweets_sse():
#tweet_sent = {"data": json.dumps({"tweet": msg['text'],
# "polarity": '{:1.2f}'.format(msg['polarity'])})}
#yield (helper_functions.sse_pack(tweet_sent))
return "data: testing"
#return Response(gen_dashboard_tweets(),mimetype='text/event-stream')
@app.route('/tweet_rate')
def tweet_rate_sse():
return Response(get_tweet_stats(),mimetype='text/event-stream')
@app.route('/')
def page():
return render_template('index.html',
SENTIMENT_COMPUTE_URL = SENTIMENT_COMPUTE_URL,
SENTIMENT_STATS_URL = SENTIMENT_STATS_URL)
if __name__ == '__main__':
if os.environ.get('VCAP_SERVICES') is None: # running locally
PORT = 5001
app.debug = True
else: # running on CF
PORT = int(os.getenv("PORT"))
http_server = WSGIServer(('0.0.0.0',PORT), app)
http_server.serve_forever() | mit | -6,753,234,470,440,190,000 | 33.494253 | 107 | 0.604 | false |
liftoff/pyminifier | pyminifier/__main__.py | 1 | 5284 | from optparse import OptionParser
import sys
from . import pyminify
from . import __version__
py3 = False
lzma = False
if sys.version_info[0] == 3:
    py3 = True
    try:
        import lzma
    except ImportError:
        pass
def main():
"""
Sets up our command line options, prints the usage/help (if warranted), and
runs :py:func:`pyminifier.pyminify` with the given command line options.
"""
usage = '%prog [options] "<input file>"'
if '__main__.py' in sys.argv[0]: # python -m pyminifier
usage = 'pyminifier [options] "<input file>"'
parser = OptionParser(usage=usage, version=__version__)
parser.disable_interspersed_args()
parser.add_option(
"-o", "--outfile",
dest="outfile",
default=None,
help="Save output to the given file.",
metavar="<file path>"
)
parser.add_option(
"-d", "--destdir",
dest="destdir",
default="./minified",
help=("Save output to the given directory. "
"This option is required when handling multiple files. "
"Defaults to './minified' and will be created if not present. "),
metavar="<file path>"
)
parser.add_option(
"--nominify",
action="store_true",
dest="nominify",
default=False,
help="Don't bother minifying (only used with --pyz).",
)
parser.add_option(
"--use-tabs",
action="store_true",
dest="tabs",
default=False,
help="Use tabs for indentation instead of spaces.",
)
parser.add_option(
"--bzip2",
action="store_true",
dest="bzip2",
default=False,
help=("bzip2-compress the result into a self-executing python script. "
"Only works on stand-alone scripts without implicit imports.")
)
parser.add_option(
"--gzip",
action="store_true",
dest="gzip",
default=False,
help=("gzip-compress the result into a self-executing python script. "
"Only works on stand-alone scripts without implicit imports.")
)
if lzma:
parser.add_option(
"--lzma",
action="store_true",
dest="lzma",
default=False,
help=("lzma-compress the result into a self-executing python script. "
"Only works on stand-alone scripts without implicit imports.")
)
parser.add_option(
"--pyz",
dest="pyz",
default=None,
help=("zip-compress the result into a self-executing python script. "
"This will create a new file that includes any necessary implicit"
" (local to the script) modules. Will include/process all files "
"given as arguments to pyminifier.py on the command line."),
metavar="<name of archive>.pyz"
)
parser.add_option(
"-O", "--obfuscate",
action="store_true",
dest="obfuscate",
default=False,
help=(
"Obfuscate all function/method names, variables, and classes. "
"Default is to NOT obfuscate."
)
)
parser.add_option(
"--obfuscate-classes",
action="store_true",
dest="obf_classes",
default=False,
help="Obfuscate class names."
)
parser.add_option(
"--obfuscate-functions",
action="store_true",
dest="obf_functions",
default=False,
help="Obfuscate function and method names."
)
parser.add_option(
"--obfuscate-variables",
action="store_true",
dest="obf_variables",
default=False,
help="Obfuscate variable names."
)
parser.add_option(
"--obfuscate-import-methods",
action="store_true",
dest="obf_import_methods",
default=False,
help="Obfuscate globally-imported mouled methods (e.g. 'Ag=re.compile')."
)
parser.add_option(
"--obfuscate-builtins",
action="store_true",
dest="obf_builtins",
default=False,
help="Obfuscate built-ins (i.e. True, False, object, Exception, etc)."
)
parser.add_option(
"--replacement-length",
dest="replacement_length",
default=1,
help=(
"The length of the random names that will be used when obfuscating "
"identifiers."
),
metavar="1"
)
parser.add_option(
"--nonlatin",
action="store_true",
dest="use_nonlatin",
default=False,
help=(
"Use non-latin (unicode) characters in obfuscation (Python 3 only)."
" WARNING: This results in some SERIOUSLY hard-to-read code."
)
)
parser.add_option(
"--prepend",
dest="prepend",
default=None,
help=(
"Prepend the text in this file to the top of our output. "
"e.g. A copyright notice."
),
metavar="<file path>"
)
options, files = parser.parse_args()
if not files:
parser.print_help()
sys.exit(2)
pyminify(options, files)
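# Editor's note (illustrative): typical invocations accepted by the parser above,
# assuming the package is installed so that a `pyminifier` console script exists:
#
#   pyminifier -o minified.py original.py
#   pyminifier --obfuscate --gzip -o minified.py original.py
#   pyminifier --pyz=bundle.pyz main.py helpers.py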
if __name__ == "__main__":
main()
| gpl-3.0 | -5,764,630,278,049,702,000 | 29.194286 | 83 | 0.554883 | false |
Harvard-ATG/HarvardCards | flash/management/commands/convert_media_store.py | 1 | 1968 | """This command is intended for one-time use to migrate previously uploaded
files (images, audio, etc) to the new MediaStore model."""
from django.core.management.base import BaseCommand, CommandError
from harvardcards.settings.common import MEDIA_ROOT
from flash.services import handle_uploaded_media_file
from flash.models import Cards_Fields
import os
import re
class Command(BaseCommand):
help = 'Converts files to the new MediaStore model.'
def handle(self, *args, **kwargs):
cards_fields = Cards_Fields.objects.all().select_related('field')
pathre = re.compile('^\d+_\d+\/')
count = 0
fieldmap = {}
for cf in cards_fields:
# skip empty fields or fields that don't match "[DECK_ID]_[COLLECTION_ID]/"
if cf.value == '' or not pathre.match(cf.value):
continue
# check to see if there's an original file, which would be located here
# if it was previously resized
filepath = os.path.abspath(os.path.join(MEDIA_ROOT, 'originals', cf.value))
if not os.path.exists(filepath):
# otherwise it's probably an audio file, which would be here
filepath = os.path.abspath(os.path.join(MEDIA_ROOT, cf.value))
# add the file to the media store and save the new field value
if os.path.exists(filepath):
result = handle_uploaded_media_file(filepath, cf.field.field_type)
logstr = "cf.id=%s cf.field.field_type=%s cf.value=%s filepath=%s result=%s"
self.stdout.write(logstr % (cf.id, cf.field.field_type, cf.value, filepath, result))
fieldmap[cf.value] = result
count += 1
cf.value = result
cf.save()
self.stdout.write("Total converted: %d" % count)
self.stdout.write("Updated values: %s" % fieldmap)
self.stdout.write("Done!")
| bsd-3-clause | -1,941,675,224,798,410,000 | 39.163265 | 100 | 0.612805 | false |
ThiefMaster/logstapo | tests/test_util.py | 1 | 2754 | import itertools
import re
import pytest
from logstapo import util
@pytest.mark.parametrize(('regexps', 'string', 'expected'), (
('foo', 'bar', False),
('f.o', 'foo', True),
(['foo', 'bar'], 'bar', True),
(['foo', 'baz'], 'bar', False),
(['foo', '(b)ar', 'b(a)r'], 'bar', 'b'),
))
def test_try_match(regexps, string, expected):
regexps = re.compile(regexps) if isinstance(regexps, str) else list(map(re.compile, regexps))
match = util.try_match(regexps, string)
if not expected:
assert match is None
elif isinstance(expected, str):
assert match.groups()[0] == expected
else:
assert match is not None
@pytest.mark.parametrize('debug', (True, False))
def test_debug_echo(mocker, mock_config, debug):
mock_config({'debug': debug})
secho = mocker.patch('logstapo.util.click.secho')
util.debug_echo('test')
assert secho.called == debug
@pytest.mark.parametrize(('level', 'verbosity', 'debug'), itertools.product((1, 2), (0, 1, 2), (True, False)))
def test_verbose_echo(mocker, mock_config, level, verbosity, debug):
mock_config({'debug': debug, 'verbosity': verbosity})
secho = mocker.patch('logstapo.util.click.secho')
util.verbose_echo(level, 'test')
assert secho.called == (debug or (level <= verbosity))
def test_warning_echo(mocker):
secho = mocker.patch('logstapo.util.click.secho')
util.warning_echo('test')
assert secho.called
def test_error_echo(mocker):
secho = mocker.patch('logstapo.util.click.secho')
util.error_echo('test')
assert secho.called
@pytest.mark.parametrize(('text', 'chars', 'expected'), (
('test', '-', '----'),
('test', '-=', '-=-='),
('t', '-=', '-')
))
def test_underlines(text, chars, expected):
assert util.underlined(text, chars) == [text, expected]
@pytest.mark.parametrize(('value', 'collection_type', 'result'), (
('test', list, ['test']),
('test', set, {'test'}),
('test', tuple, ('test',)),
({'test'}, list, ['test']),
(('test',), set, {'test'}),
(['test'], tuple, ('test',)),
))
def test_ensure_collection(value, collection_type, result):
assert util.ensure_collection(value, collection_type) == result
def test_combine_placeholders():
assert util.combine_placeholders('test', {'test': 'xxx'}) == 'test'
assert util.combine_placeholders('test%(test)test', {'test': 'xxx'}) == 'testxxxtest'
assert util.combine_placeholders('test%(a)%(b)', {'a': '1%(b)', 'b': '2'}) == 'test122'
def test_combine_placeholders_cycle():
with pytest.raises(ValueError):
util.combine_placeholders('test%(a)', {'a': '%(a)'})
with pytest.raises(ValueError):
util.combine_placeholders('test%(a)', {'a': '%(b)', 'b': '%(a)'})
| mit | -3,326,630,402,476,973,600 | 31.023256 | 110 | 0.611474 | false |
metno/EVA | eva/executor/grid_engine.py | 1 | 19396 | import os
import re
import time
import dateutil.parser
import paramiko
import paramiko.agent
import paramiko.ssh_exception
import eva
import eva.base.executor
import eva.job
QACCT_CHECK_INTERVAL_MSECS = 2000
SSH_RECV_BUFFER = 4096
SSH_TIMEOUT = 5
SSH_RETRY_EXCEPTIONS = (paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException,
paramiko.ssh_exception.socket.timeout,
paramiko.ssh_exception.socket.error,
)
EXIT_OK = 0
def create_job_unique_id(group_id, job_id):
"""!
    @brief Given an EVA group_id and a job UUID, returns a valid job id for GridEngine.
"""
return u'eva.' + re.sub(r'[^a-zA-Z0-9]', u'-', group_id).strip(u'-') + u'.' + str(job_id)
def get_job_id_from_qsub_output(output):
"""!
Parse the JOB_ID from qsub output. Unfortunately, there is no command-line
option to make it explicitly machine readable, so we use a regular
expression to extract the first number instead.
"""
matches = re.search('\d+', output)
if not matches:
raise eva.exceptions.GridEngineParseException('Unparseable output from qsub: expected job id, but no digits in output: %s' % output)
return int(matches.group(0))
def get_job_id_from_qstat_output(output):
"""!
@brief Parse the JOB_ID from qstat output using a regular expression.
"""
regex = re.compile('^job_number:\s+(\d+)\s*$')
for line in output.splitlines():
matches = regex.match(line)
if matches:
return int(matches.group(1))
raise eva.exceptions.GridEngineParseException('Could not parse job_number from qstat output.')
def split_header_and_job(full_job):
"""
Split a job script into its header and command components.
Requires an input list, and returns two lists.
"""
headers = []
script = []
for line in full_job:
if line.startswith('#!') or line.startswith('#$'):
headers += [line]
else:
script += [line]
return headers, script
def get_exit_code_from_qacct_output(output):
"""!
@brief Parse the job exit code from qacct output using a regular expression.
"""
regex = re.compile('^exit_status\s+(\d+)\s*$')
for line in output.splitlines():
matches = regex.match(line)
if matches:
return int(matches.group(1))
raise eva.exceptions.GridEngineParseException('Could not parse exit_code from qacct output.')
def parse_qacct_metrics(stdout_lines):
"""!
@brief Given a list of qacct standard output, return a dictionary of
metric numbers and tags.
"""
metrics = {}
tags = {}
parsed = {}
base_regex = re.compile('^([\w_]+)\s+(.+)$')
for line in stdout_lines:
matches = base_regex.match(line)
if not matches:
continue
parsed[matches.group(1)] = matches.group(2).strip()
for key in ['qsub_time', 'start_time', 'end_time']:
if key in parsed:
try:
parsed[key] = dateutil.parser.parse(parsed[key])
except:
pass
if 'start_time' in parsed:
if 'end_time' in parsed:
metrics['eva_grid_engine_run_time'] = (parsed['end_time'] - parsed['start_time']).total_seconds() * 1000
if 'qsub_time' in parsed:
metrics['eva_grid_engine_qsub_delay'] = (parsed['start_time'] - parsed['qsub_time']).total_seconds() * 1000
for key in ['ru_utime', 'ru_stime']:
if key in parsed:
metrics['eva_grid_engine_' + key] = int(float(parsed[key]) * 1000)
for key in ['qname', 'hostname']:
if key in parsed:
tags['grid_engine_' + key] = parsed[key]
return {
'metrics': metrics,
'tags': tags,
}
def shift_list(lst):
"""
Move the first element of a list to the last, and shift the remainder to the left.
"""
return lst[1:] + lst[:1]
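# Editor's note (illustrative only, not part of the original module): small examples of
# the parsing helpers above. The qsub output string is made up, but follows the format
# the regular expression expects.
def _example_helper_usage():
    name = create_job_unique_id('my/group id', 'abcd-1234')  # 'eva.my-group-id.abcd-1234'
    job_id = get_job_id_from_qsub_output('Your job 4711 ("eva.job") has been submitted')  # 4711
    headers, script = split_header_and_job(['#!/bin/bash', '#$ -S /bin/bash', 'echo hello'])
    return name, job_id, headers, script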
class JobNotFinishedException(eva.exceptions.EvaException):
pass
class GridEngineExecutor(eva.base.executor.BaseExecutor):
"""
``GridEngineExecutor`` executes programs on Sun OpenGridEngine via an SSH
connection to a submit host.
.. table::
=========================== ============== ====================== ========== ===========
Variable Type Default Inclusion Description
=========================== ============== ====================== ========== ===========
cpu_slots |positive_int| 1 optional How many CPU slots to allocate to the job.
memory |string| 512M optional The maximum amount of memory the job consumes.
modules |list_string| (empty) optional Comma-separated list of GridEngine modules to load before running the job.
qacct_command |string| qacct -j {{job_id}} optional How to call the qacct program to get finished job information.
queue |string| (empty) optional Which Grid Engine queue to run jobs in.
shell |string| /bin/bash optional Which shell to use for the submitted GridEngine job (parameter $ -S).
ssh_hosts |list_string| (empty) required List of Grid Engine submit hostnames.
ssh_user |string| (empty) required Username on the Grid Engine submit host.
ssh_key_file |string| (empty) required Path to a SSH private key used for connecting to the Grid Engine submit host.
=========================== ============== ====================== ========== ===========
"""
CONFIG = {
'cpu_slots': {
'type': 'positive_int',
'default': '1',
},
'memory': {
'type': 'string',
'default': '512M',
},
'modules': {
'type': 'list_string',
'default': '',
},
'qacct_command': {
'type': 'string',
'default': 'qacct -j {{job_id}}',
},
'queue': {
'type': 'string',
'default': '',
},
'shell': {
'type': 'string',
'default': '/bin/bash',
},
'ssh_hosts': {
'type': 'list_string',
'default': '',
},
'ssh_user': {
'type': 'string',
'default': '',
},
'ssh_key_file': {
'type': 'string',
'default': '',
},
}
OPTIONAL_CONFIG = [
'cpu_slots',
'memory',
'modules',
'qacct_command',
'queue',
'shell',
]
REQUIRED_CONFIG = [
'ssh_hosts',
'ssh_user',
'ssh_key_file',
]
def init(self):
"""!
@brief Initialize the class.
"""
# create command-line template for qacct.
self.qacct_command_template = self.template.from_string(self.env['qacct_command'])
self.ssh_hosts = self.env['ssh_hosts'][:]
def validate_configuration(self, *args, **kwargs):
"""!
@brief Make sure that the SSH key file exists.
"""
super(GridEngineExecutor, self).validate_configuration(*args, **kwargs)
if not os.access(self.env['ssh_key_file'], os.R_OK):
raise eva.exceptions.InvalidConfigurationException("The SSH key '%s' is not readable!" % self.env['ssh_key_file'])
def create_qacct_command(self, job_id):
"""!
@brief Return a string with a qacct command that should be used to
check the status of a GridEngine job.
"""
return self.qacct_command_template.render({'job_id': job_id})
def create_job_filename(self, *args):
"""!
@brief Generate a unique job name that can be used as a filename.
"""
return '.'.join(list(args))
def job_header(self):
"""
Create a job header.
"""
header = [
"#!/bin/bash",
"#$ -S %s" % self.env['shell'],
"#$ -pe shmem-1 %d" % self.env['cpu_slots'],
"#$ -l h_vmem=%s" % self.env['memory'],
]
for module in self.env['modules']:
header += ['module load %s' % module]
return header
def compile_command(self, command):
"""
Append a job header to an array of shell commands, and flatten it to a string.
"""
generated = self.job_header() + command
headers, script = split_header_and_job(generated)
combined = headers + script
return '\n'.join(combined) + '\n'
def ensure_ssh_connection(self, job):
"""!
@brief Ensure that a working SSH connection exists. Throws any of
SSH_RETRY_EXCEPTIONS if a working connection could not be established.
"""
if hasattr(self, 'ssh_client'):
try:
job.logger.debug('Checking if SSH connection is usable.')
self.ssh_client.exec_command('true')
job.logger.debug('SSH connection seems usable, proceeding.')
return
except Exception as e:
job.logger.debug('SSH connection not working, trying to establish a working connection: %s', e)
self.create_ssh_connection(job)
def create_ssh_connection(self, job):
"""!
@brief Open an SSH connection to the submit host, and open an SFTP channel.
"""
ssh_host = self.ssh_hosts[0]
self.ssh_hosts = shift_list(self.ssh_hosts)
job.logger.info('Creating SSH connection to %s@%s', self.env['ssh_user'], ssh_host)
self.ssh_client = paramiko.SSHClient()
self.ssh_client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
self.ssh_client.connect(ssh_host,
username=self.env['ssh_user'],
key_filename=self.env['ssh_key_file'],
timeout=SSH_TIMEOUT)
session = self.ssh_client.get_transport().open_session()
paramiko.agent.AgentRequestHandler(session)
self.sftp_client = self.ssh_client.open_sftp()
self.sftp_client.get_channel().settimeout(SSH_TIMEOUT)
def destroy_ssh_connection(self):
"""!
@brief Tear down the SSH connection.
"""
self.ssh_client.close()
del self.sftp_client
del self.ssh_client
def execute_ssh_command(self, command):
"""!
@brief Execute a command remotely using a new SSH channel.
@returns A tuple of (exit_status, stdout, stderr).
"""
stdout = ""
stderr = ""
channel = self.ssh_client.get_transport().open_channel('session',
timeout=SSH_TIMEOUT)
channel.get_pty()
channel.exec_command(command)
def recv_both(channel, stdout, stderr):
if channel.recv_ready():
stdout += channel.recv(SSH_RECV_BUFFER).decode('utf8')
if channel.recv_stderr_ready():
stderr += channel.recv_stderr(SSH_RECV_BUFFER).decode('utf8')
return stdout, stderr
while not channel.exit_status_ready():
time.sleep(0.1)
stdout, stderr = recv_both(channel, stdout, stderr)
exit_status = channel.recv_exit_status()
stdout, stderr = recv_both(channel, stdout, stderr)
channel.close()
return exit_status, stdout, stderr
def execute_async(self, job):
"""!
@brief Execute a job on Grid Engine.
"""
# Create SSH connection
try:
self.ensure_ssh_connection(job)
except SSH_RETRY_EXCEPTIONS as e:
raise eva.exceptions.RetryException(e)
# Generate a GridEngine job name for this job
job.job_name = create_job_unique_id(self.group_id, job.id)
# Generate paths
job.stdout_path = self.create_job_filename(job.job_name, 'stdout')
job.stderr_path = self.create_job_filename(job.job_name, 'stderr')
job.submit_script_path = self.create_job_filename(job.job_name, 'sh')
# Check whether a GridEngine task is already running for this job. If
# it is, we skip submitting the job and jump right to the qacct polling.
job.logger.info('Querying if job is already running.')
command = 'qstat -j %s' % job.job_name
try:
if job.pid is None:
exit_code, stdout, stderr = self.execute_ssh_command(command)
if exit_code == 0:
job.pid = get_job_id_from_qstat_output(stdout)
if job.pid is not None:
job.logger.warning('Job is already running with JOB_ID %d, will not submit a new job.', job.pid)
job.set_status(eva.job.RUNNING)
return
else:
job.logger.info('Job is not running, continuing with submission.')
except SSH_RETRY_EXCEPTIONS as e:
raise eva.exceptions.RetryException(e)
# Create a submit script
script = self.compile_command(job.command)
try:
with self.sftp_client.open(job.submit_script_path, 'w') as submit_script:
submit_script.write(script)
except SSH_RETRY_EXCEPTIONS as e:
raise eva.exceptions.RetryException(e)
# Print the job script to the log
eva.executor.log_job_script(job, script.splitlines())
# Submit the job using qsub
command = ['qsub',
'-N', job.job_name,
'-b', 'n',
'-sync', 'n',
'-o', job.stdout_path,
'-e', job.stderr_path,
]
# Run jobs in a specified queue
if self.env['queue']:
command += ['-q', self.env['queue']]
command += [job.submit_script_path]
command = ' '.join(command)
job.logger.info('Submitting job to GridEngine: %s', command)
# Execute command asynchronously
try:
exit_code, stdout, stderr = self.execute_ssh_command(command)
if exit_code != EXIT_OK:
raise eva.exceptions.RetryException(
'Failed to submit the job to GridEngine, exit code %d' %
exit_code
)
job.pid = get_job_id_from_qsub_output(eva.executor.get_std_lines(stdout)[0])
job.logger.info('Job has been submitted, JOB_ID = %d', job.pid)
job.set_status(eva.job.RUNNING)
job.set_next_poll_time(QACCT_CHECK_INTERVAL_MSECS)
except SSH_RETRY_EXCEPTIONS as e:
raise eva.exceptions.RetryException(e)
def poll_qacct_job(self, job):
"""
Run qacct to check if a job has completed.
:param eva.job.Job job: the Job object to check.
:raises JobNotFinishedException: when the job is not present in qacct output.
:rtype: tuple
:returns: Tuple of ``(exit_code, stdout, stderr)``. Note that the return values are those of the qacct poll command, and not the job submitted via qsub.
"""
check_command = self.create_qacct_command(job.pid)
job.logger.debug('Running: %s', check_command)
exit_code, stdout, stderr = self.execute_ssh_command(check_command)
if exit_code != EXIT_OK:
raise JobNotFinishedException('Job %d is not present in qacct output.' % job.pid)
return (exit_code, stdout, stderr)
def sync(self, job):
"""!
@brief Poll Grid Engine for job completion.
"""
# Create SSH connection and poll for job completion
try:
self.ensure_ssh_connection(job)
exit_code, stdout, stderr = self.poll_qacct_job(job)
except SSH_RETRY_EXCEPTIONS as e:
raise eva.exceptions.RetryException(e)
except JobNotFinishedException as e:
job.logger.debug(e)
job.set_next_poll_time(QACCT_CHECK_INTERVAL_MSECS)
return False
# Parse job exit code
try:
job.exit_code = get_exit_code_from_qacct_output(stdout)
except eva.exceptions.GridEngineParseException as e:
raise eva.exceptions.RetryException(
"Error while parsing exit code: %s" % e
)
        # Reset the process ID; leaving a stale value would interfere with re-running
job.pid = None
# Submit job metrics
stats = parse_qacct_metrics(stdout.splitlines())
for metric, value in stats['metrics'].items():
stats['tags']['adapter'] = job.adapter.config_id
self.statsd.timing(metric, value, stats['tags'])
# Retrieve stdout and stderr
try:
with self.sftp_client.open(job.stdout_path, 'r') as f:
job.stdout = eva.executor.strip_stdout_newlines(f.readlines())
with self.sftp_client.open(job.stderr_path, 'r') as f:
job.stderr = eva.executor.strip_stdout_newlines(f.readlines())
except SSH_RETRY_EXCEPTIONS + (IOError,) as e:
raise eva.exceptions.RetryException(
                'Unable to retrieve stdout and stderr from finished Grid Engine job: %s' % e
)
# Set job status based on exit code
if job.exit_code == EXIT_OK:
job.set_status(eva.job.COMPLETE)
else:
job.set_status(eva.job.FAILED)
# Print stdout and stderr
eva.executor.log_stdout_stderr(job, job.stdout, job.stderr)
# Remove temporary cache files
self.cleanup_job_data(job)
def abort(self, job):
"""
Try to delete the job from GridEngine, and delete the job files.
"""
if job.pid is None:
job.logger.info('Job does not have a JOB_ID, no deletion from GridEngine necessary.')
return
command = 'qdel %d' % job.pid
try:
exit_code, stdout, stderr = self.execute_ssh_command(command)
except SSH_RETRY_EXCEPTIONS + (IOError,) as e:
raise eva.exceptions.RetryException("Unable to submit job for deletion: %s" % e)
if exit_code == 0:
job.logger.info('Job successfully submitted for deletion.')
else:
job.logger.warning('Job deletion failed with exit code %d.' % exit_code)
            job.logger.warning('Ignoring error condition. Standard output and standard error of the delete command follow.')
            eva.executor.log_stdout_stderr(job, eva.executor.get_std_lines(stdout), eva.executor.get_std_lines(stderr))
self.cleanup_job_data(job)
def cleanup_job_data(self, job):
"""
Remove job script, stdout, and stderr caches.
"""
try:
self.sftp_client.unlink(job.submit_script_path)
self.sftp_client.unlink(job.stdout_path)
self.sftp_client.unlink(job.stderr_path)
except SSH_RETRY_EXCEPTIONS + (IOError,) as e:
job.logger.warning('Could not remove script file, stdout and stderr: %s', e)
| gpl-2.0 | -2,740,359,030,533,016,600 | 35.874525 | 165 | 0.554908 | false |
RyanSkraba/beam | sdks/python/apache_beam/examples/snippets/transforms/aggregation/mean_test.py | 1 | 1862 | # coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
import unittest
import mock
from apache_beam.examples.snippets.util import assert_matches_stdout
from apache_beam.testing.test_pipeline import TestPipeline
from . import mean
def check_mean_element(actual):
expected = '''[START mean_element]
2.5
[END mean_element]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
def check_elements_with_mean_value_per_key(actual):
expected = '''[START elements_with_mean_value_per_key]
('🥕', 2.5)
('🍆', 1.0)
('🍅', 4.0)
[END elements_with_mean_value_per_key]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
@mock.patch('apache_beam.Pipeline', TestPipeline)
@mock.patch(
'apache_beam.examples.snippets.transforms.aggregation.mean.print', str)
class MeanTest(unittest.TestCase):
def test_mean_globally(self):
mean.mean_globally(check_mean_element)
def test_mean_per_key(self):
mean.mean_per_key(check_elements_with_mean_value_per_key)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,183,699,974,009,859,000 | 29.883333 | 75 | 0.74258 | false |
cleverhans-lab/cleverhans | cleverhans/tf2/attacks/spsa.py | 1 | 17227 | # pylint: disable=missing-docstring
import tensorflow as tf
tf_dtype = tf.as_dtype("float32")
def spsa(
model_fn,
x,
y,
eps,
nb_iter,
clip_min=None,
clip_max=None,
targeted=False,
early_stop_loss_threshold=None,
learning_rate=0.01,
delta=0.01,
spsa_samples=128,
spsa_iters=1,
is_debug=False,
):
"""Tensorflow 2.0 implementation of SPSA.
This implements the SPSA adversary, as in https://arxiv.org/abs/1802.05666 (Uesato et al. 2018).
SPSA is a gradient-free optimization method, which is useful when the model is non-differentiable,
or more generally, the gradients do not point in useful directions.
:param model_fn: A callable that takes an input tensor and returns the model logits.
:param x: Input tensor.
:param y: Tensor with true labels. If targeted is true, then provide the target label.
:param eps: The size of the maximum perturbation, measured in the L-infinity norm.
:param nb_iter: The number of optimization steps.
:param clip_min: If specified, the minimum input value.
:param clip_max: If specified, the maximum input value.
:param targeted: (optional) bool. Is the attack targeted or untargeted? Untargeted, the default,
will try to make the label incorrect. Targeted will instead try to move in the direction
of being more like y.
:param early_stop_loss_threshold: A float or None. If specified, the attack will end as soon as
the loss is below `early_stop_loss_threshold`.
:param learning_rate: Learning rate of ADAM optimizer.
:param delta: Perturbation size used for SPSA approximation.
:param spsa_samples: Number of inputs to evaluate at a single time. The true batch size
(the number of evaluated inputs for each update) is `spsa_samples *
spsa_iters`
:param spsa_iters: Number of model evaluations before performing an update, where each evaluation
is on `spsa_samples` different inputs.
:param is_debug: If True, print the adversarial loss after each update.
"""
if x.get_shape().as_list()[0] != 1:
raise ValueError("For SPSA, input tensor x must have batch_size of 1.")
optimizer = SPSAAdam(
lr=learning_rate, delta=delta, num_samples=spsa_samples, num_iters=spsa_iters
)
def loss_fn(x, label):
"""
Margin logit loss, with correct sign for targeted vs untargeted loss.
"""
logits = model_fn(x)
loss_multiplier = 1 if targeted else -1
return loss_multiplier * margin_logit_loss(
logits, label, nb_classes=logits.get_shape()[-1]
)
adv_x = projected_optimization(
loss_fn,
x,
y,
eps,
nb_iter,
optimizer,
clip_min,
clip_max,
early_stop_loss_threshold,
is_debug=is_debug,
)
return adv_x
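# Editor's sketch (illustrative, not part of the original module): how `spsa` might be
# called against a tiny, untrained Keras model. The model, input shape and label are
# hypothetical; the batch size of 1 and the clip bounds are requirements of spsa().
def _example_spsa_usage():
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
        tf.keras.layers.Dense(10),
    ])
    x = tf.random.uniform((1, 28, 28, 1), minval=0.0, maxval=1.0)
    y = tf.constant([3])
    return spsa(model, x, y, eps=0.1, nb_iter=10,
                clip_min=0.0, clip_max=1.0, spsa_samples=32, spsa_iters=1)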
class SPSAAdam(tf.optimizers.Adam):
"""Optimizer for gradient-free attacks in https://arxiv.org/abs/1802.05666.
Gradients estimates are computed using Simultaneous Perturbation Stochastic Approximation (SPSA),
combined with the ADAM update rule (https://arxiv.org/abs/1412.6980).
"""
def __init__(
self,
lr=0.01,
delta=0.01,
num_samples=128,
num_iters=1,
compare_to_analytic_grad=False,
):
super(SPSAAdam, self).__init__(lr=lr)
assert num_samples % 2 == 0, "number of samples must be even"
self._delta = delta
self._num_samples = num_samples // 2 # Since we mirror +/- delta later
self._num_iters = num_iters
self._compare_to_analytic_grad = compare_to_analytic_grad
def _get_delta(self, x, delta):
x_shape = x.get_shape().as_list()
delta_x = delta * tf.sign(
tf.random.uniform(
[self._num_samples] + x_shape[1:],
minval=-1.0,
maxval=1.0,
dtype=tf_dtype,
)
)
return delta_x
def _compute_gradients(self, loss_fn, x):
"""Compute a new value of `x` to minimize `loss_fn` using SPSA.
Args:
loss_fn: a callable that takes `x`, a batch of images, and returns a batch of loss values.
`x` will be optimized to minimize `loss_fn(x)`.
x: A list of Tensors, the values to be updated. This is analogous to the `var_list` argument
in standard TF Optimizer.
Returns:
            grads: A list of Tensors, the same length as `x`, containing the SPSA estimates of the gradient of `loss_fn`.
"""
# Assumes `x` is a list, containing a [1, H, W, C] image.If static batch dimension is None,
# tf.reshape to batch size 1 so that static shape can be inferred.
assert len(x) == 1
static_x_shape = x[0].get_shape().as_list()
if static_x_shape[0] is None:
x[0] = tf.reshape(x[0], [1] + static_x_shape[1:])
assert x[0].get_shape().as_list()[0] == 1
x = x[0]
x_shape = x.get_shape().as_list()
def body(i, grad_array):
delta = self._delta
delta_x = self._get_delta(x, delta)
delta_x = tf.concat([delta_x, -delta_x], axis=0)
loss_vals = tf.reshape(
loss_fn(x + delta_x), [2 * self._num_samples] + [1] * (len(x_shape) - 1)
)
avg_grad = tf.reduce_mean(loss_vals * delta_x, axis=0) / delta
avg_grad = tf.expand_dims(avg_grad, axis=0)
new_grad_array = grad_array.write(i, avg_grad)
return i + 1, new_grad_array
def cond(i, _):
return i < self._num_iters
_, all_grads = tf.while_loop(
cond,
body,
loop_vars=[0, tf.TensorArray(size=self._num_iters, dtype=tf_dtype)],
back_prop=False,
parallel_iterations=1,
)
avg_grad = tf.reduce_sum(all_grads.stack(), axis=0)
return [avg_grad]
def _apply_gradients(self, grads, x, optim_state):
"""Given a gradient, make one optimization step.
:param grads: list of tensors, same length as `x`, containing the corresponding gradients
:param x: list of tensors to update
:param optim_state: dict
Returns:
new_x: list of tensors, updated version of `x`
new_optim_state: dict, updated version of `optim_state`
"""
new_x = [None] * len(x)
new_optim_state = {
"t": optim_state["t"] + 1.0,
"m": [None] * len(x),
"u": [None] * len(x),
}
t = new_optim_state["t"]
for i in range(len(x)):
g = grads[i]
m_old = optim_state["m"][i]
u_old = optim_state["u"][i]
new_optim_state["m"][i] = self.beta_1 * m_old + (1.0 - self.beta_1) * g
new_optim_state["u"][i] = self.beta_2 * u_old + (1.0 - self.beta_2) * g * g
m_hat = new_optim_state["m"][i] / (1.0 - tf.pow(self.beta_1, t))
u_hat = new_optim_state["u"][i] / (1.0 - tf.pow(self.beta_2, t))
new_x[i] = x[i] - self.lr * m_hat / (tf.sqrt(u_hat) + self.epsilon)
return new_x, new_optim_state
def init_state(self, x):
"""Initialize t, m, and u"""
optim_state = {
"t": 0.0,
"m": [tf.zeros_like(v) for v in x],
"u": [tf.zeros_like(v) for v in x],
}
return optim_state
def minimize(self, loss_fn, x, optim_state):
"""Analogous to tf.Optimizer.minimize
:param loss_fn: tf Tensor, representing the loss to minimize
:param x: list of Tensor, analogous to tf.Optimizer's var_list
:param optim_state: A possibly nested dict, containing any optimizer state.
Returns:
new_x: list of Tensor, updated version of `x`
new_optim_state: dict, updated version of `optim_state`
"""
grads = self._compute_gradients(loss_fn, x)
return self._apply_gradients(grads, x, optim_state)
def margin_logit_loss(model_logits, label, nb_classes=10):
"""Computes difference between logit for `label` and next highest logit.
The loss is high when `label` is unlikely (targeted by default). This follows the same interface
as `loss_fn` for projected_optimization, i.e. it returns a batch of loss values.
"""
if "int" in str(label.dtype):
logit_mask = tf.one_hot(label, depth=nb_classes, axis=-1)
else:
logit_mask = label
if "int" in str(logit_mask.dtype):
logit_mask = tf.cast(logit_mask, dtype=tf.float32)
try:
label_logits = tf.reduce_sum(logit_mask * model_logits, axis=-1)
except TypeError:
raise TypeError(
"Could not take row-wise dot product between logit mask, of dtype "
+ str(logit_mask.dtype)
+ " and model_logits, of dtype "
+ str(model_logits.dtype)
)
logits_with_target_label_neg_inf = model_logits - logit_mask * 99999
highest_nonlabel_logits = tf.reduce_max(logits_with_target_label_neg_inf, axis=-1)
loss = highest_nonlabel_logits - label_logits
return loss
def _project_perturbation(
perturbation, epsilon, input_image, clip_min=None, clip_max=None
):
"""
Project `perturbation` onto L-infinity ball of radius `epsilon`. Also project into hypercube such
that the resulting adversarial example is between clip_min and clip_max, if applicable.
"""
if clip_min is None or clip_max is None:
raise NotImplementedError(
"_project_perturbation currently has clipping hard-coded in."
)
# Ensure inputs are in the correct range
with tf.control_dependencies(
[
tf.debugging.assert_less_equal(
input_image, tf.cast(clip_max, input_image.dtype)
),
tf.debugging.assert_greater_equal(
input_image, tf.cast(clip_min, input_image.dtype)
),
]
):
clipped_perturbation = tf.clip_by_value(perturbation, -epsilon, epsilon)
new_image = tf.clip_by_value(
input_image + clipped_perturbation, clip_min, clip_max
)
return new_image - input_image
def projected_optimization(
loss_fn,
input_image,
label,
epsilon,
num_steps,
optimizer,
clip_min=None,
clip_max=None,
early_stop_loss_threshold=None,
project_perturbation=_project_perturbation,
is_debug=False,
):
"""
Generic projected optimization, generalized to work with approximate gradients. Used for e.g.
the SPSA attack.
Args:
:param loss_fn: A callable which takes `input_image` and `label` as
arguments, and returns a batch of loss values.
:param input_image: Tensor, a batch of images
:param label: Tensor, a batch of labels
:param epsilon: float, the L-infinity norm of the maximum allowable
perturbation
:param num_steps: int, the number of steps of gradient descent
:param optimizer: A `SPSAAdam` object
:param clip_min: float, minimum pixel value
:param clip_max: float, maximum pixel value
:param project_perturbation: A function, which will be used to enforce
some constraint. It should have the same
signature as `_project_perturbation`.
:param early_stop_loss_threshold: A float or None. If specified, the attack will end if the loss is below
`early_stop_loss_threshold`.
Enabling this option can have several different effects:
- Setting the threshold to 0. guarantees that if a successful attack is found, it is returned.
This increases the attack success rate, because without early stopping the optimizer can accidentally
bounce back to a point where the attack fails.
- Early stopping can make the attack run faster because it may run for fewer steps.
- Early stopping can make the attack run slower because the loss must be calculated at each step.
The loss is not calculated as part of the normal SPSA optimization procedure.
For most reasonable choices of hyperparameters, early stopping makes the attack much faster because
it decreases the number of steps dramatically.
:param is_debug: A bool. If True, print debug info for attack progress.
Returns:
adversarial version of `input_image`, with L-infinity difference less than epsilon, which tries
to minimize loss_fn.
Note that this function is not intended as an Attack by itself. Rather, it is designed as a helper
function which you can use to write your own attack methods. The method uses a tf.while_loop to
optimize a loss function in a single sess.run() call.
"""
assert num_steps is not None
if is_debug:
with tf.device("/cpu:0"):
tf.print("Starting PGD attack with epsilon: %s" % epsilon)
init_perturbation = tf.random.uniform(
tf.shape(input_image),
minval=tf.cast(-epsilon, input_image.dtype),
maxval=tf.cast(epsilon, input_image.dtype),
dtype=input_image.dtype,
)
init_perturbation = project_perturbation(
init_perturbation, epsilon, input_image, clip_min=clip_min, clip_max=clip_max
)
init_optim_state = optimizer.init_state([init_perturbation])
def loop_body(i, perturbation, flat_optim_state):
"""Update perturbation to input image."""
optim_state = tf.nest.pack_sequence_as(
structure=init_optim_state, flat_sequence=flat_optim_state
)
def wrapped_loss_fn(x):
return loss_fn(input_image + x, label)
new_perturbation_list, new_optim_state = optimizer.minimize(
wrapped_loss_fn, [perturbation], optim_state
)
projected_perturbation = project_perturbation(
new_perturbation_list[0],
epsilon,
input_image,
clip_min=clip_min,
clip_max=clip_max,
)
# Be careful with this bool. A value of 0. is a valid threshold but evaluates to False, so we
# must explicitly check whether the value is None.
early_stop = early_stop_loss_threshold is not None
compute_loss = is_debug or early_stop
# Don't waste time building the loss graph if we're not going to use it
if compute_loss:
# NOTE: this step is not actually redundant with the optimizer step.
# SPSA calculates the loss at randomly perturbed points but doesn't calculate the loss at the current point.
loss = tf.reduce_mean(wrapped_loss_fn(projected_perturbation), axis=0)
if is_debug:
with tf.device("/cpu:0"):
tf.print(loss, "Total batch loss")
if early_stop:
i = tf.cond(
tf.less(loss, early_stop_loss_threshold),
lambda: float(num_steps),
lambda: i,
)
return i + 1, projected_perturbation, tf.nest.flatten(new_optim_state)
def cond(i, *_):
return tf.less(i, num_steps)
flat_init_optim_state = tf.nest.flatten(init_optim_state)
_, final_perturbation, _ = tf.while_loop(
cond,
loop_body,
loop_vars=(tf.constant(0.0), init_perturbation, flat_init_optim_state),
parallel_iterations=1,
back_prop=False,
maximum_iterations=num_steps,
)
if project_perturbation is _project_perturbation:
# TODO: this assert looks totally wrong.
# Not bothering to fix it now because it's only an assert.
# 1) Multiplying by 1.1 gives a huge margin of error. This should probably take the difference
# and allow a tolerance of 1e-6 or something like that.
# 2) I think it should probably check the *absolute value* of final_perturbation
perturbation_max = epsilon * 1.1
check_diff = tf.debugging.assert_less_equal(
final_perturbation,
tf.cast(perturbation_max, final_perturbation.dtype),
message="final_perturbation must change no pixel by more than %s"
% perturbation_max,
)
else:
# TODO: let caller pass in a check_diff function as well as
# project_perturbation
check_diff = tf.no_op()
if clip_min is None or clip_max is None:
raise NotImplementedError("This function only supports clipping for now")
check_range = [
tf.debugging.assert_less_equal(
input_image, tf.cast(clip_max, input_image.dtype)
),
tf.debugging.assert_greater_equal(
input_image, tf.cast(clip_min, input_image.dtype)
),
]
with tf.control_dependencies([check_diff] + check_range):
adversarial_image = input_image + final_perturbation
return tf.stop_gradient(adversarial_image)
| mit | 5,910,435,422,267,267,000 | 38.693548 | 120 | 0.605039 | false |
MrLeeh/jsonwatchqt | jsonwatchqt/objectexplorer.py | 1 | 15214 | """
Copyright (c) 2015 by Stefan Lehmann
"""
import os
import sys
import re
import logging
from qtpy.QtCore import QModelIndex, Qt, QAbstractItemModel, QMimeData, \
QByteArray, QDataStream, QIODevice, QPoint
from qtpy.QtGui import QIcon
from qtpy.QtWidgets import QTreeView, QItemDelegate, QSpinBox, \
QDoubleSpinBox, QMenu, QAction, QInputDialog, QDialog
from jsonwatch.abstractjsonitem import AbstractJsonItem
from jsonwatch.jsonnode import JsonNode
from jsonwatch.jsonitem import JsonItem
from jsonwatchqt.itemproperties import ItemPropertyDialog
from jsonwatchqt.utilities import pixmap
from pyqtconfig.qt import pyqtSignal
logger = logging.getLogger("jsonwatchqt.objectexplorer")
def extract_number(s: str):
return float(re.findall('([-+]?[\d.]+)', s)[0])
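# Editor's note (illustrative): extract_number() pulls the first signed decimal out of a
# string, e.g. extract_number("-12.5 degC") would return -12.5.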
class Column():
def __init__(self, name, label=None):
self.name = name
self.label = label or name
class MyItemDelegate(QItemDelegate):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.update = False
def createEditor(self, parent, options, index):
self.update = True
node = index.internalPointer()
if isinstance(node, JsonItem):
if node.type in ('float', 'int'):
editor = QDoubleSpinBox(parent)
editor.setSuffix(node.unit or "")
editor.setRange(node.min or -sys.maxsize,
node.max or sys.maxsize)
editor.setGeometry(options.rect)
editor.setDecimals(node.decimals or 0)
editor.show()
return editor
else:
return super().createEditor(parent, options, index)
def destroyEditor(self, editor, index):
self.update = True
super().destroyEditor(editor, index)
def setEditorData(self, editor, index):
if self.update:
self.update = False
node = index.internalPointer()
if node.type in ('int', 'float'):
try:
editor.setValue(node.value)
except TypeError:
pass
else:
return super().setEditorData(editor, index)
def setModelData(self, editor, model, index):
if isinstance(editor, (QSpinBox, QDoubleSpinBox)):
print(editor.value())
model.setData(index, editor.value())
else:
super().setModelData(editor, model, index)
class JsonDataModel(QAbstractItemModel):
def __init__(self, rootnode: JsonNode, mainwindow, parent=None):
super().__init__(parent)
self.mainwindow = mainwindow
self.root = rootnode
self.root.child_added_callback = self.insert_row
self.columns = [
Column('key'),
Column('name'),
Column('value')
]
def index(self, row, column, parent=QModelIndex()):
parent_node = self.node_from_index(parent)
return self.createIndex(row, column, parent_node.item_at(row))
def parent(self, index=QModelIndex()):
node = self.node_from_index(index)
if node is None:
return QModelIndex()
parent = node.parent
if parent is None:
return QModelIndex()
grandparent = parent.parent
if grandparent is None:
return QModelIndex()
row = grandparent.index(parent)
assert row != -1
return self.createIndex(row, 0, parent)
def data(self, index=QModelIndex(), role=Qt.DisplayRole):
if not index.isValid():
return
node = index.internalPointer()
column = self.columns[index.column()]
if role in (Qt.DisplayRole, Qt.EditRole):
if column.name == 'key':
return node.key
if column.name == 'name':
return node.name
elif column.name == 'value':
if isinstance(node, JsonItem):
if node.value is None:
if node.type != 'bool':
return "-"
else:
if node.type in ('int', 'float'):
return node.value_str() + ' ' + node.unit
elif role == Qt.CheckStateRole:
if column.name == 'value':
if isinstance(node, JsonItem):
if node.type == 'bool':
return Qt.Checked if node.value else Qt.Unchecked
elif role == Qt.DecorationRole:
if column.name == 'key':
if node.up_to_date and self.mainwindow.connected:
return pixmap("emblem_uptodate.png")
else:
return pixmap("emblem_outofdate.png")
def setData(self, index: QModelIndex, value, role=Qt.EditRole):
if not index.isValid():
return False
node = index.internalPointer()
if role == Qt.EditRole:
if isinstance(node, JsonItem):
if node.type in ('float', 'int', None):
node.value = value
try: # PyQt5
self.dataChanged.emit(index, index, [Qt.EditRole])
except TypeError: # PyQt4, PySide
self.dataChanged.emit(index, index)
elif role == Qt.CheckStateRole:
if isinstance(node, JsonItem):
if node.type == 'bool':
node.value = value == Qt.Checked
try: # PyQt5
self.dataChanged.emit(
index, index, [Qt.CheckStateRole])
except TypeError: # PyQt4, PySide
self.dataChanged.emit(index, index)
return True
return False
def flags(self, index: QModelIndex):
flags = (Qt.NoItemFlags | Qt.ItemIsDragEnabled | Qt.ItemIsSelectable |
Qt.ItemIsEnabled)
if index.isValid():
node = self.node_from_index(index)
column = self.columns[index.column()].name
if isinstance(node, JsonItem):
if column == 'value' and not node.readonly:
if not node.type == 'bool':
flags |= Qt.ItemIsEditable
else:
flags |= Qt.ItemIsUserCheckable
return flags
def mimeTypes(self):
return ["application/x-nodepath.list"]
def mimeData(self, indexes):
def path(x):
return "/".join(x.path)
def node(x):
return self.node_from_index(x)
mimedata = QMimeData()
data = QByteArray()
stream = QDataStream(data, QIODevice.WriteOnly)
for path in set(path(node(index)) for index
in indexes if index.isValid()):
stream.writeQString(path)
mimedata.setData("application/x_nodepath.list", data)
return mimedata
def columnCount(self, parent=QModelIndex()):
return len(self.columns)
def rowCount(self, parent=QModelIndex()):
node = self.node_from_index(parent)
return len(node)
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
return self.columns[section].label
else:
return str(section)
def supportedDragActions(self):
return Qt.CopyAction | Qt.MoveAction
def refresh(self):
try: # PyQt5
self.dataChanged.emit(
QModelIndex(), QModelIndex(), [Qt.DisplayRole])
except TypeError: # PyQt4, PySide
self.dataChanged.emit(QModelIndex(), QModelIndex())
def node_from_index(self, index):
return index.internalPointer() if index.isValid() else self.root
def index_from_node(self, node):
def iter_model(parent):
if parent.internalPointer() == node:
return parent
for row in range(self.rowCount(parent)):
index = self.index(row, 0, parent)
if (index is not None and index.isValid() and
index.internalPointer() == node):
return index
res = iter_model(index)
if res is not None:
return res
return iter_model(QModelIndex())
def insert_row(self, jsonitem):
parent_node = jsonitem.parent
row = parent_node.index(jsonitem)
parent = self.index_from_node(parent_node)
if parent is None:
parent = QModelIndex()
self.beginInsertRows(parent, row, row)
self.endInsertRows()
class ObjectExplorer(QTreeView):
nodevalue_changed = pyqtSignal(AbstractJsonItem)
nodeproperty_changed = pyqtSignal(AbstractJsonItem)
def __init__(self, rootnode: JsonNode, parent):
super().__init__(parent)
self.mainwindow = parent
self.setModel(JsonDataModel(rootnode, self.mainwindow))
self.model().dataChanged.connect(self.data_changed)
self.setItemDelegate(MyItemDelegate())
self.setDragDropMode(QTreeView.DragDrop)
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDropIndicatorShown(True)
self.doubleClicked.connect(self.double_clicked)
# context menu
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.show_contextmenu)
# actions
# properties action
self.propertiesAction = QAction(self.tr("properties"), self)
self.propertiesAction.setIcon(QIcon(pixmap("document_properties.png")))
self.propertiesAction.triggered.connect(self.show_properties)
# edit action
self.editAction = QAction(self.tr("edit value"), self)
self.editAction.setShortcut("F2")
self.editAction.setIcon(QIcon(pixmap("edit.png")))
self.editAction.triggered.connect(self.edit_value)
# edit key action
self.editkeyAction = QAction(self.tr("edit key"), self)
self.editkeyAction.setIcon(QIcon(pixmap("kgpg_key1_kgpg.png")))
self.editkeyAction.triggered.connect(self.edit_key)
# insert item action
self.insertitemAction = QAction(self.tr("insert item"), self)
self.insertitemAction.setIcon(QIcon(pixmap("list_add.png")))
self.insertitemAction.triggered.connect(self.insert_item)
# insert node action
self.insertnodeAction = QAction(self.tr("insert node"), self)
self.insertnodeAction.setIcon(QIcon(pixmap("list_add.png")))
self.insertnodeAction.triggered.connect(self.insert_node)
# remove item action
self.removeitemAction = QAction(self.tr("remove"), self)
self.removeitemAction.setIcon(QIcon(pixmap("list_remove")))
self.removeitemAction.triggered.connect(self.remove_item)
def data_changed(self, topleft, bottomright, *args):
node = topleft.internalPointer()
if node is not None and isinstance(node, JsonItem):
self.nodevalue_changed.emit(node)
def double_clicked(self, *args, **kwargs):
index = self.currentIndex()
if not index.isValid():
return
column = self.model().columns[index.column()]
if column.name == "value":
self.edit_value()
else:
self.show_properties()
def edit_key(self):
index = self.currentIndex()
if index.isValid():
node = index.internalPointer()
key, b = QInputDialog.getText(
self, "Edit Json item", "Insert new key for item:",
text=node.key
)
if not b:
return
node.key = key
try: # PyQt5
self.model().dataChanged.emit(index, index, [Qt.DisplayRole])
except TypeError: # PyQt4, PySide
self.model().dataChanged.emit(index, index)
def edit_value(self):
index = self.currentIndex()
if not index.isValid():
return
i = self.model().index(index.row(), 2, index.parent())
self.edit(i)
def insert_item(self):
index = self.currentIndex()
if index.isValid():
node = index.internalPointer()
else:
node = self.model().root
key, b = QInputDialog.getText(
self, "Insert Json item", "Insert key for new item:")
if not b:
return
item = JsonItem(key)
node.add(item)
row = node.index(item)
self.model().beginInsertRows(index, row, row)
self.model().endInsertRows()
def insert_node(self):
index = self.currentIndex()
parentnode = index.internalPointer() or self.model().root
key, b = QInputDialog.getText(
self, "Insert Json node", "Insert key for new node:")
if not b:
return
node = JsonNode(key)
parentnode.add(node)
row = parentnode.index(node)
self.model().beginInsertRows(index, row, row)
self.model().endInsertRows()
def mousePressEvent(self, event):
index = self.indexAt(event.pos())
if not index.isValid():
self.setCurrentIndex(QModelIndex())
super().mousePressEvent(event)
def refresh(self):
self.model().refresh()
def remove_item(self):
index = self.currentIndex()
self.model().beginRemoveRows(index.parent(), index.row(), index.row())
if index.isValid():
node = index.internalPointer()
if node.parent is not None:
node.parent.remove(node.key)
self.model().refresh()
self.model().endRemoveRows()
def show_contextmenu(self, pos: QPoint):
menu = QMenu(self)
index = self.currentIndex()
node = index.internalPointer()
# insert item and node
menu.addAction(self.insertitemAction)
menu.addAction(self.insertnodeAction)
# edit key
if isinstance(node, (JsonNode, JsonItem)):
menu.addSeparator()
menu.addAction(self.editkeyAction)
if isinstance(node, JsonItem):
menu.addAction(self.editAction)
self.editAction.setEnabled(not node.readonly)
# remove
if isinstance(node, (JsonNode, JsonItem)):
menu.addSeparator()
menu.addAction(self.removeitemAction)
# properties
if isinstance(node, JsonItem):
menu.addSeparator()
menu.addAction(self.propertiesAction)
menu.setDefaultAction(self.propertiesAction)
menu.popup(self.viewport().mapToGlobal(pos), self.editAction)
def show_properties(self):
index = self.currentIndex()
node = index.internalPointer()
if not (index.isValid() and isinstance(node, JsonItem)):
return
dlg = ItemPropertyDialog(node, self.parent())
if dlg.exec_() == QDialog.Accepted:
self.nodeproperty_changed.emit(node)
| mit | -7,191,704,010,720,514,000 | 32.073913 | 79 | 0.577034 | false |
SMISC/api | api/app.py | 1 | 15874 | import atexit
import functools
import gc
import socket
import database
import flask
import logging
import time
import json
import sys
from logging.handlers import SysLogHandler
from logging import StreamHandler
from datetime import datetime
from statsd import statsd
from flask import Flask
from ConfigParser import ConfigParser
from functools import wraps
from cassandra.cluster import Cluster as CassandraCluster
from cassandra.auth import PlainTextAuthProvider
from util import TIME_BOT_COMPETITION_START
from util import timeline
from util import cursor
from util import make_json_response
from util import not_implemented
from util import temporal
from util import get_time_anchor
from util import get_current_virtual_time
from util import translate_alpha_time_to_virtual_time
from util import translate_virtual_time_to_alpha_time
from util import nearest_scan
from util import disabled_after_competition_ends
from util import beta_predicate_tweets
from util import beta_predicate_users
from util import beta_predicate_observations
from util import we_are_out_of_beta
from util import timed
from util import track_pageview
from util import process_guess_scores
from util import get_score_bonus
from formatter import UserFormatter, TweetFormatter, GuessFormatter, EdgeFormatter
from search import Search
from detectionteam import DetectionTeam
from guess import Guess
from guess_user import GuessUser
from tuser import TUser
from tuser import TwitterUser
from tweet import Tweet
from scan import Scan
from bot import Bot
from tweet_entity import TweetEntity
config = ConfigParser()
config.read('configuration.ini')
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://%s:%s@/pacsocial?host=%s' % (config.get('postgresql', 'username'), config.get('postgresql', 'password'), config.get('postgresql', 'socket'))
app.config['SQLALCHEMY_ECHO'] = False
database.db.init_app(app)
def cassandrafied(f):
@wraps(f)
def decorator(*args, **kwargs):
cluster = CassandraCluster([config.get('cassandra', 'contact')], auth_provider=PlainTextAuthProvider(username=config.get('cassandra', 'username'), password=config.get('cassandra', 'password')), executor_threads=50)
session = cluster.connect('smisc')
kwargs['cassandra_cluster'] = session
try:
return f(*args, **kwargs)
finally:
session.shutdown()
cluster.shutdown()
# grumble grumble grumble, cassandra people caused memory leaks by assuming atexit is called
for ext in atexit.__dict__['_exithandlers']:
(handler, args, kwargs) = ext
if isinstance(handler, functools.partial) and len(handler.args) > 0 and isinstance(handler.args[0], CassandraCluster) and handler.func.func_name == '_shutdown_cluster':
atexit.__dict__['_exithandlers'].remove(ext)
gc.collect()
return decorator
def require_passcode(f):
@wraps(f)
def decorator(*args, **kwargs):
if 'Authorization' not in flask.request.headers:
return flask.make_response('', 401)
password = flask.request.headers['Authorization'].replace('Bearer ', '')
team = DetectionTeam.query.filter(DetectionTeam.password == password).first()
if team is None:
return flask.make_response('', 403)
kwargs['team_id'] = team.id
return f(*args, **kwargs)
return decorator
@app.route('/clock', methods=['GET'], defaults={'vtime': None})
@app.route('/clock/<vtime>', methods=['GET'])
@timed('page.clock.render')
@make_json_response
@temporal
@track_pageview
def show_clock(vtime):
time_str = lambda t: datetime.fromtimestamp(t+(3600*-8)).strftime("%b %d %Y, %I:%M:%S %p PDT")
virtual = vtime
anchor = get_time_anchor()
min_vtime = TIME_BOT_COMPETITION_START
return json.dumps({
"now": {
"alpha": translate_virtual_time_to_alpha_time(vtime),
"alpha_str":time_str(translate_virtual_time_to_alpha_time(vtime)),
"virtual": vtime,
"virtual_str": time_str(vtime)
},
"minimum": {
"alpha": translate_virtual_time_to_alpha_time(min_vtime),
"alpha_str":time_str(translate_virtual_time_to_alpha_time(min_vtime)),
"virtual": min_vtime,
"virtual_str": time_str(min_vtime)
},
"anchor": anchor,
"alpha_str": time_str(anchor)
})
@app.route('/edges/near/<vtime>/followers/<user_id>', methods=['GET'])
@app.route('/edges/followers/<user_id>', methods=['GET'], defaults={'vtime': None})
@timed('page.edges_followers.render')
@make_json_response
@temporal
@timeline()
@nearest_scan(Scan.SCAN_TYPE_FOLLOWERS)
@cassandrafied
@track_pageview
def timeless_list_followers(cassandra_cluster, vtime, user_id, max_id, since_id, since_count, max_scan_id, min_scan_id):
user = beta_predicate_users(TwitterUser.query.filter(TwitterUser.twitter_id == user_id)).first()
if user is not None:
id_condition = ""
ids = []
wanted_min_id = since_id+1
wanted_max_id = max_id+1
if min_scan_id is not None and max_scan_id is not None:
wanted_min_id = max(since_id+1, min_scan_id)
wanted_max_id = min(max_id+1, max_scan_id)
elif min_scan_id is not None and max_scan_id is None:
wanted_min_id = max(since_id+1, min_scan_id)
elif min_scan_id is None and max_scan_id is not None:
wanted_max_id = min(max_id+1, max_scan_id)
conds = [user_id]
id_condition = 'id >= %s'
conds.append(wanted_min_id)
if wanted_max_id != float('+inf'):
id_condition += ' and id < %s'
conds.append(wanted_max_id)
rows = cassandra_cluster.execute("SELECT id, to_user, from_user, \"timestamp\" FROM tuser_tuser WHERE to_user = %s AND " + id_condition + " ORDER BY id DESC LIMIT " + str(since_count), tuple(conds))
formatter = EdgeFormatter()
return json.dumps(formatter.format(rows))
else:
return flask.make_response('', 404)
@app.route('/edges/explore/<vtime>/<from_user>/to/<to_user>', methods=['GET'])
@timed('page.edges_explore.render')
@make_json_response
@temporal
@timeline()
@nearest_scan(Scan.SCAN_TYPE_FOLLOWERS)
@cassandrafied
@track_pageview
def timeless_explore_edges(cassandra_cluster, vtime, from_user, to_user, max_id, since_id, since_count, max_scan_id, min_scan_id):
to_user = beta_predicate_users(TwitterUser.query.filter(TwitterUser.user_id == to_user)).first()
if to_user is not None:
id_condition = ""
ids = []
wanted_min_id = since_id
wanted_max_id = max_id+1
        if max_scan_id is not None:
            wanted_max_id = min(max_id+1, max_scan_id)
conds = [to_user.user_id, from_user]
id_condition = 'id > %s'
conds.append(wanted_min_id)
if wanted_max_id != float('+inf'):
id_condition += ' and id < %s'
conds.append(wanted_max_id)
logging.info(id_condition)
logging.info(conds)
rows = cassandra_cluster.execute("SELECT id, to_user, from_user, \"timestamp\" FROM tuser_tuser_inspect WHERE to_user = %s AND from_user = %s AND " + id_condition + " ORDER BY id DESC LIMIT " + str(since_count), tuple(conds))
formatter = EdgeFormatter()
return json.dumps(formatter.format(rows))
else:
return flask.make_response('', 404)
@app.route('/user/near/<vtime>', methods=['GET'])
@app.route('/user', methods=['GET'], defaults={'vtime': None})
@timed('page.user_list.render')
@make_json_response
@temporal
@cursor(1E4)
@nearest_scan(Scan.SCAN_TYPE_USER)
@track_pageview
def list_users(vtime, cursor_size, offset, max_scan_id, min_scan_id):
users = beta_predicate_observations(TUser.query.filter(
TUser.id >= min_scan_id,
TUser.id <= max_scan_id
)).order_by(TUser.id.desc()).limit(cursor_size).offset(offset).all()
formatter = UserFormatter()
return json.dumps(formatter.format(users))
@app.route('/user/near/<vtime>/<user_id>', methods=['GET'])
@app.route('/user/<user_id>', methods=['GET'], defaults={'vtime': None})
@timed('page.user_get.render')
@make_json_response
@temporal
@nearest_scan(Scan.SCAN_TYPE_USER)
@track_pageview
def show_user(vtime, user_id, max_scan_id, min_scan_id):
user = beta_predicate_observations(TUser.query.filter(
TUser.user_id == user_id,
TUser.id >= min_scan_id,
TUser.id <= max_scan_id
)).order_by(TUser.id.desc()).limit(1).first()
if user is None:
return flask.make_response('', 404)
else:
formatter = UserFormatter()
return json.dumps(formatter.format(user))
@app.route('/tweets/count/time/<start>/until/<end>', methods=['GET'])
@timed('page.count_tweets_time.render')
@make_json_response
@track_pageview
def count_tweets_by_time(start, end):
tweets = beta_predicate_tweets(Tweet.query.filter(
Tweet.timestamp >= TIME_BOT_COMPETITION_START,
Tweet.timestamp <= translate_alpha_time_to_virtual_time(time.time()),
Tweet.timestamp >= translate_alpha_time_to_virtual_time(int(start)),
Tweet.timestamp < translate_alpha_time_to_virtual_time(int(end))
)).count()
return json.dumps({'tweets': tweets})
@app.route('/tweets/count/id/<start>/until/<end>', methods=['GET'])
@timed('page.count_tweets_id.render')
@make_json_response
@track_pageview
def count_tweets_by_id(start, end):
tweets = beta_predicate_tweets(Tweet.query.filter(
Tweet.timestamp >= TIME_BOT_COMPETITION_START,
Tweet.timestamp <= translate_alpha_time_to_virtual_time(time.time()),
Tweet.tweet_id >= start,
Tweet.tweet_id < end,
)).count()
return json.dumps({'tweets': tweets})
@app.route('/user/near/<vtime>/<user_id>/tweets', methods=['GET'])
@app.route('/user/<user_id>/tweets', methods=['GET'], defaults={'vtime': None})
@timed('page.user_tweets.render')
@make_json_response
@temporal
@timeline()
@track_pageview
def list_tweets_by_user(vtime, max_id, since_id, since_count, user_id):
user = beta_predicate_observations(TUser.query.filter(
TUser.user_id == user_id
)).limit(1).first()
if user is None:
return flask.make_response('', 404)
else:
tweets = beta_predicate_tweets(Tweet.query.filter(
Tweet.timestamp >= TIME_BOT_COMPETITION_START,
Tweet.tweet_id > since_id,
Tweet.tweet_id <= max_id,
Tweet.timestamp <= vtime,
Tweet.user_id == user_id
)).order_by(Tweet.tweet_id.desc()).limit(since_count).all()
formatter = TweetFormatter()
return json.dumps(formatter.format(tweets))
@app.route('/tweets/near/<vtime>', methods=['GET'])
@app.route('/tweets', methods=['GET'], defaults={'vtime': None})
@timed('page.tweets.render')
@make_json_response
@temporal
@timeline()
@track_pageview
def list_tweets(vtime, max_id, since_id, since_count):
tweets = beta_predicate_tweets(Tweet.query.filter(
Tweet.timestamp >= TIME_BOT_COMPETITION_START,
Tweet.tweet_id > since_id,
Tweet.tweet_id <= max_id,
Tweet.timestamp <= vtime
)).order_by(Tweet.tweet_id.desc()).limit(int(since_count)).all()
formatter = TweetFormatter()
return json.dumps(formatter.format(tweets))
@app.route('/search', methods=['GET', 'POST'])
@timed('page.search.render')
@make_json_response
@timeline()
@track_pageview
def search(max_id, since_id, since_count):
tweets_query = beta_predicate_tweets(Tweet.query.filter(
Tweet.timestamp >= TIME_BOT_COMPETITION_START,
Tweet.timestamp < get_current_virtual_time(),
Tweet.tweet_id > since_id,
Tweet.tweet_id <= max_id
)).order_by(Tweet.tweet_id.desc())
debug = []
search = Search(flask.request.values['q'], debug)
tree = search.parse()
ors = search.apply(tweets_query, tree)
if 'users' in flask.request.values:
        tweets_query = tweets_query.filter(Tweet.user_id.in_(flask.request.values.getlist('users')))
tweets = tweets_query.filter(ors).limit(since_count).all()
formatter = TweetFormatter()
resp = json.dumps(formatter.format(tweets))
if 'X-Debug' in flask.request.headers:
return flask.make_response(resp, 200, {'Debug': debug})
else:
return resp
@app.route('/guess/<guess_id>', methods=['GET'])
@timed('page.guess_get.render')
@make_json_response
@require_passcode
@track_pageview
def show_guess(team_id, guess_id):
guess = Guess.query.filter(Guess.team_id == team_id, Guess.id == guess_id).first()
if guess is None:
return flask.make_response('', 404)
scores = process_guess_scores(guess)
formatter = GuessFormatter()
return json.dumps(formatter.format(guess, scores))
@app.route('/guess', methods=['GET'])
@timed('page.guess_list.render')
@make_json_response
@require_passcode
@track_pageview
def list_guesses(team_id):
guesses = Guess.query.filter(Guess.team_id == team_id).all()
scores = dict()
for guess in guesses:
scores[guess.id] = dict()
for guess in guesses:
scores[guess.id] = process_guess_scores(guess)
formatter = GuessFormatter()
return json.dumps(formatter.format(guesses, scores))
@app.route('/guess', methods=['PUT', 'POST'])
@timed('page.guess_make.render')
@make_json_response
@disabled_after_competition_ends
@require_passcode
@track_pageview
def make_guess(team_id):
if 'bots' in flask.request.values:
bot_guesses = flask.request.values.getlist('bots')
if 'bots' not in flask.request.values or not len(bot_guesses):
return flask.make_response('', 400)
guess = Guess(team_id=team_id, timestamp=time.time(), beta=(not we_are_out_of_beta()))
database.db.session.add(guess)
database.db.session.commit()
for bot in bot_guesses:
guess_user = GuessUser(guess_id=guess.id, tuser_id=bot)
database.db.session.add(guess_user)
database.db.session.commit()
guess = Guess.query.filter(Guess.id == guess.id).first()
scores = process_guess_scores(guess)
formatter = GuessFormatter()
return json.dumps(formatter.format(guess, scores))
@app.route('/scorecard', methods=['GET'], defaults={'gtime': None})
@app.route('/scorecard/near/<gtime>', methods=['GET'])
@timed('page.scorecard.render')
@make_json_response
@require_passcode
@track_pageview
def get_scorecard(team_id, gtime):
if gtime is None:
gtime = round(time.time())
guesses = Guess.query.filter(Guess.beta == False, Guess.team_id == team_id, Guess.timestamp <= gtime).all()
net_score = 0
negative_score = 0
positive_score = 0
scores_by_user = dict() # user -> score; prevent same user counting multiple times
for guess in guesses:
guess_scores = process_guess_scores(guess)
for (user, score) in guess_scores.items():
if score is not None:
scores_by_user[user] = score
for (user, score) in scores_by_user.items():
net_score += score
if score > 0:
positive_score += score
elif score < 0:
negative_score += score
(finished, bonus) = get_score_bonus(team_id, guesses)
return json.dumps({
'score_subtotal': net_score,
'score_total': net_score + bonus,
'finished': finished,
'bonus': bonus,
'negative_score': negative_score,
'positive_score': positive_score
})
syslog = SysLogHandler('/dev/log', SysLogHandler.LOG_DAEMON, socket.SOCK_STREAM)
syslog.setLevel(logging.DEBUG)
app.logger.addHandler(syslog)
if __name__ == "__main__":
app.debug = True
app.logger.addHandler(StreamHandler(sys.stdout))
app.run(host='0.0.0.0')
| mit | -732,051,830,725,230,300 | 32.918803 | 233 | 0.658309 | false |
Remiii/remiii-ffmpeg-filters | titles.py | 1 | 1998 | #! /usr/bin/env python3
import json
import os
import sys
import time
from urllib import request
from filter import Filter, FilterChain, Position, TimeRange
from filters import DrawBox, DrawText, Overlay
from resource import Resource, Resources
TMP_DIR = '/tmp'
FILTER_TYPES = {
'box': DrawBox,
'text': DrawText,
'overlay': Overlay
}
resources_dict = {}
timestamp = time.time()
resources = Resources()
filters = FilterChain()
if len(sys.argv) < 4:
print('Usage: ./titles.py <json> <input.mp4> <output.mp4>')
exit(1)
with open(sys.argv[1]) as f:
    spec = json.load(f)
input_path = sys.argv[2]
resource_path = os.path.join(TMP_DIR, input_path + str(timestamp))
os.mkdir(resource_path)
resources.add(input_path)
for i, (filename, url) in enumerate(spec.get('resources', {}).items(), start=1):
resources_dict[filename] = i
path = url
if url.startswith('http'):
path = os.path.join(resource_path, filename)
with request.urlopen(url) as req:
with open(path, 'wb') as f:
f.write(req.read())
resources.add(path)
for f in spec['filters']:
filter_type = f['type']
resource = None
if filter_type == 'overlay':
resource = resources[resources_dict[f['resource']]]
pos = f['position']
position = None
if 'x' in pos and 'y' in pos:
position = Position(pos['x'], pos['y'])
elif 'place' in pos:
position = pos['place']
time_range = None
if 'timestamp' in f:
time_range = TimeRange(f['timestamp'].get('start'),
f['timestamp'].get('end'))
options = f.get('options', {})
filters.append(
FILTER_TYPES[filter_type](position=position, resource=resource,
time_range=time_range, **options),
layer=pos.get('z', 0)
)
print('ffmpeg', '-y', '-r 24', resources, filters,
'\\\n\t-map', '"[{}]"'.format(filters[-1].sink), sys.argv[3])
print('rm -rf', resource_path)
| mit | 4,597,829,575,260,255,700 | 24.948052 | 80 | 0.603103 | false |
borjam/exabgp | src/exabgp/logger/option.py | 2 | 4081 | import os
import sys
import time
import logging
from exabgp.logger.handler import getLogger
from exabgp.logger.format import formater
def echo(_):
return _
class option(object):
logger = None
formater = echo
short = False
level = 'WARNING'
logit = {}
cwd = ''
# where the log should go, stdout, stderr, file, syslog, ...
destination = ''
enabled = {
'pdb': False,
'reactor': False,
'daemon': False,
'processes': False,
'configuration': False,
'network': False,
'wire': False,
'message': False,
'rib': False,
'timer': False,
'routes': False,
'parser': False,
}
@classmethod
def _set_level(cls, level):
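        # Pre-compute, for every known level name, whether it clears the
        # configured threshold so log_enabled() is a simple dict lookup.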
cls.level = level
levels = 'FATAL CRITICAL ERROR WARNING INFO DEBUG NOTSET'
index = levels.index(level)
for level in levels.split():
cls.logit[level] = levels.index(level) <= index
@classmethod
def log_enabled(cls, source, level):
return cls.enabled.get(source, True) and cls.logit.get(level, False)
@classmethod
def load(cls, env):
cls.pid = os.getpid()
cls.cwd = os.getcwd()
cls.short = env.log.short
cls._set_level(env.log.level)
        cls.enabled = {
'pdb': env.debug.pdb,
'reactor': env.log.enable and (env.log.all or env.log.reactor),
'daemon': env.log.enable and (env.log.all or env.log.daemon),
'processes': env.log.enable and (env.log.all or env.log.processes),
'configuration': env.log.enable and (env.log.all or env.log.configuration),
'network': env.log.enable and (env.log.all or env.log.network),
'wire': env.log.enable and (env.log.all or env.log.packets),
'message': env.log.enable and (env.log.all or env.log.message),
'rib': env.log.enable and (env.log.all or env.log.rib),
'timer': env.log.enable and (env.log.all or env.log.timers),
'routes': env.log.enable and (env.log.all or env.log.routes),
'parser': env.log.enable and (env.log.all or env.log.parser),
}
destination = env.log.destination
if destination in ('stdout', 'stderr', 'syslog'):
cls.destination = destination
        elif destination.startswith('file:'):
cls.destination = destination[5:]
else:
cls.destination = 'stdout'
@classmethod
def setup(cls, env):
cls.load(env)
# the time is used as we will need to re-init the logger once
# we have dropped root privileges so that any permission issues
# can be noticed at start time (and not once we try to rotate file for example)
now = str(time.time())
if cls.destination == 'stdout':
cls.logger = getLogger(
f'ExaBGP stdout {now}',
format='%(message)s',
stream=sys.stderr,
level=cls.level,
)
cls.formater = formater(env.log.short, 'stdout')
return
        if cls.destination == 'stderr':
cls.logger = getLogger(
f'ExaBGP stderr {now}',
format='%(message)s',
stream=sys.stderr,
level=cls.level,
)
cls.formater = formater(env.log.short, 'stderr')
return
# if cls.destination == 'file':
# os.path.realpath(os.path.normpath(os.path.join(cls._cwd, destination)))
# _logger = getLogger('ExaBGP file', filename='')
# _format = formater(cls.enabled, 'stderr')
if cls.destination == 'syslog':
cls.logger = getLogger(
f'ExaBGP syslog {now}',
format='%(message)s',
address='/var/run/syslog' if sys.platform == 'darwin' else '/dev/log',
level=cls.level,
)
cls.formater = formater(env.log.short, 'syslog')
# need to re-add remote syslog
| bsd-3-clause | -9,054,028,878,139,946,000 | 30.152672 | 87 | 0.549865 | false |
javiercantero/streamlink | src/streamlink/plugins/raiplay.py | 1 | 1110 | from __future__ import print_function
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
class RaiPlay(Plugin):
url_re = re.compile(r"https?://(?:www\.)?raiplay\.it/dirette/(\w+)/?")
stream_re = re.compile(r"data-video-url.*?=.*?\"([^\"]+)\"")
stream_schema = validate.Schema(
validate.all(
validate.transform(stream_re.search),
validate.any(
None,
validate.all(validate.get(1), validate.url())
)
)
)
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_streams(self):
channel = self.url_re.match(self.url).group(1)
self.logger.debug("Found channel: {0}", channel)
stream_url = http.get(self.url, schema=self.stream_schema)
if stream_url:
return HLSStream.parse_variant_playlist(self.session, stream_url, headers={"User-Agent": useragents.CHROME})
__plugin__ = RaiPlay
| bsd-2-clause | 11,858,274,827,813,140 | 30.714286 | 120 | 0.625225 | false |
mooseman/pdteco | storecmds.py | 1 | 1067 |
# storecmds.py
# Store the requested commands and arguments
# until given the go-ahead to run them.
# The TECO commands we want are -
# Files - Read, write, save, close.
# Lines - Move between lines, within lines. Go to a line.
# Move "x" lines up or down. Move "x" bytes back or forward.
# Editing - Insert, delete, type.
# Looping - repeat a command "x" times.
# Variables - define and use.
# Macros - define and run.
# Separate stacks for arguments and commands.
argstack = []
cmdstack = []
cmddict = {}
# Push arguments onto the argument stack.
def push_args(args):
    for arg in args:
        argstack.append(arg)
# Push a command onto the command stack.
def push_cmd(cmd):
    cmdstack.append(cmd)
# A dict to map TECO command abbreviations to their Python equivalents.
def fillcmddict():
cmddict.update({"T": "print",
"D": "del" ,
"L": "move",
"I": "insert" ,
"S": "search" })
# Print the command dict
fillcmddict()
for x in cmddict.items():
print x
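# Illustrative sketch only: push a TECO "I" (insert) command and its argument
# onto the stacks using the helpers above; execution is wired up elsewhere.
push_cmd("I")
push_args(["Hello, world!"])
print cmdstack, argstack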
| unlicense | 7,182,744,791,214,006,000 | 21.702128 | 71 | 0.554827 | false |
arruda/wowa | wowa/tracker/migrations/0004_copy_item_to_characteritem.py | 1 | 1326 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, migrations
def itemCharacterToCharacterItemModel(apps, schema_editor):
"copy the data of all M2M (Item x Character) in the CharacterItem Model"
Item = apps.get_model("tracker", "Item")
CharacterItem = apps.get_model("tracker", "CharacterItem")
for item in Item.objects.all():
for character in item.characters.all():
character_item = CharacterItem(item=item, character=character)
character_item.save()
def revert(apps, schema_editor):
"revert back deleting the CharacterItem for each Item"
Item = apps.get_model("tracker", "Item")
CharacterItem = apps.get_model("tracker", "CharacterItem")
for item in Item.objects.all():
for character in item.characters.all():
try:
character_item = CharacterItem.objects.get(item=item, character=character)
character_item.delete()
except ObjectDoesNotExist:
pass
class Migration(migrations.Migration):
dependencies = [
('tracker', '0003_item_characters_new'),
]
operations = [
migrations.RunPython(itemCharacterToCharacterItemModel, reverse_code=revert),
]
| mit | -8,234,429,749,966,386,000 | 31.341463 | 90 | 0.668175 | false |
atados/atados-ovp | api/channels/gm/tests/test_tasks.py | 1 | 4525 | from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.core import mail
from ovp.apps.users.tests.fixture import UserFactory
from ovp.apps.organizations.tests.fixture import OrganizationFactory
from ovp.apps.projects.models import Project, Apply, Job, Work
from ovp.apps.core.helpers import get_email_subject
from server.celery_tasks import app
@override_settings(DEFAULT_SEND_EMAIL='sync',
CELERY_TASK_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_TASK_ALWAYS_EAGER=True)
class TestEmailTriggers(TestCase):
def setUp(self):
self.user = UserFactory.create(
email='[email protected]',
password='test_returned',
object_channel='gm'
)
self.organization = OrganizationFactory.create(
name='test org', owner=self.user,
type=0, object_channel='gm'
)
self.project = Project.objects.create(
name='test project', slug='test-slug',
details='abc', description='abc',
owner=self.user, organization=self.organization,
published=False, object_channel='gm'
)
self.project.published = True
self.project.save()
app.control.purge()
def test_applying_schedules_interaction_confirmation_email(self):
"""
Assert cellery task to ask about interaction
is created when user applies to project
"""
mail.outbox = []
Apply.objects.create(user=self.user, project=self.project, object_channel='gm')
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, get_email_subject(
'default', 'atados-askProjectInteractionConfirmation-toVolunteer', 'Ask project confirmation'
))
self.assertIn('vaga test project', mail.outbox[0].body)
def test_applying_schedules_reminder_email(self):
"""
Assert cellery task to remind volunteer
is created when user applies to project
"""
mail.outbox = []
Job.objects.create(
start_date=timezone.now(), end_date=timezone.now(),
project=self.project, object_channel='gm'
)
Apply.objects.create(user=self.user, project=self.project, object_channel='gm')
self.assertEqual(len(mail.outbox), 4)
self.assertEqual(mail.outbox[1].subject, 'Uma ação está chegando... estamos ansiosos para te ver.')
self.assertIn('test project', mail.outbox[1].body)
def test_applying_schedules_ask_about_project_experience_to_volunteer(self):
"""
Assert cellery task to ask volunteer about project
experience is created when user applies to project
"""
mail.outbox = []
work = Work.objects.create(project=self.project, object_channel='gm')
Apply.objects.create(user=self.user, project=self.project, object_channel='gm')
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(mail.outbox[1].subject, 'Conta pra gente como foi sua experiência?')
self.assertIn('>test project<', mail.outbox[1].alternatives[0][0])
mail.outbox = []
work.delete()
Job.objects.create(
start_date=timezone.now(), end_date=timezone.now(),
project=self.project, object_channel='gm'
)
Apply.objects.create(user=self.user, project=self.project, object_channel='gm')
self.assertEqual(mail.outbox[2].subject, 'Conta pra gente como foi sua experiência?')
self.assertIn('>test project<', mail.outbox[2].alternatives[0][0])
def test_publishing_project_schedules_ask_about_experience_to_organization(self):
"""
Assert cellery task to ask organization about project
experience is created when user project is published
"""
mail.outbox = []
project = Project.objects.create(
name='test project', slug='test-slug', details='abc',
description='abc', owner=self.user, published=False,
organization=self.organization, object_channel='gm'
)
Work.objects.create(project=project, object_channel='gm')
project.published = True
project.save()
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(mail.outbox[2].subject, 'Tá na hora de contar pra gente como foi')
self.assertIn('>test project<', mail.outbox[2].alternatives[0][0])
| agpl-3.0 | 2,517,957,798,256,695,300 | 38.640351 | 107 | 0.646825 | false |
skosukhin/spack | var/spack/repos/builtin/packages/r-zoo/package.py | 1 | 1989 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RZoo(RPackage):
"""An S3 class with methods for totally ordered indexed observations. It is
particularly aimed at irregular time series of numeric vectors/matrices and
factors. zoo's key design goals are independence of a particular
index/date/time class and consistency with ts and base R by providing
methods to extend standard generics."""
homepage = "http://zoo.r-forge.r-project.org/"
url = "https://cran.r-project.org/src/contrib/zoo_1.7-14.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/zoo"
version('1.7-14', '8c577a7c1e535c899ab14177b1039c32')
version('1.7-13', '99521dfa4c668e692720cefcc5a1bf30')
depends_on('r-lattice', type=('build', 'run'))
| lgpl-2.1 | -4,436,174,591,688,504,300 | 46.357143 | 79 | 0.68728 | false |
Jumpscale/jumpscale6_core | lib/JumpScale/portal/_socketprocess/SocketProcess.py | 1 | 15874 | import gevent
import sys
from gevent.server import StreamServer
from JumpScale import j
import inspect
import time
import os
from PortalTCPChannels import ManholeSession, WorkerSession, TCPSessionLog
try:
import fcntl
except:
pass
raise RuntimeError("is not working now")
# THERE ARE SOME GOOD IDEAS IN HERE ON HOW TO BUILD A SOCKET SERVER WITH MANHOLE, ...
class PortalProcess():
"""
"""
def __init__(self, mainLoop=None, inprocess=False, cfgdir="", startdir=""):
self.started = False
# self.logs=[]
# self.errors=[]
self.epoch = time.time()
self.lock = {}
# j.errorconditionhandler.setExceptHook() #@does not do much?
# Trigger the key value store extension so the enum is loaded
self.cfgdir = cfgdir
if self.cfgdir == "":
self.cfgdir = "cfg"
# check if the dir we got started from is a link, if so will create a new dir and copy the config files to there
if j.system.fs.isLink(startdir, True):
# we are link do not use this config info
name = j.system.fs.getDirName(startdir + "/", True) + "_localconfig"
newpath = j.system.fs.joinPaths(j.system.fs.getParent(startdir + "/"), name)
if not j.system.fs.exists(newpath):
j.system.fs.createDir(newpath)
pathcfgold = j.system.fs.joinPaths(startdir, "cfg")
j.system.fs.copyDirTree(pathcfgold, newpath)
self.cfgdir = newpath
ini = j.tools.inifile.open(self.cfgdir + "/portal.cfg")
if ini.checkParam("main", "appdir"):
self.appdir = self._replaceVar(ini.getValue("main", "appdir"))
self.appdir=self.appdir.replace("$base",j.dirs.baseDir)
else:
self.appdir = j.system.fs.getcwd()
# self.codepath=ini.getValue("main","codepath")
# if self.codepath.strip()=="":
#self.codepath=j.system.fs.joinPaths( j.dirs.varDir,"actorscode")
# j.system.fs.createDir(self.codepath)
# self.specpath=ini.getValue("main","specpath")
# if self.specpath.strip()=="":
# self.specpath="specs"
# if not j.system.fs.exists(self.specpath):
#raise RuntimeError("spec path does have to exist: %s" % self.specpath)
dbtype = ini.getValue("main", "dbtype").lower().strip()
if dbtype == "fs":
self.dbtype = "FILE_SYSTEM"
elif dbtype == "mem":
self.dbtype = "MEMORY"
elif dbtype == "redis":
self.dbtype = "REDIS"
elif dbtype == "arakoon":
self.dbtype = "ARAKOON"
else:
raise RuntimeError("could not find appropriate core db, supported are: fs,mem,redis,arakoon, used here'%s'"%dbtype)
# self.systemdb=j.db.keyvaluestore.getFileSystemStore("appserversystem",baseDir=self._replaceVar(ini.getValue("systemdb","dbdpath")))
self.wsport = int(ini.getValue("main", "webserverport"))
secret = ini.getValue("main", "secret")
admingroups = ini.getValue("main", "admingroups").split(",")
# self.filesroot = self._replaceVar(ini.getValue("main", "filesroot"))
if self.wsport > 0 and inprocess == False:
self.webserver = j.core.portal.get(self.wsport, cfgdir=cfgdir,secret=secret,admingroups=admingroups)
else:
self.webserver = None
self._greenLetsPath = j.system.fs.joinPaths(j.dirs.varDir, "portal_greenlets", self.wsport)
j.system.fs.createDir(self._greenLetsPath)
sys.path.append(self._greenLetsPath)
self.tcpserver = None
self.tcpservercmds = {}
tcpserverport = int(ini.getValue("main", "tcpserverport", default=0))
if tcpserverport > 0 and inprocess == False:
self.tcpserver = StreamServer(('0.0.0.0', tcpserverport), self.socketaccept)
manholeport = int(ini.getValue("main", "manholeport", default=0))
self.manholeserver = None
if manholeport > 0 and inprocess == False:
self.manholeserver = StreamServer(('0.0.0.0', manholeport), self.socketaccept_manhole)
if inprocess == False and (manholeport > 0 or tcpserverport > 0):
self.sessions = {}
self.nrsessions = 0
# self.messagerouter=MessageRouter()
# self.logserver=None
self.logserver_enable = False
# if logserver==True:
#self.logserver=StreamServer(('0.0.0.0', 6002), self.socketaccept_log)
# self.logserver_enable=True
# elif logserver<>None:
# @todo configure the logging framework
# pass
self.ecserver_enable = False
# self.ecserver=None #errorconditionserver
# if ecserver==True:
#self.ecserver=StreamServer(('0.0.0.0', 6003), self.socketaccept_ec)
# self.ecserver_enable=True
# elif ecserver<>None:
# @todo configure the errorcondition framework
# pass
self.signalserver_enable = False
# self.signalserver=None #signal handling
# if signalserver==True:
#self.signalserver=StreamServer(('0.0.0.0', 6004), self.socketaccept_signal)
# self.signalserver_enable=True
# elif signalserver<>None:
# @todo configure the signal framework
# pass
self.mainLoop = mainLoop
j.core.portal.active = self
self.cfg = ini
# toload=[]
self.bootstrap()
# if self.ismaster:
# self.actorsloader.getActor("system", "master")
# self.master = j.apps.system.master.extensions.master
# # self.master._init()
# # self.master.gridmapPrevious=None
# # self.master.gridMapSave()
# # self.master.gridMapRegisterPortal(self.ismaster,self.ipaddr,self.wsport,self.secret)
# # look for nginx & start
# #self.startNginxServer()
# # self.scheduler = Scheduler()
# else:
# self.master = None
# #from JumpScale.core.Shell import ipshellDebug,ipshell
# # print "DEBUG NOW not implemented yet in appserver6process, need to connect to other master & master client"
# # ipshell()
self.loadFromConfig()
def reset(self):
self.bootstrap()
self.loadFromConfig()
def bootstrap(self):
self.actorsloader.reset()
self.actorsloader._generateLoadActor("system", "contentmanager", actorpath="system/system__contentmanager/")
# self.actorsloader._generateLoadActor("system", "master", actorpath="system/system__master/")
self.actorsloader._generateLoadActor("system", "usermanager", actorpath="system/system__usermanager/")
self.actorsloader.scan("system")
self.actorsloader.getActor("system", "usermanager")
# self.actorsloader.getActor("system", "errorconditionhandler")
# self.actorsloader._getSystemLoaderForUsersGroups()
def loadFromConfig(self, reset=False):
if reset:
j.core.codegenerator.resetMemNonSystem()
j.core.specparser.resetMemNonSystem()
self.webserver.contentdirs = {}
loader = self.actorsloader
self.webserver.loadFromConfig4loader(loader, reset)
def _replaceVar(self, txt):
# txt = txt.replace("$base", j.dirs.baseDir).replace("\\", "/")
txt = txt.replace("$appdir", j.system.fs.getcwd()).replace("\\", "/")
txt = txt.replace("$vardir", j.dirs.varDir).replace("\\", "/")
txt = txt.replace("$htmllibdir", j.html.getHtmllibDir()).replace("\\", "/")
txt = txt.replace("\\", "/")
return txt
# def startNginxServer(self):
# ini = j.tools.inifile.open("cfg/appserver.cfg")
# local = int(ini.getValue("nginx", "local")) == 1
# configtemplate = j.system.fs.fileGetContents(j.system.fs.joinPaths(j.core.portal.getConfigTemplatesPath(), "nginx", "appserver_template.conf"))
# configtemplate = self._replaceVar(configtemplate)
# if local:
# varnginx = j.system.fs.joinPaths(j.dirs.varDir, 'nginx')
# j.system.fs.createDir(varnginx)
# if j.system.platformtype.isWindows():
# apppath = self._replaceVar(ini.getValue("nginx", "apppath")).replace("\\", "/")
# cfgpath = j.system.fs.joinPaths(apppath, "conf", "sites-enabled", "appserver.conf")
# j.system.fs.writeFile(cfgpath, configtemplate)
# apppath2 = j.system.fs.joinPaths(apppath, "start.bat")
# cmd = "%s %s" % (apppath2, apppath)
# cmd = cmd.replace("\\", "/").replace("//", "/")
# extpath = inspect.getfile(self.__init__)
# extpath = j.system.fs.getDirName(extpath)
# maincfg = j.system.fs.joinPaths(extpath, "configtemplates", "nginx", "nginx.conf")
# configtemplate2 = j.system.fs.fileGetContents(maincfg)
# configtemplate2 = self._replaceVar(configtemplate2)
# j.system.fs.writeFile("%s/conf/nginx.conf" % apppath, configtemplate2)
# pid = j.system.windows.getPidOfProcess("nginx.exe")
# if pid != None:
# j.system.process.kill(pid)
# pid = j.system.windows.getPidOfProcess("php-cgi.exe")
# if pid != None:
# j.system.process.kill(pid)
# j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.varDir, "nginx"))
# print "start nginx, cmd was %s" % (cmd)
# j.system.process.executeAsync(cmd, outputToStdout=False)
# else:
# j.system.platform.ubuntu.check()
# j.system.fs.remove("/etc/nginx/sites-enabled/default")
# cfgpath = j.system.fs.joinPaths("/etc/nginx/sites-enabled", "appserver.conf")
# j.system.fs.writeFile(cfgpath, configtemplate)
# if not j.system.fs.exists("/etc/nginx/nginx.conf.backup"):
# j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.varDir, "nginx"))
# maincfg = j.system.fs.joinPaths(j.core.portal.getConfigTemplatesPath(), "nginx", "nginx.conf")
# configtemplate2 = j.system.fs.fileGetContents(maincfg)
# configtemplate2 = self._replaceVar(configtemplate2)
# j.system.fs.copyFile("/etc/nginx/nginx.conf", "/etc/nginx/nginx.conf.backup")
# j.system.fs.writeFile("/etc/nginx/nginx.conf", configtemplate2)
# j.system.process.execute("/etc/init.d/nginx restart")
# j.system.process.execute("/etc/init.d/nginx reload")
# else:
# pass
# #raise RuntimeError("only supported in nginx mode")
def activateActor(self, appname, actor):
if not "%s_%s" % (appname, actor) in self.actors.keys():
# need to activate
result = self.actorsloader.getActor(appname, actor)
if result == None:
# there was no actor
return False
def addTCPServerCmd(self, cmdName, function):
self.tcpservercmds[cmdName] = function
def setTcpServer(self, socketAcceptFunction):
self.tcpserver = StreamServer(('0.0.0.0', 6000), socketAcceptFunction)
def _addsession(self, session):
self.sessions[self.nrsessions] = session
session.sessionnr = self.nrsessions
self.nrsessions += 1
session.ready()
return self.nrsessions - 1
# this handler will be run for each incoming connection in a dedicated greenlet
def socketaccept_manhole(self, socket, address):
ip, port = address
socket.sendall('Manhole For Portal Server \n\n')
session = ManholeSession(ip, port, socket)
self._addsession(session)
session.run()
def socketaccept(self, socket, address):
ip, port = address
session = WorkerSession(ip, port, socket)
self._addsession(session)
def socketaccept_log(self, socket, address):
ip, port = address
session = TCPSessionLog(ip, port, socket)
self._addsession(session)
# def socketaccept_ec(self,socket, address):
# ip,port=address
# session=TCPSessionEC(ip,port,socket)
# self._addsession(session)
# def socketaccept_signal(self,socket, address):
# ip,port=address
# session=TCPSessionSignal(ip,port,socket)
# self._addsession(session)
def _timer(self):
"""
will remember time every 1/10 sec
"""
while True:
# self.epochbin=struct.pack("I",time.time())
self.epoch = time.time()
gevent.sleep(0.1)
# def _taskSchedulerTimer(self):
# """
# every 4 seconds check maintenance queue
# """
# while True:
# gevent.sleep(5)
# self.scheduler.check(self.epoch)
def addQGreenlet(self, appName, greenlet):
"""
"""
if self.webserver == None:
return
qGreenletObject = greenlet()
if qGreenletObject.method == "":
raise RuntimeError("greenlet class needs to have a method")
if qGreenletObject.actor == "":
raise RuntimeError("greenlet class needs to have a actor")
qGreenletObject.server = self
self.webserver.addRoute(function=qGreenletObject.wscall,
appname=appName,
actor=qGreenletObject.actor,
method=qGreenletObject.method,
paramvalidation=qGreenletObject.paramvalidation,
paramdescription=qGreenletObject.paramdescription,
paramoptional=qGreenletObject.paramoptional,
description=qGreenletObject.description, auth=qGreenletObject.auth)
def start(self, key=None, reset=False):
# this is the trigger to start
print "STARTING applicationserver on port %s" % self.wsport
TIMER = gevent.greenlet.Greenlet(self._timer)
TIMER.start()
if self.mainLoop != None:
MAINLOOP = gevent.greenlet.Greenlet(self.mainLoop)
MAINLOOP.start()
self.started = True
if self.tcpserver != None:
self.tcpserver.start()
if self.manholeserver != None:
self.manholeserver.start()
if self.logserver_enable == True:
self.logserver.start()
if self.ecserver_enable == True:
self.ecserver.start()
if self.signalserver_enable == True:
self.signalserver.start()
# self.redirectErrors()
if self.webserver != None:
self.webserver.start(reset=reset)
def processErrorConditionObject(self, eco):
eco.process()
def restartInProcess(self, app):
args = sys.argv[:]
args.insert(0, sys.executable)
apppath = j.system.fs.joinPaths(j.dirs.appDir, app)
max_fd = 1024
for fd in range(3, max_fd):
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
os.chdir(apppath)
os.execv(sys.executable, args)
# def getRedisClient(self,appname,actorname):
# if ini.checkSection("redis"):
# redisip=ini.getValue("redis","ipaddr")
# redisport=ini.getValue("redis","port")
#redisclient=redis.StrictRedis(host=redisip, port=int(redisport), db=0)
# else:
# redisclient=None
# return redisclient
| bsd-2-clause | 6,597,543,739,962,115,000 | 37.158654 | 153 | 0.588824 | false |
domoinc/domo-python-sdk | pydomo/__init__.py | 1 | 26888 | from pydomo.Transport import DomoAPITransport
from pydomo.datasets import DataSetClient
from pydomo.datasets import DataSetRequest
from pydomo.datasets import Schema
from pydomo.datasets import Column
from pydomo.datasets import ColumnType
from pydomo.groups import GroupClient
from pydomo.pages import PageClient
from pydomo.streams import StreamClient
from pydomo.users import UserClient
from pydomo.users import CreateUserRequest
from pydomo.accounts import AccountClient
from pydomo.utilities import UtilitiesClient
from pandas import read_csv
from pandas import DataFrame
from pandas import to_datetime
from io import StringIO
import logging
DOMO = """####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
``.############.` `.###### `#######################``######.` ``.#####
.#######.` `.### .###################` ###.` `###
#######..` .####` ..######.` `## .###############` ##` ..######.` `#
###########` .## .############. `# .###########` # .############.
############. #` `################ ` .#######. ` `################
#############` #################. `` .###. `` #################.
#############` ################## .##` ` `##` ##################
#############` #################. .####` `####` #################.
############. #` `################ ` .######` `######` ` `################
###########` .## .############. # .########`########` # .############.
#######..` .####` ########.` `## .#################` ##` ..######.` `#
.#######.` `.### .#################` ###.` `.##
.############.` `.###### .#################` ######.` `.#####
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################"""
parent_logger = logging.getLogger('pydomo')
parent_logger.setLevel(logging.WARNING)
class Domo:
def __init__(self, client_id, client_secret, api_host='api.domo.com', **kwargs):
if 'logger_name' in kwargs:
self.logger = parent_logger.getChild(kwargs['logger_name'])
else:
self.logger = parent_logger
if kwargs.get('log_level'):
self.logger.setLevel(kwargs['log_level'])
self.logger.debug("\n" + DOMO + "\n")
self.transport = DomoAPITransport(client_id, client_secret, api_host, kwargs.get('use_https', True), self.logger)
self.datasets = DataSetClient(self.transport, self.logger)
self.groups = GroupClient(self.transport, self.logger)
self.pages = PageClient(self.transport, self.logger)
self.streams = StreamClient(self.transport, self.logger)
self.users = UserClient(self.transport, self.logger)
self.accounts = AccountClient(self.transport, self.logger)
self.utilities = UtilitiesClient(self.transport, self.logger)
######### Datasets #########
def ds_meta(self, dataset_id):
"""
Get a DataSet metadata
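        >>> meta = domo.ds_meta('80268aef-e6a1-44f6-a84c-f849d9db05fb')
        >>> print(meta)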
:Parameters:
- `dataset_id`: id of a dataset (str)
:Returns:
- A dict representing the dataset meta-data
"""
return self.datasets.get(dataset_id)
def ds_delete(self, dataset_id, prompt_before_delete=True):
"""
        Delete a DataSet (naming convention equivalent to rdomo)
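        >>> domo.ds_delete('80268aef-e6a1-44f6-a84c-f849d9db05fb', prompt_before_delete=False)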
:Parameters:
- `dataset_id`: id of a dataset (str)
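        - `prompt_before_delete`: ask for interactive confirmation first. Default True (Boolean)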
"""
del_data = 'Y'
if prompt_before_delete:
del_data = input("Permanently delete this data set? This is destructive and cannot be reversed. (Y/n)")
out = 'Data set not deleted'
if del_data == 'Y':
out = self.datasets.delete(dataset_id)
return out
def ds_list(self, df_output = True, per_page=50, offset=0, limit=0):
"""
List DataSets
>>> l = domo.ds_list(df_output=True)
>>> print(l.head())
:Parameters:
- `df_output`: should the result be a dataframe. Default True (Boolean)
- `per_page`: results per page. Default 50 (int)
- `offset`: offset if you need to paginate results. Default 0 (int)
- `limit`: max ouput to return. If 0 then return all results on page. Default 0 (int)
:Returns:
list or pandas dataframe depending on parameters
"""
l = self.datasets.list()
if df_output == False:
out = list(l)
else:
out = DataFrame(list(l))
return out
def ds_query(self, dataset_id, query, return_data=True):
"""
Evaluate query and return dataset in a dataframe
>>> query = {"sql": "SELECT * FROM table LIMIT 2"}
>>> ds = domo.ds_query('80268aef-e6a1-44f6-a84c-f849d9db05fb', query)
>>> print(ds.head())
:Parameters:
- `dataset_id`: id of a dataset (str)
- `query`: query object (dict)
- `return_data`: should the result be a dataframe. Default True (Boolean)
:Returns:
dict or pandas dataframe depending on parameters
"""
output = self.datasets.query(dataset_id, query)
if(return_data == True):
output = DataFrame(output['rows'], columns = output['columns'])
return output
def ds_get(self, dataset_id):
"""
Export data to pandas Dataframe
>>> df = domo.ds_get('80268aef-e6a1-44f6-a84c-f849d9db05fb')
>>> print(df.head())
:Parameters:
- `dataset_id`: id of a dataset (str)
:Returns:
pandas dataframe
"""
csv_download = self.datasets.data_export(dataset_id, include_csv_header=True)
content = StringIO(csv_download)
df = read_csv(content)
# Convert to dates or datetimes if possible
for col in df.columns:
if df[col].dtype == 'object':
try:
df[col] = to_datetime(df[col])
except ValueError:
pass
return df
def ds_create(self, df_up, name, description=''):
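        """
        Create a new DataSet from a pandas DataFrame and upload its rows.
        A placeholder two-column schema is registered first; the stream upload
        is expected to replace it with the DataFrame's actual schema.
        >>> # illustrative only: df_up is any pandas DataFrame already in memory
        >>> ds_id = domo.ds_create(df_up, 'New DataSet', 'created via pydomo')
        :Parameters:
          - `df_up`: pandas DataFrame to upload
          - `name`: name of the new DataSet (str)
          - `description`: description of the new DataSet. Default '' (str)
        :Returns:
          id of the new DataSet (str)
        """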
dsr = DataSetRequest()
dsr.name = name
dsr.description = description
dsr.schema = Schema([Column(ColumnType.STRING, 'tt1'),
Column(ColumnType.STRING, 'tt2')])
new_ds_info = self.datasets.create(dsr)
self.utilities.stream_upload(new_ds_info['id'],df_up,warn_schema_change=False)
return new_ds_info['id']
def ds_update(self, ds_id, df_up):
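        """
        Update an existing DataSet from a pandas DataFrame via a stream upload.
        >>> # illustrative only: ds_id is the id of an existing DataSet
        >>> domo.ds_update(ds_id, df_up)
        :Parameters:
          - `ds_id`: id of the DataSet to update (str)
          - `df_up`: pandas DataFrame holding the new data
        """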
return self.utilities.stream_upload(ds_id, df_up)
######### PDP #########
def pdp_create(self, dataset_id, pdp_request):
"""
Create a PDP policy
>>> policy = {
"name": "Only Show Attendees",
"filters": [ {
"column": "Attending",
"values": [ "TRUE" ],
"operator": "EQUALS"
} ],
"users": [ 27 ]
}
>>> domo.pdp_create('4405ff58-1957-45f0-82bd-914d989a3ea3', policy)
{"id" : 8, "type": "user", "name": "Only Show Attendees"
, "filters": [{"column": "Attending", "values": [ "TRUE" ], "operator": "EQUALS"
, "not": false } ], "users": [ 27 ],"groups": [ ]}
:Parameters:
- `dataset_id`: id of the dataset PDP will be applied to (String) Required
Policy Object:
- `name`: Name of the Policy (String) Required
- `filters[].column`: Name of the column to filter on (String) Required
- `filters[].not`: Determines if NOT is applied to the filter operation (Boolean) Required
- `filters[].operator`: Matching operator (EQUALS) (String) Required
- `filters[].values[]`: Values to filter on (String) Required
- `type`: Type of policy (user or system) (String) Required
- `users`: List of user IDs the policy applies to (array) Required
- `groups`: List of group IDs the policy applies to (array) Required
"""
return self.datasets.create_pdp(dataset_id, pdp_request)
def pdp_delete(self, dataset_id, policy_id):
"""
Delete PDP Policy
>>> domo.pdp_delete('4405ff58-1957-45f0-82bd-914d989a3ea3', 35)
:Parameters:
- `dataset_id`: id of the dataset PDP will be applied to (String) Required
- `policy_id`: id of the policy to delete (String) Required
"""
return self.datasets.delete_pdp(dataset_id, policy_id)
def pdp_list(self, dataset_id, df_output = True):
"""
List PDP policies
>>> l = domo.pdp_list(df_output=True)
>>> print(l.head())
:Parameters:
- `dataset_id`: id of dataset with PDP policies (str) Required
- `df_output`: should the result be a dataframe. Default True (Boolean)
:Returns:
list or pandas dataframe depending on parameters
"""
output = self.datasets.list_pdps(dataset_id)
if(df_output == True):
output = DataFrame(output)
return output
def pdp_update(self, dataset_id, policy_id, policy_update):
"""
Update a PDP policy
>>> policy = {
"name": "Only Show Attendees",
"filters": [ {
"column": "Attending",
"values": [ "TRUE" ],
"operator": "EQUALS"
} ],
"users": [ 27 ]
}
        >>> domo.pdp_update('4405ff58-1957-45f0-82bd-914d989a3ea3', 4, policy)
{"id" : 8, "type": "user", "name": "Only Show Attendees"
, "filters": [{"column": "Attending", "values": [ "TRUE" ], "operator": "EQUALS"
, "not": false } ], "users": [ 27 ],"groups": [ ]}
:Parameters:
- `dataset_id`: id of the dataset PDP will be applied to (String) Required
- `policy_id`: id of the PDP pollicy that will be updated (String) Required
Policy Object:
- `name`: Name of the Policy (String) Required
- `filters[].column`: Name of the column to filter on (String) Required
- `filters[].not`: Determines if NOT is applied to the filter operation (Boolean) Required
- `filters[].operator`: Matching operator (EQUALS) (String) Required
- `filters[].values[]`: Values to filter on (String) Required
- `type`: Type of policy (user or system) (String) Required
- `users`: List of user IDs the policy applies to (array) Required
- `groups`: List of group IDs the policy applies to (array) Required
"""
return self.datasets.update_pdp(dataset_id, policy_id, policy_update)
######### Pages #########
def page_create(self, name, **kwargs):
"""Create a new page.
>>> page = {'name':'New Page'}
>>> new_page = domo.pages.create(**page)
>>> print(new_page)
    {'id': 123456789, 'parentId': 0, 'name': 'New Page',
'locked': False, 'ownerId': 12345, 'cardIds': [],
'visibility': {'userIds': 12345}}
:Parameters:
- `name`: The name of the new page
- `parentId`: (optional) If present create page as subpage
- `locked`: (optional) whether to lock the page
- `cardIds`: (optional) cards to place on the page
- `visibility`: (optional) dict of userIds and/or groupIds to
give access to
:Returns:
- A dict representing the page
"""
return self.pages.create(name, **kwargs)
def page_get(self, page_id):
"""Get a page.
>>> page = domo.pages.get(page_id)
>>> print(page)
{'id': 123456789, 'parentId': 0, 'name': 'My Page',
'locked': False, 'ownerId': 12345, 'cardIds': [],
'visibility': {'userIds': 12345}}
:Parameters:
- `page_id`: ID of the page to get
:returns:
- A dict representing the page
"""
return self.pages.get(page_id)
def page_delete(self, page_id):
"""Delete a page.
:Parameters:
- `page_id`: ID of the page to delete
"""
return self.pages.delete(page_id)
def collections_create(self, page_id, title, **kwargs):
"""Create a collection on a page.
>>> collection = domo.pages.create_collection(page_id,
'Collection')
>>> print(collection)
{'id': 1234321, 'title': 'Collection', 'description': '',
'cardIds': []}
:Parameters:
- `page_id`: ID of the page to create a collection on
- `title`: The title of the collection
- `description`: (optional) The description of the collection
- `cardIds`: (optional) cards to place in the collection
:Returns:
- A dict representing the collection
"""
return self.pages.create_collection(page_id, title, **kwargs)
def page_get_collections(self, page_id):
"""Get a collections of a page
>>> print(domo.pages.get_collections(page_id))
[{'id': 1234321, 'title': 'Collection', 'description': '',
'cardIds': []}]
:Parameters:
- `page_id`: ID of the page
:Returns:
- A list of dicts representing the collections
"""
return self.pages.get_collections(page_id)
def collections_update(self, page_id, collection_id=None, **kwargs):
"""Update a collection of a page.
>>> collections = domo.pages.get_collections(page_id)
>>> print(collections)
[{'id': 1234321, 'title': 'Collection', 'description': '',
'cardIds': []}]
>>> collection_id = collections[0]['id']
>>> domo.pages.update_collection(page_id, collection_id,
description='description',
cardIds=[54321, 13579])
>>> print(domo.pages.get_collections(page_id))
[{'id': 1234321, 'title': 'Collection',
'description': 'description', 'cardIds': [54321, 13579]}]
# Using **kwargs:
>>> collections = domo.pages.get_collections(page_id)
>>> collections[0]['description'] = 'Description'
>>> domo.pages.update_collection(page_id, **collections[0])
:Parameters:
- `page_id`: ID of the page the collection is on
- `collection_id`: ID of the collection. Can also be provided
by supplying `id` to **kwargs. This allows for calling
get_collections, updating one of the returned collections,
then passing it to update_collection.
- `title`: (optional) update the title
- `description`: (optional) update the description
- `cardIds`: (optional) update cards in the collection
:Returns:
- A dict representing the collection
"""
return self.pages.update_collection(page_id, collection_id, **kwargs)
def collections_delete(self, page_id, collection_id):
"""Delete a collection from a page.
:Parameters:
- `page_id`: ID of the page the collection is on
- `collection_id`: ID of the collection to delete
"""
return self.pages.delete_collection(page_id, collection_id)
def page_list(self, per_page=50, offset=0, limit=0):
"""List pages.
Returns a list of dicts (with nesting possible)
If limit is supplied and non-zero, returns up to limit pages
"""
return list(self.pages.list())
def page_update(self, page_id=None, **kwargs):
"""Update a page.
>>> print(domo.pages.get(page_id))
{'id': 123456789, 'parentId': 0, 'name': 'My Page',
'locked': False, 'ownerId': 12345, 'cardIds': [],
'visibility': {'userIds': 12345}}
>>> domo.pages.update(page_id, locked=True,
cardIds=[54321, 13579])
>>> print(domo.pages.get(page_id))
{'id': 123456789, 'parentId': 0, 'name': 'My Page',
'locked': True, 'ownerId': 12345, 'cardIds': [54321, 13579],
'visibility': {'userIds': 12345}}
# Using **kwargs:
>>> page = domo.pages.get(page_id)
>>> page['cardIds'].append(new_card_id)
>>> domo.pages.update(**page)
:Parameters:
- `page_id`: ID of the page to update. Can also be provided
by supplying `id` to **kwargs. This allows for calling get,
updating the returned object, then passing it to update.
- `name`: (optional) rename the page
- `parentId`: (optional) turn page into subpage, or subpage
into top-level page if parentId is present and falsey
- `ownerId`: (optional) change owner of the page
- `locked`: (optional) lock or unlock the page
- `collectionIds`: (optional) reorder collections on page
- `cardIds`: (optional) specify which cards to have on page
- `visibility`: (optional) change who can see the page
"""
return self.pages.update(page_id, **kwargs)
######### Groups #########
def groups_add_users(self, group_id, user_id):
"""
Add a User to a Group
"""
if isinstance(user_id,list):
for x in user_id:
self.groups.add_user(group_id, x)
else:
self.groups.add_user(group_id, user_id)
return 'success'
def groups_create(self, group_name, users=-1, active='true'):
"""
Create a Group
"""
req_body = {'name':group_name,'active':active}
grp_created = self.groups.create(req_body)
if (not isinstance(users,list) and users > 0) or isinstance(users,list):
self.groups_add_users(grp_created['id'],users)
return grp_created
def groups_delete(self, group_id):
"""
Delete a Group
"""
existing_users = self.groups_list_users(group_id)
self.groups_remove_users(group_id,existing_users)
return self.groups.delete(group_id)
def groups_get(self, group_id):
"""
Get a Group Definition
"""
return self.groups.get(group_id)
def groups_list(self):
"""
List all groups in Domo instance in a pandas dataframe.
"""
grps = []
n_ret = 1
off = 0
batch_size = 500
while n_ret > 0:
gg = self.groups.list(batch_size,off*batch_size)
grps.extend(gg)
n_ret = gg.__len__()
off += 1
return DataFrame(grps)
def groups_list_users(self, group_id):
"""
List Users in a Group
"""
user_list = []
n_ret = 1
off = 0
batch_size=500
while n_ret > 0:
i_users = self.groups.list_users(group_id,limit=batch_size,offset=off*batch_size)
user_list.extend(i_users)
n_ret = i_users.__len__()
off += 1
return user_list
def groups_remove_users(self, group_id, user_id):
"""
        Remove a User from a Group
"""
if isinstance(user_id,list):
for x in user_id:
self.groups.remove_user(group_id, x)
else:
self.groups.remove_user(group_id, user_id)
return 'success'
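    # Illustrative usage of the group helpers above (IDs and names are
    # hypothetical):
    #   grp = domo.groups_create('Data Team', users=[27, 42])
    #   domo.groups_add_users(grp['id'], 99)
    #   members = domo.groups_list_users(grp['id'])
    #   domo.groups_delete(grp['id'])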
######### Accounts #########
def accounts_list(self):
"""List accounts.
        Pagination is handled internally; all accounts are returned.
        >>> list(domo.accounts.list())
        [{'id': '40', 'name': 'DataSet Copy Test', ...},
        {'id': '41', 'name': 'DataSet Copy Test2', ...}]
:returns:
- A list of dicts (with nesting possible)
"""
return list(self.accounts.list())
def accounts_get(self, account_id):
"""Get a account.
>>> account = domo.accounts.get(account_id)
>>> print(account)
{'id': '40', 'name': 'DataSet Copy Test', 'valid': True, 'type': {'id': 'domo-csv', 'properties': {}}}
:Parameters:
- `account_id`: ID of the account to get (str)
:returns:
- A dict representing the account
"""
return self.accounts.get(account_id)
def accounts_delete(self, account_id):
"""Delete a account.
:Parameters:
- `account_id`: ID of the account to delete
"""
return self.accounts.delete(account_id)
def accounts_create(self, **kwargs):
"""Create a new account.
>>> account = { 'name': 'DataSet Copy Test', 'valid': True, 'type': {'id': 'domo-csv', 'properties': {}}}
>>> new_account = domo.accounts.create(**account)
>>> print(new_account)
{'name': 'DataSet Copy Test', 'valid': True, 'type': {'id': 'domo-csv', 'properties': {}}}
:Returns:
- A dict representing the account
"""
return self.accounts.create(**kwargs)
def accounts_update(self, account_id, **kwargs):
"""Update a account.
>>> print(domo.accounts.get(account_id))
{'id': '40', 'name': 'DataSet Copy Test', 'valid': True, 'type': {'id': 'domo-csv', 'properties': {}}}
        >>> updatedAccount = {'name': 'DataSet Copy Test2', 'valid': True, 'type': {'id': 'domo-csv', 'properties': {}}}
>>> domo.accounts.update(account_id, **updatedAccount)
>>> print(domo.accounts.get(account_id))
        {'id': '40', 'name': 'DataSet Copy Test2', 'valid': True, 'type': {'id': 'domo-csv', 'properties': {}}}
:Parameters:
- `account_id`: ID of the account to update.
- `kwargs`: New account object
"""
return self.accounts.update(account_id, **kwargs)
######### Users #########
def users_add(self, x_name, x_email, x_role, x_sendInvite=False):
uu = CreateUserRequest()
uu.name = x_name
uu.email = x_email
uu.role = x_role
return self.users.create(uu,x_sendInvite)
def users_get(self, user_id):
return self.users.get(user_id)
def users_list(self,df_output=True):
return self.users.list_all(df_output)
def users_update(self, user_id, user_def):
return self.users.update(user_id, user_def)
def users_delete(self, user_id):
return self.users.delete(user_id)
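    # Illustrative usage of the user helpers above (values are hypothetical):
    #   new_user = domo.users_add('Ada Lovelace', '[email protected]', 'Participant')
    #   everyone = domo.users_list()
    #   domo.users_delete(new_user['id'])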
| mit | 4,001,018,997,861,756,000 | 38.195335 | 121 | 0.449643 | false |
us-ignite/us_ignite | us_ignite/hubs/migrations/0002_auto__add_field_hub_is_homepage.py | 1 | 18035 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Hub.is_homepage'
db.add_column(u'hubs_hub', 'is_homepage',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Hub.is_homepage'
db.delete_column(u'hubs_hub', 'is_homepage')
models = {
u'apps.application': {
'Meta': {'ordering': "('-is_featured', 'created')", 'object_name': 'Application'},
'acknowledgments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'assistance': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'awards': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apps.Domain']", 'null': 'True', 'blank': 'True'}),
'features': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['apps.Feature']", 'symmetrical': 'False', 'blank': 'True'}),
'features_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'blank': 'True'}),
'impact_statement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'membership_set'", 'symmetrical': 'False', 'through': u"orm['apps.ApplicationMembership']", 'to': u"orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ownership_set'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'blank': 'True'}),
'slug': ('us_ignite.common.fields.AutoUUIDField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'stage': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
},
u'apps.applicationmembership': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApplicationMembership'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apps.Application']"}),
'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'apps.domain': {
'Meta': {'object_name': 'Domain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
u'apps.feature': {
'Meta': {'object_name': 'Feature'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'hubs.hub': {
'Meta': {'ordering': "('-is_featured', 'created')", 'object_name': 'Hub'},
'applications': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['apps.Application']", 'symmetrical': 'False', 'blank': 'True'}),
'connections': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'estimated_passes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'experimentation': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'features': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['apps.Feature']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'blank': 'True'}),
'is_advanced': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'network_speed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.NetworkSpeed']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organizations.Organization']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
},
u'hubs.hubactivity': {
'Meta': {'ordering': "('-created',)", 'object_name': 'HubActivity'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.Hub']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'hubs.hubappmembership': {
'Meta': {'ordering': "('-created',)", 'object_name': 'HubAppMembership'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apps.Application']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.Hub']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'hubs.hubmembership': {
'Meta': {'ordering': "('-created',)", 'object_name': 'HubMembership'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.Hub']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'hubs.hubrequest': {
'Meta': {'ordering': "('created',)", 'object_name': 'HubRequest'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.Hub']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
},
u'hubs.networkspeed': {
'Meta': {'object_name': 'NetworkSpeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
u'organizations.organization': {
'Meta': {'object_name': 'Organization'},
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'interest_ignite': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interests': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Interest']", 'symmetrical': 'False', 'blank': 'True'}),
'interests_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'through': u"orm['organizations.OrganizationMember']", 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'blank': 'True'}),
'resources_available': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
},
u'organizations.organizationmember': {
'Meta': {'unique_together': "(('user', 'organization'),)", 'object_name': 'OrganizationMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organizations.Organization']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'profiles.interest': {
'Meta': {'ordering': "('name',)", 'object_name': 'Interest'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
}
}
complete_apps = ['hubs'] | bsd-3-clause | 7,241,968,358,396,634,000 | 84.075472 | 226 | 0.555531 | false |
mupif/mupif | mupif/examples/obsolete/Example11-stacTM/Example11-stacTM/mechanicalServer.py | 1 | 1781 | import os,sys
sys.path.extend(['..','../../..','../Example10-stacTM-local','../Example11-thermoMechanical' ])#Path to demoapp
from mupif import *
import demoapp
import logging
log = logging.getLogger()
## Local setup - nameserver, thermal server, mechanical server, steering script.
## All runs on a local machine ##
#import conf as cfg
#mechanical = demoapp.mechanical('inputM11.in', '.')
#log.info(mechanical.getApplicationSignature())
##locate nameserver and register application
#PyroUtil.runAppServer(server=cfg.server2, port=cfg.serverPort2, nathost=cfg.server2, natport=cfg.serverPort2, nshost=cfg.nshost, nsport=cfg.nsport, appName='mechanicalServer1', hkey=cfg.hkey, app=mechanical)
## Distributed setup using VPN and peer-to-peer connection. Nameserver remote, thermal server remote.
## Mechanical server local, steering script local
import conf_vpn as cfg
mechanical = demoapp.mechanical('inputM11.in', '.')
log.info(mechanical.getApplicationSignature())
#locate nameserver and register application
PyroUtil.runAppServer(server=cfg.server3, port=cfg.serverPort3, nathost=cfg.server3, natport=cfg.serverPort3, nshost=cfg.nshost, nsport=cfg.nsport, appName='mechanicalServer1', hkey=cfg.hkey, app=mechanical)
##Run a daemon for jobManager on this machine
#daemon = cfg.Pyro4.Daemon(host='127.0.0.1', port=44382)
##Run job manager on a server
#jobMan = JobManager.SimpleJobManager2(daemon, ns, None, cfg.jobManName, cfg.portsForJobs, cfg.jobManWorkDir, os.getcwd(), 'thermalServerConfig', cfg.jobMan2CmdPath, cfg.maxJobs, cfg.socketApps)
##set up daemon with JobManager
#uri = daemon.register(jobMan)
##register JobManager to nameServer
#ns.register(cfg.jobManName, uri)
#print ("Daemon for JobManager runs at " + str(uri))
##waits for requests
#daemon.requestLoop()
| lgpl-3.0 | 7,215,321,499,257,930,000 | 44.666667 | 208 | 0.773723 | false |
tiarno/odspdfcrop | odspdfcrop.py | 1 | 12583 | """
:platform: Unix, Windows
:synopsis: Given a directory of numbered PDF files,
optionally split them into one-page PDFs while retaining the
numeric order, and crop white space on all sides of each PDF.
:requirements: pyPdf package and access to Ghostscript executable (for finding the bounding box)
:assumption: The original PDF files contain information in an ordered stream,
    and are named according to the following pattern::
somefile.pdf somefile1.pdf somefile2.pdf somefile3.pdf and so on
.. moduleauthor:: Tim Arnold <[email protected]>
"""
import argparse
import multiprocessing
import multiprocessing.forking
import os
import re
import shutil
import subprocess
import sys
import time
'Attempt to use pyPdf; use PyPDF2 if that fails'
try:
import pyPdf
except ImportError:
import PyPDF2 as pyPdf
pat = re.compile(r'([\w_\d]+?)(\d+)\.pdf')
def get_filedigit(fdict):
''' Returns the number at the end of a file-stem.
Used as sort-key function to sort a list of dictionaries
Args:
fdict: a dictionary of file information
Returns:
number at the end of the file-stem
Example:
fdict = {'name': 'one12.pdf', }
digit = 12
'''
matched = re.search(pat, fdict['name'])
if matched:
digit = int(matched.group(2))
else:
digit = 0
return digit
def get_stems(files):
''' Returns a list of file-stems from a list of file names.
Used to organize files by stem name.
Args:
list of complete file names
Returns:
list of file stems computed from complete names
Example::
files = ['one.pdf', 'one1.pdf, 'one2.pdf',
'two.pdf', 'two1.pdf']
stems = ['one', 'two']
'''
stems = set()
for name in files:
filedigit = re.search(pat, name)
if filedigit:
stem = filedigit.group(1)
else:
stem = os.path.splitext(name)[0]
stems.add(stem)
return stems
def write_page(filename, obj, pagenum, crop=False):
'''Write a PDF object to disk.
Args:
:filename: the file to create
:obj: the PDF object in memory
:pagenum: the page in the PDF to write
:crop: flag indicating whether the function is
called from the cropping process
Used for splitting pdfs by page and writing cropped objects
to disk. If called from the cropping process, add metadata to
the PDF so we don't try to split or crop it in some subsequent run.
Returns: None
'''
p = pyPdf.PdfFileWriter()
if crop:
infoDict = p._info.getObject()
infoDict.update({pyPdf.generic.NameObject('/Cropped'):
pyPdf.generic.createStringObject(u'True')})
page = obj.getPage(pagenum)
p.addPage(page)
p.write(open(filename, 'wb'))
def get_bbox(ghostscript, filename):
'''Get the Bounding Box of a page from a PDF file, using Ghostscript
Args:
:ghostscript: the full path to the Ghostscript executable
:filename: the name of the PDF file to query for the bounding box.
Used to crop a PDF object
Returns:
:bounds: a 4-element list of floats representing the bounding box.
'''
cmd = '%s -sDEVICE=bbox -dBATCH -dNOPAUSE -dQUIET %s' % (ghostscript, filename)
s = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
s = s[s.rindex('HiResBoundingBox:') + 17:].strip()
str_bounds = s.split()
if len(str_bounds) > 4:
print '\nERROR for %s: %s' % (filename, ' '.join(str_bounds[4:]))
str_bounds = str_bounds[:4]
try:
bounds = map(float, str_bounds)
except ValueError as e:
print '%s\nSkipping %s: Bad bounding box' % (e, filename)
bounds = list()
return bounds
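# Illustrative call (executable path and numbers are hypothetical):
#   get_bbox('c:\\bin\\gswin64c', 'pdf/report1.pdf') -> [54.0, 72.3, 558.6, 720.1]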
def rename_files(source_dir, extstring):
'''Rename files in a directory by removing a string from the name.
Writing files to disk is faster than passing around open PDF object
streams. The files are written with a _CROP or _SPLIT flag in the name.
This function removes the flag, resulting in overwriting the original PDF file.
Args:
:source_dir: the directory of files
:extstring: the string to remove from each name
Returns: None
'''
pdffiles = [x for x in os.listdir(source_dir) if x.endswith(extstring)]
for name in pdffiles:
src = os.path.join(source_dir, name)
tgt = os.path.join(source_dir, name.replace(extstring, '.pdf'))
if os.path.isfile(src):
shutil.copy(src, tgt)
os.unlink(src)
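# Illustrative: rename_files('pdf', '_CROP.pdf') turns 'one12_CROP.pdf' back
# into 'one12.pdf', overwriting the original uncropped file.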
class PDFFixer(object):
'''Class to (optionally split, re-number, and) crop a directory of PDF files.
There are three stages to the process:
1. Initialize. Read the files in the directory using multiple processes.
Record the PDF's filename, the number of pages it contains, and a
boolean indicating if it has been cropped. Close the files.
2. Split and renumber the PDFs. In a set of multiple processes,
split any PDFs that contain multiple pages into separate files.
Then re-number the PDF files such that the information contained
in the original stream is kept it the original order.
3. Crop the whitespace from the margins of each PDF.
'''
def __init__(self, args):
self.source_dir = args.dir
self.ghostscript = args.ghostscript
self.file_info = list()
self.cropped = list()
self.pdffiles = [x for x in os.listdir(self.source_dir) if x.endswith('.pdf')]
print 'Reading %d files' % len(self.pdffiles)
processes = dict()
q = multiprocessing.Queue()
for name in self.pdffiles:
processes[name] = multiprocessing.Process(target=self.read, args=(q, name))
processes[name].start()
for _ in processes:
item = q.get()
name, cropped, pages = item['name'], item['cropped'], item['pages']
self.file_info.append({'name': name, 'pages': pages})
if cropped:
self.cropped.append(name)
print
def read(self, q, name):
'''Read a PDF file, find the number of pages it contains and whether
it contains metadata indicating it has been cropped in a previous
run. Save the information and place it in a queue that is used after
all processes have completed.
'''
print '.',
obj = pyPdf.PdfFileReader(open(os.path.join(self.source_dir, name), 'rb'))
docinfo = obj.getDocumentInfo()
cropped = docinfo and docinfo.has_key('/Cropped')
pages = obj.getNumPages()
q.put({'name': name, 'cropped': cropped, 'pages':pages})
obj.stream.close()
def split(self):
'''Create a data structure, `stem_info`, which contains an ordered list
of the files that match to each file-stem.
Process each list of files by file-stem. If no pdf files in the list
have multiple pages, this method does nothing.
If multiple pages do exist for at least one file in the list, split the
files from that point on so that each pdf file contains one page.
Then write the pages to files, renumbering so the information stream
keeps its original order.
Rename the files so the original files are overwritten after all processes
are complete.
'''
stem_info = dict()
for stem in get_stems(self.pdffiles):
'Create the data structure to match a list of files according to its stem'
stem_matches = ['%s.pdf' % stem]
stem_matches.extend([x for x in self.pdffiles if re.match(r'%s\d+\.pdf' % stem, x)])
stem_info[stem] = [{'name': x['name'], 'pages': x['pages']}
for x in self.file_info if x['name'] in stem_matches]
for stem in stem_info:
'if no file in the list contains multiple pages, do nothing'
if sum(x['pages'] for x in stem_info[stem]) == len(stem_info[stem]):
continue
start_splitting = False
filedigit = 0
files_info = sorted(stem_info[stem], key=get_filedigit)
for pdfdict in files_info:
name = pdfdict['name']
pages = pdfdict['pages']
if not start_splitting and pages > 1:
start_splitting = True
if not start_splitting:
print 'skipping %s' % name
filedigit += 1
continue
print '%30s (%d pages)' % (name, pages)
'''Write a new one-page file for each page in the stream
naming the files consecutively.
'''
obj = pyPdf.PdfFileReader(open(os.path.join(self.source_dir, name), 'rb'))
for pagenum in range(0, pages):
if filedigit == 0:
fname = os.path.join(self.source_dir, '%s_SPLIT.pdf' % stem)
rname = '%s.pdf' % stem
else:
fname = os.path.join(self.source_dir, '%s%d_SPLIT.pdf' % (stem, filedigit))
rname = '%s%d.pdf' % (stem, filedigit)
write_page(fname, obj, pagenum)
if self.cropped.count(rname):
self.cropped.remove(rname)
filedigit += 1
obj.stream.close()
rename_files(self.source_dir, '_SPLIT.pdf')
def crop(self):
'''For each file in the directory, start a subprocess (within multiprocess)
to crop the file. Rename the files to overwrite the original when all
processes are complete.
'''
processes = dict()
filenames = [x for x in os.listdir(self.source_dir)
if x not in self.cropped and x.endswith('.pdf')]
if filenames:
print 'Cropping %d files' % len(filenames)
for name in filenames:
processes[name] = multiprocessing.Process(target=self.crop_process, args=(name,))
processes[name].start()
for name in processes:
processes[name].join()
print
rename_files(self.source_dir, '_CROP.pdf')
def crop_process(self, name):
'''Get the bounding box for each file and set the new dimensions
on the page object. Write the page object to disk.
'''
fullname = os.path.join(self.source_dir, name)
obj = pyPdf.PdfFileReader(open(fullname, 'rb'))
print '+',
bounds = get_bbox(self.ghostscript, fullname)
if bounds and int(sum(bounds)):
lx, ly, ux, uy = bounds
page = obj.getPage(0)
page.mediaBox.lowerLeft = lx, ly
page.mediaBox.lowerRight = ux, ly
page.mediaBox.upperLeft = lx, uy
page.mediaBox.upperRight = ux, uy
new_name = os.path.join(self.source_dir, '%s_CROP.pdf' % os.path.splitext(name)[0])
write_page(new_name, obj, 0, crop=True)
def main(args):
t0 = time.clock()
f = PDFFixer(args)
if not args.nosplit:
f.split()
f.crop()
print 'Finished: ', time.clock() - t0, ' processing seconds'
if __name__ == '__main__':
'''Set up command line arguments
If you use pyinstaller to create an executable, you must include
`multiprocessing.freeze_support` on the Windows platform.
Arguments:
:--dir: Specify the directory containing the PDF files.
The default is a directory `pdf` directly under the
current working directory.
:--nosplit: Omit the splitting and re-numbering process.
Use this if you want only to crop the PDF files.
:--ghostscript: Specify the full path to the Ghostscript executable
'''
if sys.platform.startswith('win'):
multiprocessing.freeze_support()
parser = argparse.ArgumentParser()
parser.add_argument('--dir', default=os.path.join(os.getcwd(), 'pdf'))
parser.add_argument('--nosplit', action='store_true')
parser.add_argument('--ghostscript', default='c:\\bin\\gswin64c ')
args = parser.parse_args()
main(args)
| mit | -8,395,881,128,080,925,000 | 34.44507 | 99 | 0.589128 | false |
nemesiscodex/JukyOS-sugar | src/jarabe/controlpanel/inlinealert.py | 1 | 2747 | # Copyright (C) 2008, OLPC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gtk
import gobject
import pango
from sugar.graphics import style
from sugar.graphics.icon import Icon
class InlineAlert(gtk.HBox):
"""UI interface for Inline alerts
    Inline alerts are different from the other alerts because they are
    not dialogs; they only inform about a current event.
Properties:
'msg': the message of the alert,
'icon': the icon that appears at the far left
See __gproperties__
"""
__gtype_name__ = 'SugarInlineAlert'
__gproperties__ = {
'msg': (str, None, None, None, gobject.PARAM_READWRITE),
'icon': (object, None, None, gobject.PARAM_WRITABLE),
}
def __init__(self, **kwargs):
self._msg = None
self._msg_color = None
self._icon = Icon(icon_name='emblem-warning',
fill_color=style.COLOR_SELECTION_GREY.get_svg(),
stroke_color=style.COLOR_WHITE.get_svg())
self._msg_label = gtk.Label()
self._msg_label.set_max_width_chars(50)
self._msg_label.set_ellipsize(pango.ELLIPSIZE_MIDDLE)
self._msg_label.set_alignment(0, 0.5)
self._msg_label.modify_fg(gtk.STATE_NORMAL,
style.COLOR_SELECTION_GREY.get_gdk_color())
gobject.GObject.__init__(self, **kwargs)
self.set_spacing(style.DEFAULT_SPACING)
self.modify_bg(gtk.STATE_NORMAL,
style.COLOR_WHITE.get_gdk_color())
self.pack_start(self._icon, False)
self.pack_start(self._msg_label, False)
self._msg_label.show()
self._icon.show()
def do_set_property(self, pspec, value):
if pspec.name == 'msg':
if self._msg != value:
self._msg = value
self._msg_label.set_markup(self._msg)
elif pspec.name == 'icon':
if self._icon != value:
self._icon = value
def do_get_property(self, pspec):
if pspec.name == 'msg':
return self._msg
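    # Illustrative usage (container name is hypothetical):
    #   alert = InlineAlert()
    #   alert.props.msg = 'Changes will take effect after a restart'
    #   some_vbox.pack_start(alert, False)
    #   alert.show()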
| gpl-2.0 | -3,075,420,153,367,084,000 | 32.91358 | 77 | 0.624681 | false |
FlintHill/SUAS-Competition | tests/unit_tests/test_sda_waypoint_holder.py | 1 | 1806 | import unittest
from SDA import *
import numpy
class WaypointHolderTestCase(unittest.TestCase):
def setUp(self):
self.waypoints = numpy.array([[1,2,3],[4,5,6]]) #Must remain a 2D array
def test_add_waypoint(self):
single_waypoint = numpy.array([7,8,9])
test_waypoint_holder = WaypointHolder(self.waypoints)
test_waypoint_holder.add_waypoint(single_waypoint)
self.assertEqual(len(test_waypoint_holder.waypoints), len(self.waypoints) + 1)
def test_add_empty_waypoint(self):
empty_waypoint = numpy.array([])
single_waypoint = numpy.array([7,8,9])
test_waypoint_holder = WaypointHolder(empty_waypoint)
test_waypoint_holder.add_waypoint(single_waypoint)
self.assertEqual(test_waypoint_holder.waypoints[0][2], single_waypoint[2])
def test_get_current_waypoint(self):
test_waypoint_holder = WaypointHolder(self.waypoints)
self.assertTrue(numpy.array_equal(test_waypoint_holder.get_current_waypoint(), self.waypoints[0]))
def test_reached_any_waypoint(self):
far_point = numpy.array([10,11,12])
close_point = numpy.array([1,2,3])
distance_to_target = 3
test_waypoint_holder = WaypointHolder(self.waypoints)
self.assertFalse(test_waypoint_holder.reached_any_waypoint(far_point, distance_to_target))
test_waypoint_holder = WaypointHolder(self.waypoints)
self.assertTrue(test_waypoint_holder.reached_any_waypoint(close_point, distance_to_target))
def test_reached_current_waypoint(self):
test_waypoint_holder = WaypointHolder(self.waypoints)
self.assertTrue(test_waypoint_holder.reached_current_waypoint(self.waypoints[0]))
self.assertTrue(test_waypoint_holder.reached_current_waypoint(self.waypoints[1]))
| mit | 8,292,292,180,833,046,000 | 40.045455 | 106 | 0.699336 | false |
boehlke/OpenSlides | openslides/utils/main.py | 1 | 11990 | import argparse
import ctypes
import os
import sys
import tempfile
import threading
import time
import webbrowser
from typing import Dict, Optional
from django.conf import ENVIRONMENT_VARIABLE
from django.core.exceptions import ImproperlyConfigured
from django.utils.crypto import get_random_string
from mypy_extensions import NoReturn
DEVELOPMENT_VERSION = "Development Version"
UNIX_VERSION = "Unix Version"
WINDOWS_VERSION = "Windows Version"
WINDOWS_PORTABLE_VERSION = "Windows Portable Version"
class PortableDirNotWritable(Exception):
pass
class PortIsBlockedError(Exception):
pass
class DatabaseInSettingsError(Exception):
pass
class UnknownCommand(Exception):
pass
class ExceptionArgumentParser(argparse.ArgumentParser):
def error(self, message: str) -> NoReturn:
raise UnknownCommand(message)
def detect_openslides_type() -> str:
"""
Returns the type of this OpenSlides version.
"""
if sys.platform == "win32":
if os.path.basename(sys.executable).lower() == "openslides.exe":
# Note: sys.executable is the path of the *interpreter*
# the portable version embeds python so it *is* the interpreter.
# The wrappers generated by pip and co. will spawn the usual
# python(w).exe, so there is no danger of mistaking them
# for the portable even though they may also be called
# openslides.exe
openslides_type = WINDOWS_PORTABLE_VERSION
else:
openslides_type = WINDOWS_VERSION
else:
openslides_type = UNIX_VERSION
return openslides_type
def get_default_settings_dir(openslides_type: str = None) -> str:
"""
Returns the default settings path according to the OpenSlides type.
The argument 'openslides_type' has to be one of the three types mentioned in
openslides.utils.main.
"""
if openslides_type is None:
openslides_type = detect_openslides_type()
if openslides_type == UNIX_VERSION:
parent_directory = os.environ.get(
"XDG_CONFIG_HOME", os.path.expanduser("~/.config")
)
elif openslides_type == WINDOWS_VERSION:
parent_directory = get_win32_app_data_dir()
elif openslides_type == WINDOWS_PORTABLE_VERSION:
parent_directory = get_win32_portable_dir()
else:
raise TypeError(f"{openslides_type} is not a valid OpenSlides type.")
return os.path.join(parent_directory, "openslides")
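# For example, on a typical Unix system with XDG_CONFIG_HOME unset this
# resolves to ~/.config/openslides.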
def get_local_settings_dir() -> str:
"""
Returns the path to a local settings.
On Unix systems: 'personal_data/var/'
"""
return os.path.join("personal_data", "var")
def setup_django_settings_module(
settings_path: str = None, local_installation: bool = False
) -> None:
"""
Sets the environment variable ENVIRONMENT_VARIABLE, that means
'DJANGO_SETTINGS_MODULE', to the given settings.
If no settings_path is given and the environment variable is already set,
then this function does nothing.
If the argument settings_path is set, then the environment variable is
always overwritten.
"""
if settings_path is None and os.environ.get(ENVIRONMENT_VARIABLE, ""):
return
if settings_path is None:
if local_installation:
settings_dir = get_local_settings_dir()
else:
settings_dir = get_default_settings_dir()
settings_path = os.path.join(settings_dir, "settings.py")
settings_file = os.path.basename(settings_path)
settings_module_name = ".".join(settings_file.split(".")[:-1])
if "." in settings_module_name:
raise ImproperlyConfigured(
"'.' is not an allowed character in the settings-file"
)
# Change the python path. Also set the environment variable python path, so
# change of the python path also works after a reload
settings_module_dir = os.path.abspath(os.path.dirname(settings_path))
sys.path.insert(0, settings_module_dir)
try:
os.environ["PYTHONPATH"] = os.pathsep.join(
(settings_module_dir, os.environ["PYTHONPATH"])
)
except KeyError:
# The environment variable is empty
os.environ["PYTHONPATH"] = settings_module_dir
# Set the environment variable to the settings module
os.environ[ENVIRONMENT_VARIABLE] = settings_module_name
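# Illustrative effect (path is hypothetical): calling
# setup_django_settings_module("/home/user/.config/openslides/settings.py")
# prepends that directory to sys.path and PYTHONPATH and sets
# DJANGO_SETTINGS_MODULE to "settings".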
def get_default_settings_context(user_data_dir: str = None) -> Dict[str, str]:
"""
Returns the default context values for the settings template:
    'openslides_user_data_dir', 'import_function' and 'debug'.
    The argument 'user_data_dir' is a given path for user specific data or None.
"""
# Setup path for user specific data (SQLite3 database, media, ...):
# Take it either from command line or get default path
default_context = {}
if user_data_dir:
default_context["openslides_user_data_dir"] = repr(user_data_dir)
default_context["import_function"] = ""
else:
openslides_type = detect_openslides_type()
if openslides_type == WINDOWS_PORTABLE_VERSION:
default_context[
"openslides_user_data_dir"
] = "get_win32_portable_user_data_dir()"
default_context[
"import_function"
] = "from openslides.utils.main import get_win32_portable_user_data_dir"
else:
data_dir = get_default_user_data_dir(openslides_type)
default_context["openslides_user_data_dir"] = repr(
os.path.join(data_dir, "openslides")
)
default_context["import_function"] = ""
default_context["debug"] = "False"
return default_context
def get_default_user_data_dir(openslides_type: str) -> str:
"""
Returns the default directory for user specific data according to the OpenSlides
type.
The argument 'openslides_type' has to be one of the three types mentioned
in openslides.utils.main.
"""
if openslides_type == UNIX_VERSION:
default_user_data_dir = os.environ.get(
"XDG_DATA_HOME", os.path.expanduser("~/.local/share")
)
elif openslides_type == WINDOWS_VERSION:
default_user_data_dir = get_win32_app_data_dir()
elif openslides_type == WINDOWS_PORTABLE_VERSION:
default_user_data_dir = get_win32_portable_dir()
else:
raise TypeError(f"{openslides_type} is not a valid OpenSlides type.")
return default_user_data_dir
def get_win32_app_data_dir() -> str:
"""
Returns the directory of Windows' AppData directory.
"""
shell32 = ctypes.WinDLL("shell32.dll") # type: ignore
SHGetFolderPath = shell32.SHGetFolderPathW
SHGetFolderPath.argtypes = (
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_uint32,
ctypes.c_wchar_p,
)
SHGetFolderPath.restype = ctypes.c_uint32
CSIDL_LOCAL_APPDATA = 0x001C
MAX_PATH = 260
buf = ctypes.create_unicode_buffer(MAX_PATH)
res = SHGetFolderPath(0, CSIDL_LOCAL_APPDATA, 0, 0, buf)
if res != 0:
# TODO: Write other exception
raise Exception("Could not determine Windows' APPDATA path")
return buf.value # type: ignore
def get_win32_portable_dir() -> str:
"""
Returns the directory of the Windows portable version.
"""
# NOTE: sys.executable will be the path to openslides.exe
# since it is essentially a small wrapper that embeds the
# python interpreter
portable_dir = os.path.dirname(os.path.abspath(sys.executable))
try:
fd, test_file = tempfile.mkstemp(dir=portable_dir)
except OSError:
raise PortableDirNotWritable(
"Portable directory is not writeable. "
"Please choose another directory for settings and data files."
)
else:
os.close(fd)
os.unlink(test_file)
return portable_dir
def get_win32_portable_user_data_dir() -> str:
"""
Returns the user data directory to the Windows portable version.
"""
return os.path.join(get_win32_portable_dir(), "openslides")
def write_settings(
settings_dir: str = None,
settings_filename: str = "settings.py",
template: str = None,
**context: str,
) -> str:
"""
Creates the settings file at the given dir using the given values for the
file template.
    Returns the path to the created settings.
"""
if settings_dir is None:
settings_dir = get_default_settings_dir()
settings_path = os.path.join(settings_dir, settings_filename)
if template is None:
with open(
os.path.join(os.path.dirname(__file__), "settings.py.tpl")
) as template_file:
template = template_file.read()
# Create a random SECRET_KEY to put it in the settings.
# from django.core.management.commands.startproject
chars = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
context.setdefault("secret_key", get_random_string(50, chars))
for key, value in get_default_settings_context().items():
context.setdefault(key, value)
content = template % context
settings_module = os.path.realpath(settings_dir)
if not os.path.exists(settings_module):
os.makedirs(settings_module)
with open(settings_path, "w") as settings_file:
settings_file.write(content)
if context["openslides_user_data_dir"] == "get_win32_portable_user_data_dir()":
openslides_user_data_dir = get_win32_portable_user_data_dir()
else:
openslides_user_data_dir = context["openslides_user_data_dir"].strip("'")
os.makedirs(os.path.join(openslides_user_data_dir, "static"), exist_ok=True)
return os.path.realpath(settings_path)
def open_browser(host: str, port: int) -> None:
"""
Launches the default web browser at the given host and port and opens
the webinterface. Uses start_browser internally.
"""
if host == "0.0.0.0":
# Windows does not support 0.0.0.0, so use 'localhost' instead
start_browser(f"http://localhost:{port}")
else:
start_browser(f"http://{host}:{port}")
def start_browser(browser_url: str) -> None:
"""
Launches the default web browser at the given url and opens the
webinterface.
"""
try:
browser = webbrowser.get()
except webbrowser.Error:
print("Could not locate runnable browser: Skipping start")
else:
def function() -> None:
# TODO: Use a nonblocking sleep event here. Tornado has such features.
time.sleep(1)
browser.open(browser_url)
thread = threading.Thread(target=function)
thread.start()
def get_database_path_from_settings() -> Optional[str]:
"""
Retrieves the database path out of the settings file. Returns None,
if it is not a SQLite3 database.
Needed for the backupdb command.
"""
from django.conf import settings as django_settings
from django.db import DEFAULT_DB_ALIAS
db_settings = django_settings.DATABASES
default = db_settings.get(DEFAULT_DB_ALIAS)
if not default:
raise DatabaseInSettingsError("Default databases is not configured")
database_path = default.get("NAME")
if not database_path:
raise DatabaseInSettingsError("No path or name specified for default database.")
if default.get("ENGINE") != "django.db.backends.sqlite3":
database_path = None
return database_path
def is_local_installation() -> bool:
"""
Returns True if the command is called for a local installation
This is the case if manage.py is used, or when the --local-installation flag is set.
"""
return (
True
if "--local-installation" in sys.argv or "manage.py" in sys.argv[0]
else False
)
def is_windows() -> bool:
"""
Returns True if the current system is Windows. Returns False otherwise.
"""
return sys.platform == "win32"
| mit | 8,656,421,167,624,361,000 | 31.849315 | 88 | 0.654879 | false |
kupospelov/classify | classify/model.py | 1 | 6467 | import tensorflow as tf
import numpy as np
from classify.lookup import Lookup
from classify.util.logger import Logger
from classify.util.timer import Timer
class Model:
"""The neural network model."""
SCOPE_NAME = 'model'
DEFAULT_PATH = './model/model.ckpt'
def __init__(self, indexer, params, save_path=DEFAULT_PATH):
self.log = Logger.create(self)
self.max_length = params.max_length
self.batch_size = params.batch_size
self.num_hidden = params.num_hidden
self.keep_prob = params.keep_prob
self.num_layers = params.num_layers
self.epoch = params.epoch
self.error = params.error
self.save_path = save_path
self.vector_dims = indexer.dimensions
self.session = tf.Session(graph=tf.Graph())
self.graph = self.reuse_graph()
self.lookup = Lookup(indexer, self.max_length)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
self.close()
@Timer('Training finished')
def train(self, inputs, outputs):
length = len(inputs)
self.log.debug('Training set: %d samples.', length)
self.session.run(tf.variables_initializer(self.graph['variables']))
for i in range(self.epoch):
self.log.debug('Epoch %3d/%3d...', i + 1, self.epoch)
errors = 0
for blen, binp, bout in self.batch(inputs, outputs):
vectors = self.lookup[binp]
self.session.run(
self.graph['minimize'],
feed_dict={
self.graph['data']: vectors,
self.graph['target']: bout,
self.graph['lengths']: blen,
self.graph['keep_prob']: self.keep_prob
})
errors += self.session.run(
self.graph['error'],
feed_dict={
self.graph['data']: vectors,
self.graph['target']: bout,
self.graph['lengths']: blen,
self.graph['keep_prob']: 1.0
})
epoch_error = 100.0 * errors / length
self.log.debug('Errors: %d (%3.1f%%)', errors, epoch_error)
if epoch_error < self.error:
self.log.debug('The desired accuracy achieved.')
break
@Timer('Saved')
def save(self):
saver = tf.train.Saver(self.graph['variables'])
saver.save(self.session, self.save_path)
@Timer('Restored')
def restore(self):
saver = tf.train.Saver(self.graph['variables'])
saver.restore(self.session, self.save_path)
def predict(self, tests):
result = []
for blen, binp, _ in self.batch(tests, []):
vectors = self.lookup[binp]
result.extend(self.session.run(
self.graph['prediction'],
feed_dict={
self.graph['data']: vectors,
self.graph['lengths']: blen,
self.graph['keep_prob']: 1.0
}))
return result
def close(self):
self.session.close()
self.lookup.close()
self.log.debug('Finished.')
def reuse_graph(self):
with self.session.graph.as_default():
with tf.variable_scope(self.SCOPE_NAME) as scope:
return self.build_graph()
scope.reuse_variables()
def build_graph(self):
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
lengths = tf.placeholder(tf.int32, [self.batch_size], 'lengths')
data = tf.placeholder(
tf.float32,
[self.batch_size, self.max_length, self.vector_dims],
'data')
target = tf.placeholder(tf.float32, [self.batch_size, 2], 'target')
layers = [tf.contrib.rnn.DropoutWrapper(
tf.contrib.rnn.GRUCell(self.num_hidden),
output_keep_prob=keep_prob) for _ in range(self.num_layers)]
multicell = tf.contrib.rnn.MultiRNNCell(layers)
val, state = tf.nn.dynamic_rnn(
multicell, data, sequence_length=lengths, dtype=tf.float32)
# An approach to handle variable length sequences
idx = tf.range(tf.shape(data)[0]) * self.max_length + (lengths - 1)
last = tf.gather(tf.reshape(val, [-1, self.num_hidden]), idx)
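        # For example, with max_length=3 and lengths=[2, 3] the flattened
        # outputs have rows 0..5 and idx=[1, 5], i.e. the last valid
        # timestep of each sequence in the batch.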
weight = tf.get_variable(
'weight',
initializer=tf.truncated_normal([self.num_hidden, 2]))
bias = tf.get_variable('bias', initializer=tf.constant(0.1, shape=[2]))
prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
cross_entropy = -tf.reduce_sum(target * tf.log(prediction))
optimizer = tf.train.AdamOptimizer()
minimize = optimizer.minimize(cross_entropy)
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_sum(tf.cast(mistakes, tf.int32))
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope=self.SCOPE_NAME)
return {
'data': data,
'target': target,
'lengths': lengths,
'keep_prob': keep_prob,
'prediction': prediction,
'minimize': minimize,
'error': error,
'variables': variables
}
def batch(self, inputs, outputs):
for i in range(0, len(inputs), self.batch_size):
# First align the second dimension to the max sequence length
blen = []
binp = []
bout = outputs[i: i + self.batch_size]
for e in inputs[i:i + self.batch_size]:
length = len(e)
blen.append(length)
binp.append(np.pad(e,
((0, self.max_length - length)),
'constant').tolist())
# Then align the first dimension to the batch size
diff = self.batch_size - len(binp)
if diff > 0:
blen = np.pad(blen, ((0, diff)), 'constant').tolist()
binp = np.pad(binp, ((0, diff), (0, 0)), 'constant').tolist()
if outputs:
bout = np.pad(
bout, ((0, diff), (0, 0)), 'constant').tolist()
yield blen, binp, bout
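# --- Editorial sketch (not part of the original file) -----------------------
# A minimal, hedged illustration of the two padding steps performed by
# batch() above, using hypothetical values max_length=5 and batch_size=3.
# Only numpy is assumed; the names below are invented for the example.
#
#   import numpy as np
#   max_length, batch_size = 5, 3
#   seqs = [[1, 2], [3, 4, 5]]                    # variable-length inputs
#   blen = [len(s) for s in seqs]                 # true lengths: [2, 3]
#   binp = [np.pad(s, (0, max_length - len(s)), 'constant').tolist()
#           for s in seqs]                        # [[1,2,0,0,0], [3,4,5,0,0]]
#   diff = batch_size - len(binp)                 # 1 row missing
#   blen = np.pad(blen, (0, diff), 'constant').tolist()            # [2, 3, 0]
#   binp = np.pad(binp, ((0, diff), (0, 0)), 'constant').tolist()  # adds a zero row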
| mit | -8,851,315,992,157,304,000 | 34.338798 | 79 | 0.529303 | false |
gpocentek/python-gitlab | tools/functional/ee-test.py | 1 | 4049 | #!/usr/bin/env python
import gitlab
P1 = "root/project1"
P2 = "root/project2"
MR_P1 = 1
I_P1 = 1
I_P2 = 1
EPIC_ISSUES = [4, 5]
G1 = "group1"
LDAP_CN = "app1"
LDAP_PROVIDER = "ldapmain"
def start_log(message):
print("Testing %s... " % message, end="")
def end_log():
print("OK")
gl = gitlab.Gitlab.from_config("ee")
project1 = gl.projects.get(P1)
project2 = gl.projects.get(P2)
issue_p1 = project1.issues.get(I_P1)
issue_p2 = project2.issues.get(I_P2)
group1 = gl.groups.get(G1)
mr = project1.mergerequests.get(1)
start_log("MR approvals")
approval = project1.approvals.get()
v = approval.reset_approvals_on_push
approval.reset_approvals_on_push = not v
approval.save()
approval = project1.approvals.get()
assert v != approval.reset_approvals_on_push
project1.approvals.set_approvers(1, [1], [])
approval = project1.approvals.get()
assert approval.approvers[0]["user"]["id"] == 1
approval = mr.approvals.get()
approval.approvals_required = 2
approval.save()
approval = mr.approvals.get()
assert approval.approvals_required == 2
approval.approvals_required = 3
approval.save()
approval = mr.approvals.get()
assert approval.approvals_required == 3
mr.approvals.set_approvers(1, [1], [])
approval = mr.approvals.get()
assert approval.approvers[0]["user"]["id"] == 1
ars = project1.approvalrules.list(all=True)
assert len(ars) == 0
project1.approvalrules.create(
{"name": "approval-rule", "approvals_required": 1, "group_ids": [group1.id]}
)
ars = project1.approvalrules.list(all=True)
assert len(ars) == 1
assert ars[0].approvals_required == 1
ars[0].approvals_required = 2
ars[0].save()
ars = project1.approvalrules.list(all=True)
assert len(ars) == 1
assert ars[0].approvals_required == 2
ars[0].delete()
ars = project1.approvalrules.list(all=True)
assert len(ars) == 0
end_log()
start_log("geo nodes")
# very basic tests because we only have 1 node...
nodes = gl.geonodes.list()
status = gl.geonodes.status()
end_log()
start_log("issue links")
# bit of cleanup just in case
for link in issue_p1.links.list():
issue_p1.links.delete(link.issue_link_id)
src, dst = issue_p1.links.create({"target_project_id": P2, "target_issue_iid": I_P2})
links = issue_p1.links.list()
link_id = links[0].issue_link_id
issue_p1.links.delete(link_id)
end_log()
start_log("LDAP links")
# bit of cleanup just in case
if hasattr(group1, "ldap_group_links"):
for link in group1.ldap_group_links:
group1.delete_ldap_group_link(link["cn"], link["provider"])
assert gl.ldapgroups.list()
group1.add_ldap_group_link(LDAP_CN, 30, LDAP_PROVIDER)
group1.ldap_sync()
group1.delete_ldap_group_link(LDAP_CN)
end_log()
start_log("boards")
# bit of cleanup just in case
for board in project1.boards.list():
if board.name == "testboard":
board.delete()
board = project1.boards.create({"name": "testboard"})
board = project1.boards.get(board.id)
project1.boards.delete(board.id)
for board in group1.boards.list():
if board.name == "testboard":
board.delete()
board = group1.boards.create({"name": "testboard"})
board = group1.boards.get(board.id)
group1.boards.delete(board.id)
end_log()
start_log("push rules")
pr = project1.pushrules.get()
if pr:
pr.delete()
pr = project1.pushrules.create({"deny_delete_tag": True})
pr.deny_delete_tag = False
pr.save()
pr = project1.pushrules.get()
assert pr is not None
assert pr.deny_delete_tag is False
pr.delete()
end_log()
start_log("license")
license = gl.get_license()
assert "user_limit" in license
try:
gl.set_license("dummykey")
except Exception as e:
assert "The license key is invalid." in e.error_message
end_log()
start_log("epics")
epic = group1.epics.create({"title": "Test epic"})
epic.title = "Fixed title"
epic.labels = ["label1", "label2"]
epic.save()
epic = group1.epics.get(epic.iid)
assert epic.title == "Fixed title"
assert len(group1.epics.list())
# issues
assert not epic.issues.list()
for i in EPIC_ISSUES:
epic.issues.create({"issue_id": i})
assert len(EPIC_ISSUES) == len(epic.issues.list())
for ei in epic.issues.list():
ei.delete()
epic.delete()
end_log()
| lgpl-3.0 | 9,218,844,984,762,932,000 | 24.465409 | 85 | 0.702643 | false |
masscaptcha/masscaptcha | masscaptcha.py | 1 | 1051 | import requests
import random
import subprocess
import sys
def random_line(filename):
with open(filename) as f:
lines = [x.rstrip() for x in f.readlines()]
return random.choice(lines)
with open("users.txt") as usersfile:
users = [x.rstrip().split(" ") for x in usersfile.readlines()]
captcha = sys.argv[1]
for username, password, wifi_user, wifi_pass in users:
subprocess.call(["sudo bash ./wifi.sh " + wifi_user + " " + wifi_pass + " &>/dev/null"], shell=True)
session = requests.Session()
session.trust_env = False
credentials = {'ctl00$MainContent$Email':username, 'ctl00$MainContent$Password': password, 'ctl00$MainContent$captcha' : captcha}
headers = {'User-Agent': random_line("useragents.txt")}
#simulate normal behavior (first request the login page)
session.get('https://catalog.inf.elte.hu/Account/Login')
response = session.post('https://catalog.inf.elte.hu/Account/Login', data=credentials, headers=headers)
if "logged in" in response.text:
print "Successfully logged in " + str(username)
| mit | 7,955,093,874,446,160,000 | 28.194444 | 130 | 0.704091 | false |
pyta-uoft/pyta | nodes/if_exp.py | 1 | 1115 | """
IfExp astroid node
An if statement written in an expression form.
(IfExp node represents an expression, not a statement.)
Attributes:
- test (NodeNG)
- Holds a single node such as Compare to evaluate the truth condition of.
- body (NodeNG)
- A Node representing the suite to be executed when the if expression
evaluates to True.
- orelse (NodeNG)
- The Node representing the suite to be executed when the if expression
evaluates to False.
Example 1:
IfExp(
test=Const(value=True),
body=Const(value=1),
orelse=Const(value=0))
Example 2:
IfExp(
test=Compare(
left=Name(name='eval_expr'),
ops=[['==', Name(name='expected')]]),
body=BinOp(
op='+',
left=Name(name='x'),
right=Name(name='y')),
orelse=Name(name='something'))
Type-checking:
The type of the expression is the same as the type of the body and orelse expressions
(they must have the same type).
"""
# Example 1
1 if True else 0
# Example 2
x + y if eval_expr == expected else something
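# Editorial note (hedged): one way to inspect these nodes, assuming the
# astroid library is installed, is astroid.extract_node:
#
#   import astroid
#   node = astroid.extract_node("1 if True else 0")
#   print(node.test, node.body, node.orelse)   # the three attributes above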
| gpl-3.0 | 28,458,848,889,594,796 | 24.340909 | 89 | 0.622422 | false |
3ll34ndr0/10cur4 | dbapi.py | 1 | 33524 | #!/usr/bin/python
# coding: utf-8
import sqlite3
import json
from horario import Horario
# Handle hours and date
from datetime import datetime, timedelta,date
from datetime import time as tm
from pytz import timezone
import pytz
import locale
from time import time,localtime,strftime
# This should be tied to a configuration file:
locale.setlocale(locale.LC_ALL,'es_AR.utf8')
#import sys
#sys.path.append('/home/lean/arena/10cur4')
#from manager import ManageAppointments
class ActivityRegister(object):
"""
This class is intended to create an object to handle the activity register in database.
"""
def __init__(self,
database,
activity,
initHour,
endHour=None,
quota='1'):
self.database = database
self.activity = activity
if type(initHour) is str:
print("Oh sia")
self.initHour = initHour
else:
self.initHour = dateTime2Epoch(initHour)
initHour = self.initHour
self.endHour = endHour
self.quota = quota
# Try if the appointment exists:
try:
areg = getActivityRegister(database,activity)
ar = json.loads(getActivityRegister(database,activity)[1])
ar['horarios'][initHour] # This is very important; it is explained by the line above (raises KeyError if the initHour is missing).
except KeyError as e:
objetoHorario = self.loadReg()
print("Un horario a las {} no existe, sera creado..".format(initHour))
objetoHorario.addAppointment(initHour,endHour,quota=quota)
try:
# Because we added one new initHour, need to write it to database
self.writeDatabase(objetoHorario)
# Update this object with database values <- This caused a bug
# when used from class ManageAppointments, when initHour was
# a new value to add. It seems that it was copied from other
# method of this class. Calling __init__ from __init__ when
# used from a super class caused a AtributeError.
#self.__init__(self.database,self.activity,self.initHour)
areg = getActivityRegister(database,activity)
ar = json.loads(getActivityRegister(database,activity)[1])
except AttributeError as e:
raise e
except Exception as e:
#"Failed trying to write to database"
raise e
areg = getActivityRegister(database,activity)
ar = json.loads(getActivityRegister(database,activity)[1])
except TypeError as e:
print("La actividad {} no existe, sera creada...".format(activity))
createActivityRegister(database,activity,initHour,endHour,quota=quota)
areg = getActivityRegister(database,activity)
ar = json.loads(getActivityRegister(database,activity)[1])
self.endHour = ar['horarios'][initHour][0]
self.quota = ar['horarios'][initHour][1]
self.participants = ar['horarios'][initHour][2]
self.description = areg[3]
self.vCalendar = areg[4]
self.name = areg[0]
self.defaultQuota = areg[2]
def reportAvailableAppointments(self, onDay = None, untilDay = None,
humanOutput = False):
"""onDay is expected to be a datetime object """
print("DEBUG: onDay = {}".format(onDay))
if onDay is None: # For today
fromTimeEpoch = time() # From now on
toTimeEpoch = formatDate((date.today() + timedelta(1)).timetuple()[0:5])[2]
else: # Or any other day
#fromTimeEpoch = formatDate(onDay.timetuple()[0:5])[2]
fromTimeEpoch = dateTime2Epoch(onDay)
if untilDay is not None:
# toTimeEpoch = formatDate((untilDay + timedelta(1)).timetuple()[0:5])[2]
toTimeEpoch = dateTime2Epoch(untilDay)
else:
toTimeEpoch = fromTimeEpoch + 86400 # plus one day in seconds
# first get the initHours for the day
print("And the activity is: {}".format(self.activity))
appointmentsHours = list(json.loads(getActivityRegister(self.database,self.activity)[1])['horarios'].keys())
print(appointmentsHours)
appointmentsHours.sort()
appointmentsForTheday = [ap for ap in appointmentsHours if float(ap) > fromTimeEpoch and float(ap) < toTimeEpoch]
print(appointmentsForTheday)
if humanOutput is True:
if len(appointmentsForTheday) == 0:
reply = "" #The neutro element for string concatenation hehe
else:
print("entra al humanOutput")
magic = datetime.fromtimestamp
appss = [magic(float(x)).strftime("%c").rstrip('00 ').rstrip(':') for x in appointmentsForTheday]
formatedAppss = "*{}:*\n".format(self.activity)
for date in appss:
formatedAppss += date + '\n'
print(formatedAppss)
reply = formatedAppss
else:
reply = appointmentsForTheday
return reply
def mkAppointment(self,participants):
"""
Make the appointment for the self.activity and the given telephone number
"""
return self.update(participants=participants)
def rawReport(self):
"""Outputs all users and its data
in a given activity at an initHour
Returns:
(activity,initHour),["name1, credits1 @
expireDate1","name2, credits2 @ expireDate2",...,"nameN, creditsN @ expireDateN"]
"""
sortedPeople = list()
people = self.getParticipantsName()
people.sort(key=lambda vence: vence.name) # sort by name
for c in people:
sortedPeople.append(c.name+", "+c.credits+" @ "+c.expDate.rstrip(' ')+" ("+c.phone+")") # TODO: decide what to do with repr(c.expDate)
initHourEpoch = formatDate(localtime(float(self.initHour))[0:5])[0:2]
#datetime(y,m,d,h,mi,s).strftime("%c").decode('utf-8')
rawData = ('{},{}'.format(self.activity,initHourEpoch),sortedPeople) # a tuple with string and other string
return rawData
def howMuch(self):
return len(self.participants)
def periodReport(self, period):
"""Expects an iterable with valid initHours on it. 'period' is
day,week,month in the language defined"""
today = date.today()
todayEpoch = formatDate(today.timetuple()[0:5])[2]
todayAtZeroAM = datetime.combine(today,tm(0,0))
todayAtZeroAME = formatDate(todayAtZeroAM.timetuple()[0:5])[2]
tomorrowAtZeroAM = todayAtZeroAM + timedelta(days=1)
tomorrowAtZeroAME = formatDate(tomorrowAtZeroAM.timetuple()[0:5])[2]
lastWeek = todayAtZeroAM - timedelta(days=7)
lastWeekEpoch = formatDate(lastWeek.timetuple()[0:5])[2]
lastMonth = todayAtZeroAM - timedelta(days=30)
lastMonthEpoch = formatDate(lastMonth.timetuple()[0:5])[2]
# The next line is very criptic, but it really gets the job done:
appointmentsHours = json.loads(getActivityRegister(self.database,self.activity)[1])['horarios'].keys()
if period is "mensual":
timeRange = [ihs for ihs in appointmentsHours if float(ihs) > lastMonthEpoch and float(ihs) < todayEpoch]
reportList = ['Reporte mensual:']
if period is "semanal":
timeRange = [ihs for ihs in appointmentsHours if float(ihs) > lastWeekEpoch and float(ihs) < todayAtZeroAME]
reportList = ['Reporte semanal:']
if period is "diario":
timeRange = [ihs for ihs in appointmentsHours if float(ihs) > todayAtZeroAME and float(ihs) < tomorrowAtZeroAME]
reportList = ['Reporte del día:']
for initHour in timeRange:
ar = ActivityRegister(self.database, self.activity, initHour)
reportList.append(ar.rawReport())
return reportList,timeRange
def update(self,
endHour=None,
quota='1',
participants=None,
description=None,
vCalendar=None):
"""Method to update any value from activity.
Optional params:
endHour,
quota,
participants (If phone numbers are given with the '-' sign, they will be
deleted),
description,
vCalendar.
"""
# Update endHour and quota:
if endHour == None:
if hasattr(self,'endHour'):
endHour = self.endHour
else:
self.endHour = endHour
if quota == '1':
if hasattr(self,'quota'):
quota = self.quota
else:
self.quota = quota
#
objetoHorario = self.loadReg()
# Modify temporarly Horario object with updated values, except for participants
if hasattr(self,'participants'):
objetoHorario.addAppointment(self.initHour,endHour,quota,self.participants)
else:
objetoHorario.addAppointment(self.initHour,endHour,quota)
if participants is not None:
delParticipants = []
addParticipants = []
if type(participants) is str:
if participants.startswith('-'):
# create remove
delParticipants = participants.strip('-')
elif participants.isdigit():
# create Add
addParticipants = participants
else:
print("Participant is not a valid telephon number")
elif type(participants) is list:
# Create a list with numbers to remove from participants:
delParticipants = set([item.strip('-') for item in participants if item.startswith('-')])
# Create a list with numbers to add to participants:
addParticipants = set([item for item in participants if item.isdigit()])
# Remove participants
message = objetoHorario.removeParticipant(self.initHour,delParticipants)
# Add participants
message = objetoHorario.addParticipant(self.initHour,addParticipants)
# Now that everything was done this auxiliary Horario object, dump it to DDDBB:
# Write to database
self.writeDatabase(objetoHorario,description=description,vCalendar=vCalendar)
# Update this object with database values
print("En tioria se grabo la database, y el mje es:{}".format(message))
if type(self.initHour) is str: # TODO: check if self is a
# ManageAppointment instance, instead
self.__init__(self.database,self.activity,self.initHour)
else:
from datetime import datetime
magic = datetime.fromtimestamp
print(type(self.initHour))
self.__init__(self.database,self.activity,magic(float(self.initHour)))
return message
#END of def update
def cancelAppointment(self, participants):
"""Method to cancel the appointment of 'participants' from the current initHour"""
# TODO: CONTINUE HERE: the problem is that the Horario object has to be built correctly
# without losing information, so that only the participants can be deleted.
objetoHorario = self.loadReg()
# Remove participants
objetoHorario.removeParticipant(self.initHour,participants)
# Write to database
self.writeDatabase(objetoHorario)
# Update object with database values
self.__init__(self.database,self.activity,self.initHour)
def remove(self,participants=None,initHour=None):
"""
Method to remove participants, OR erase all information for a given initHour
"""
# It looks like a bug that initHour is not checked for None
if (participants or initHour) is None:
return
objetoHorario = Horario(self.name, self.initHour,self.endHour,self.quota,self.participants)
if (participants is not None and initHour is not None):
print("You can not use this method this way. You can delete either participants of the current initHour OR all information of a given initHour, not both.")
return
if participants is not None:
objetoHorario.removeParticipant(self.initHour,participants)
# Write to database
self.writeDatabase(objetoHorario)
# Update object with database values
self.__init__(self.database,self.activity,self.initHour)
if initHour is not None: # 'Erase' all information from activity at initHour
objetoHorario = Horario(self.name,self.initHour,'')
description=''
vCalendar =''
# Write to database
self.writeDatabase(objetoHorario,description=description,vCalendar=vCalendar)
# Update object with database values
self.__init__(self.database,self.activity,self.initHour)
def deleteInitHour(self):
"""
Method to delete the appointment at the self.initHour for the self.activity
"""
objetoHorario = self.loadReg()
# Remove participants
answer = objetoHorario.deleteAppointment(self.initHour) # TODO: take into account
# that if the appointment had somebody subscribed to, it should at least warn the
# admin, or mesage the numbers in the list.
reply = "Se eliminó la actividad {} a las {}".format(self.activity, self.initHour)
# Write to database
self.writeDatabase(objetoHorario)
# Update object with database values
self.__init__(self.database,self.activity,None) # None initHour given because it was deleted
return reply
def writeDatabase(self,
objetoHorario,
description=None,
vCalendar=None):
"""
Useful method that only writes to DDBB
"""
horariosJSON = json.dumps(objetoHorario, default=jdefault)
print("A grabar la database!!!!!!!!!!!!!!")
try:
db = sqlite3.connect(self.database)
cursor = db.cursor()
# Aca va un update only horario column.
cursor.execute(
'''UPDATE activityCalendar SET horarios = ? WHERE act = ? ''', (horariosJSON, self.activity))
message = "Message: {}, ".format(horariosJSON)
if description is not None:
cursor.execute(
'''UPDATE activityCalendar SET description = ? WHERE act = ? ''', (description, self.activity))
message += "{}, ".format(description)
self.description = description
if vCalendar is not None:
cursor.execute(
'''UPDATE activityCalendar SET vCalendar = ? WHERE act = ? ''', (vCalendar, self.activity))
message += "{}, ".format(vCalendar)
self.vCalendar = vCalendar
message += "added to {}".format(self.activity)
db.commit()
except sqlite3.IntegrityError as e:
db.rollback()
raise e
except sqlite3.OperationalError as e:
db.rollback()
raise e
finally:
locals()
cursor.close()
def loadReg(self):
"""Method that creates an Horario object from current activity and database data"""
areg = getActivityRegister(self.database,self.activity)
horarios = json.loads(getActivityRegister(self.database,self.activity)[1])
h = horarios['horarios']
# Get all keys from 'horarios'
keys = list(h.keys())
# Get the first key and create the object
key = keys.pop()
objetoHorario = Horario(self.activity, key, h[key][0], h[key][1], h[key][2])
# Next, get all other keys and populate the object with data from ddbb
while len(keys)>0:
key = keys.pop()
objetoHorario.addAppointment(key,h[key][0], h[key][1], h[key][2])
return objetoHorario
def getParticipantsName(self):
"""Get all names and expire date from participants, from current database""" #and current initHour,activity
creditsObj = list()
for phone in self.participants:
phoneNumber, name, activityCreditsExpire, vCard = getUserRegister(self.database,phone)
activityDict = json.loads(activityCreditsExpire)
credits,expDate = activityDict[self.activity].split('@')
creditsObj.append(VencimientosCreditos(name,float(expDate),credits,phoneNumber))
return creditsObj
def getName(self,phoneNumber):
return getUserRegister(self.database,phoneNumber)
def createVcard(name,phone):
"""Create vcard formated string with name (given and family) and phoneNumber given"""
import vobject
j = vobject.vCard()
j.add('n')
[nombrePila,apellido] = name.split(' ')
j.n.value = vobject.vcard.Name( family=apellido, given=nombrePila )
j.add('fn')
j.fn.value = name
j.add('tel')
j.tel.value = phone
return j.serialize()
def createUserRegisterFromVCard(database,vCard,activity=None,credit=None,expDate=None):
import vobject
# if (activity or credit) is not None: return "You must give both values: activity and credits. Or you can give nither of them"
vcObj = vobject.readOne(vCard)
name = vcObj.contents['fn'][0].value
phonedata = vcObj.contents['tel'][0].value
phone = phonedata.lstrip('+').replace(' ','').replace('-','') #Not very elegant, but ...
createUserRegisterDB(database,
phone,
name,
activity,
credit,
vCard,
expDate)
def createHumanDate(day,month,hour,mins):
"""Create an epoch datetime from date and time given in a non standard way"""
# datetime.datetime(2016,8,02,18,18,18).strftime("%A %d %B %Y")
# 'martes 02 agosto 2016
# IMPORTANT: The locale should be already set by now. i.e. locale.setlocale(locale.LC_ALL,'es_AR.utf8')
# ABANDONED: this part of the code was dropped in favor of a much more KISS approach
import locale
import datetime
import time
import calendar
daysOfTheWeek = map(lambda d: d.lower(), list(calendar.day_name))
if dayNumber is None:
dayNumber = None
def createActivityRegister(
database,
activity,
initHour=None,
endHour=None,
quota='1',
description=None,
vCalendar=None):
"""
Create an entry in the table with 'activity' and a schedule.
Each activity table can hold more than one schedule, for example:
Monday 18:30, Monday 20:30, Tuesday 9:00, Wednesday 13:00
quota: maximum number of participants allowed per schedule (e.g. 8 people
can attend at 13:00 but 20 at 16:00)
TODO: Always lower-case the days of the week. Ensure am/pm and/or
24h format for pm. That probably belongs in an upper layer.
"""
# Build the Horario object:
objetoHorario = Horario(activity,initHour,endHour,quota)
#print(objetoHorario.__dict__)
horarios = json.dumps(objetoHorario, default=jdefault)
try:
db = sqlite3.connect(database)
cursor = db.cursor()
cursor.execute(
'''INSERT INTO activityCalendar(act, horarios, quota, description, vCalendar)
VALUES(?,?,?,?,?)''', (activity, horarios, quota, description, vCalendar))
print("DEBUG: Register %s, %s, %s, %s , %s added"% (activity, horarios, quota, description, vCalendar))
db.commit()
except sqlite3.IntegrityError as e:
db.rollback()
print("Existing record...")
print(e) # Are the raises necessary? Removed just in case, so it does not fail.
except sqlite3.OperationalError as e:
db.rollback()
print("DEBUG: Diferenciar el tipo de error")
raise e
finally:
cursor.close()
def modifyActivityRegister(
database,
activity,
initHour,
endHour=None,
quota='1',
participants=None,
description=None,
vCalendar=None):
"""
Modify some value of the table:
schedules, maximum number of participants (quota)
TODO: Check the integrity (format) of the data before saving.
horarios: "{'Dia':'(HoraComienzo,HoraFin,participants,quota),(HoraComienzo2,HoraFin2,participants,quota2),...)'}"
horarios includes the maximum number of participants and the list of enrolled phone numbers
Dia, HoraComienzo, horaFin, quota: are UTF-8 strings
participants is a set, including the participants' telephone numbers
Dia: lunes|martes|... in lower case.
Hora: HH:MM
"""
# First, fetch whatever is already in the database
activityRegister = getActivityRegister(database, activity)
if activityRegister[0] == activity:
# Then turn it into a Horario object
horarios = json.loads(activityRegister[1])
h = horarios['horarios']
for key in h.keys():
objetoHorario = Horario(activity, key, h[key][0], h[key][1], h[key][2])
#print("dentro el for")
#print(h[key][2])
#print(objetoHorario.horarios[key][2])
# Recover the existing values so they are not overwritten later (only the modified one changes)
if initHour == key:
participantsReg = objetoHorario.horarios[key][2]
#print("aca")
#print objetoHorario.horarios
#print(participantsReg)
if participants is not None:
#print("New participants, but recovering old ones: {}".format(participantsReg))
##print(participantsReg)
participantsReg.update(participants)
if endHour == None:
endHour = objetoHorario.horarios[key][0]
if quota == '1':
quota = objetoHorario.horarios[key][1]
else:
print("Appointment {key} is not going to be modified".format(key))
#print("{}, {}, {}, {}".format(key, h[key][0],h[key][1],participantsReg))
# The object is ready; now it can be updated:
objetoHorario.addAppointment(initHour,endHour,quota, participantsReg)
horariosJSON = json.dumps(objetoHorario, default=jdefault)
#print(horariosJSON)
else:
return "Message: Not such activity defined"
try:
db = sqlite3.connect(database)
cursor = db.cursor()
# Aca va un update only horario column.
cursor.execute(
'''UPDATE activityCalendar SET horarios = ? WHERE act = ? ''', (horariosJSON, activity))
message = "Message: {}, ".format(horariosJSON)
if description is not None:
cursor.execute(
'''UPDATE activityCalendar SET description = ? WHERE act = ? ''', (description, activity))
message += "{}, ".format(description)
if vCalendar is not None:
cursor.execute(
'''UPDATE activityCalendar SET vCalendar = ? WHERE act = ? ''', (vCalendar, activity))
message += "{}, ".format(vCalendar)
message += "added to {}".format(activity)
db.commit()
except sqlite3.IntegrityError as e:
db.rollback()
raise e
except sqlite3.OperationalError as e:
db.rollback()
raise e
finally:
cursor.close()
return message
def addActivityParticipant(
database,
activity,
initHour,
telephone):
return modifyActivityParticipant(database,activity,initHour,telephone)
def getActivitiesNames(database):
"""
Return all activities' names.
return: str with all activities' names
"""
db = sqlite3.connect(database)
cursor = db.cursor()
c = cursor.execute('SELECT * FROM activityCalendar')
activities = list()
for register in c.fetchall(): # TODO: This is very ram consuming...
activities.append(register[0]) # Try other approach
return activities
def getActivityRegister(database, activity):
db = sqlite3.connect(database)
actividad= (activity,) # Safe way to retrieve data from database
cursor = db.cursor()
lista = cursor.execute('SELECT * FROM activityCalendar WHERE act=?',actividad).fetchone()
cursor.close()
return lista # I could return data as: Name, activity (n credits expire on 'expireDate')
def createAppointmentDB(database):
"""
Database's name should be related to the client's market
Creates a database with: phone, name, activityCreditExpireDate, vCard
Phone number should include international code
"""
try:
db = sqlite3.connect(database)
# Get a cursor object
cursor = db.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS cuentaUsuarios(phone TEXT PRIMARY KEY,
name TEXT, activityCreditExpire TEXT, vCard TEXT)
''')
# For now only act and horarios are used; horarios stores a JSON-serialized Horario object
cursor.execute('''
CREATE TABLE IF NOT EXISTS activityCalendar(act TEXT PRIMARY KEY,
horarios TEXT, quota TEXT, description TEXT, vCalendar TEXT)
''')
db.commit()
except Exception as e:
# Roll back any change if something goes wrong
db.rollback()
raise e
finally:
# Close the connection database
cursor.close()
def createUserRegisterDB(
database=None,
phone=None,
name=None,
activity=None,
credit=None,
vCard=None,
expDate=None):
"""
activity, credit and the expire date are stored
in a dictionary using activity as the string key.
Credit and ExpireDate are stored together as credit@expireDate.
expireDate is calculated on the fly when credits are added.
Example: '{"act1" : "cred1@date1", "act2" : "cred2@date2", "act3" : ... }'
expDate should be in a defined format (for example: DD-MM-YYYY)
"""
if vCard is not None:
vCard = vCard.encode('utf-8')
db = sqlite3.connect(database)
# Get a cursor object
cursor = db.cursor()
if not activity == None:
# Expire date
if expDate == None:
expDate = str(time() + 2628000) #2628000 secs in a month 365.0/12
else:
print("TODO: Check if date is in the defined format DD-MM-YYYY, and convert it to epoch time")
# Format activity, credits, expDate:
if credit == None: credit = 0 # We have activity but credit was not a given parameter, so make it zero
creditAtDate = '@'.join((credit,expDate))
dictActivity = {activity: creditAtDate}
activityCreditExpire = json.dumps(dictActivity)
else:
activityCreditExpire = activity # None
try:
t = (phone, name, activityCreditExpire, vCard)
cursor.execute('''INSERT INTO cuentaUsuarios(phone, name, activityCreditExpire, vCard)
VALUES(?,?,?,?)''', t)
db.commit()
print("Register %s, %s, %s, %s added"% t)
except sqlite3.IntegrityError as e:
db.rollback()
return "WARNING: Phone number {} already exists! Nothing done.".format(phone)
#raise e
except sqlite3.OperationalError as e:
db.rollback()
print(e)
raise e
finally:
cursor.close()
return "New Register Done..."
def getUserRegister(database,phoneNumber):
"""Returns (phone, name, activityCreditsExpire,vCard) from database"""
try:
t = (phoneNumber,)
db = sqlite3.connect(database)
cursor = db.cursor()
c = cursor.execute('SELECT * FROM cuentaUsuarios WHERE phone=?', t)
fetchedData = c.fetchone()
except Exception as e:
# Roll back any change if something goes wrong; is this really necessary
# if I'm only reading from the database?
db.rollback()
raise e
finally:
cursor.close()
return fetchedData # Should I return data as: Name, activity (n credits expire on 'expireDate')?
def humanActivityCreditsExpire(activityCreditsExpire):
"""Will give you a dictionary with the activity as the key, and a tuple with credits and expire date
in human format. The input param must have the output format of getUserRegister for activityCredditsExpire"""
activitiesAndCreditsExpireDatesDictionary = dict()
activityCreditsExpireDict = json.loads(activityCreditsExpire)
for activity in activityCreditsExpireDict:
credits,expireDate = activityCreditsExpireDict[activity].split('@')
activityCreditsExpireDict[activity] = (credits,formatDate(localtime(float(expireDate))[0:5])[1])
return activityCreditsExpireDict
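# Hedged editorial example: for a stored value such as
#   '{"yoga": "8@1470150000.0"}'   (hypothetical activity and epoch)
# this returns something like
#   {"yoga": ("8", "martes 02 agosto 2016")}
# with the exact date string depending on the configured locale.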
def modifyRegisterCredit(database, phoneNumber, activity, newCredits, name=None, vCard=None):
"""
This function can be used to add new fields or update existing ones.
When adding new credits, it updates for 1 month the expire date.
TODO: Take into account the type of each field (credits should be an int, ...)
"""
# First I get the whole register using that phone number
(phone, name, activityCreditsExpire,vCard) = getUserRegister(database,phoneNumber)
# Get activities' list
if not activityCreditsExpire == None:
print("tipo %s y valor: %s" % (type(activityCreditsExpire),activityCreditsExpire))
activityDict= json.loads(activityCreditsExpire)
(credits, oldExpireDate) = activityDict[activity].split('@')
else:
activityDict = {}
credits = newCredits
####### Create new expire date if adding credits
if int(newCredits) > 0: # When adding new credits, update expire date of credits
expireDate = repr(time() + 2628000) # Next month (30 days + 10 hs.) at this time in epoch format
else:
expireDate = oldExpireDate # Don't modify expire date because we are not adding new credits
#######
# Get credits and activity's phoneNumber)
# Find activity
if activity in activityDict: # update only credits
credits = str( int(credits) + int(newCredits) ).encode('utf-8')
activityDict[activity] = '@'.join((credits,expireDate))
fechaHumana = strftime("%d %b %Y", localtime(float(expireDate)))
print("En {0} tiene {1} creditos hasta el {2} inclusive".format(activity, credits, fechaHumana))
# Now update register with new credit and activity data
try:
db = sqlite3.connect(database)
cursor = db.cursor()
cursor.execute('''UPDATE cuentaUsuarios SET activityCreditExpire = ? WHERE phone = ? ''',
(json.dumps(activityDict), phone))
db.commit()
except Exception as e:
# Roll back any change if something goes wrong
db.rollback()
raise e
finally:
cursor.close()
#db = sqlite3.connect('j1rn4s10')
# Get a cursor object
#cursor = db.cursor()
#cursor.execute('''SELECT phone, name, activityCreditExpire FROM cuentaUsuarios''')
#user1 = cursor.fetchone() #retrieve the first row
#print(user1[0]) #Print the first column retrieved(user's name)
#all_rows = cursor.fetchall()
#for row in all_rows:
# # row[0] returns the first column in the query (name), row[1] returns email column.
# print('{0} : {1}, {2}'.format(row[0], row[1], row[2]))
def jdefault(o):
if isinstance(o, set):
return list(o)
return o.__dict__
# https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
def formatDate(timeTuple):
""" Returns a tuple with the hour in HH:MM (AM|PM) format,
Locale’s appropriate date and time representation (e.g. Mon Sep 30 07:06:05 2013)
and seconds since the epoch in localtime if date is also given as parameter.
"""
import datetime #Read the docs: https://docs.python.org/2/library/datetime.html
from time import mktime #Read the docs: https://docs.python.org/2/library/time.html#module-time
horaFecha = None
Fecha_str = None
year,month,day,hour,minute =timeTuple
if (day or month or year) is not None:
t = datetime.datetime.combine(datetime.datetime(year,month,day),
datetime.time(hour,minute))
Fecha_str = t.strftime("%A %d %B %Y")
horaFecha = mktime(t.timetuple())
hora_str = datetime.time(hour,minute).strftime("%H:%M%p")
return (hora_str,Fecha_str,horaFecha)
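# Hedged editorial example: for the hypothetical tuple (2016, 8, 2, 18, 30),
# formatDate returns approximately
#   ('18:30...', 'martes 02 agosto 2016', <epoch seconds for that local time>)
# with the exact strings depending on the es_AR locale set at module import.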
def dateTime2EpochString(datetimeObj):
from time import mktime
return str(int(mktime(datetimeObj.timetuple())))
def dateTime2Epoch(datetimeObj):
from time import mktime
return mktime(datetimeObj.timetuple())
class HandleDateTime(datetime):
def __init__(self,ano,mes,dia,hora=0,minuto=0):
print(datetime.ctime(datetime(ano,mes,dia,hora,minuto)))
class VencimientosCreditos:
def __init__(self,name,epochDate,credits,phone):
self.name = name
self.credits = credits
self.epochDate = float(epochDate)
self.phone = phone
y,m,d,h,mi,s = localtime(self.epochDate)[0:6]
#expireAndNames[epochDate] = name+','+'@'.join((credits,self.expDate.)
self.expDate = datetime(y,m,d,h,mi,s).strftime("%c").decode('utf-8')
def __repr__(self):
return repr((self.name, self.credits, self.expDate,self.phone))
# TODO: Create a method to get all initHour values within a given range. DONE periodReport
# TODO: Create a method that builds the initHour from human-friendly data (e.g. Monday 18:00). DONE formatDate
# TODO: Create a method that offers available appointments for the current day, or for the day given as a parameter. DONE reportAvailableAppointments
| gpl-3.0 | -3,729,589,565,397,309,400 | 40.734745 | 167 | 0.636052 | false |
team-xue/xue | xue/accounts/migrations/0005_auto__add_field_dmuserprofile_location.py | 1 | 5137 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DMUserProfile.location'
db.add_column('accounts_dmuserprofile', 'location', self.gf('django.db.models.fields.CharField')(max_length=256, null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'DMUserProfile.location'
db.delete_column('accounts_dmuserprofile', 'location')
models = {
'accounts.dmuserprofile': {
'Meta': {'object_name': 'DMUserProfile'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'ethnic': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'join_date': ('django.db.models.fields.DateField', [], {}),
'language': ('django.db.models.fields.CharField', [], {'default': "'zh'", 'max_length': '5'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'major': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
'realname': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'role': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts']
| bsd-3-clause | -5,484,120,963,883,985,000 | 65.592105 | 182 | 0.550127 | false |
lonelyandrew/IRC | proj_1/iitable/iitable.py | 1 | 8329 | #! /usr/bin/env python3
import re
import os
class WordChain:
'''The element in inverted index table that hold a word.
Attributes:
word: The word of the chain.
freq: How many docs that the word appear, namely frequence.
head: The first node in the Chain.
'''
def __init__(self, word, freq=0):
'''Inits the chain with a word.
'''
self.word = word
self.freq = freq
self.head = None
self.tail = None
def insert_node(self, node):
'''Insert a node into the chain.
Args:
node: The node that will be inserted.
Raises:
ValueError: An error occured when the node is None,
or the node has a less id compared with the tail,
i.e. that the chain must be ordered.
'''
if node is None:
raise ValueError('the inserting node cannot be None.')
if self.tail is not None and self.tail.doc_id >= node.doc_id:
raise ValueError('the inserting node have the wrong order.')
if self.head is None:
self.head = node
else:
self.tail.next = node
self.tail = node
self.freq += 1
def union(self, chain):
'''Union two word chains.
Args:
chain: The chain to be unioned.
Returns:
A new chain which united self and chain.
Raises:
ValueError: when the chain to be unioned is itself.
'''
if self.word == chain.word:
raise ValueError('the chain cannot be unioned with itself')
node_one = self.head
node_two = chain.head
new_word = '{0!s} OR {1!s}'.format(self.word, chain.word)
new_chain = WordChain(new_word)
while (node_one is not None) and (node_two is not None):
min_node = min(node_one, node_two)
new_node = min_node.copy()
new_chain.insert_node(new_node)
if node_one < node_two:
node_one = node_one.next
elif node_one > node_two:
node_two = node_two.next
else:
node_one = node_one.next
node_two = node_two.next
node_remain = node_one if node_two is None else node_two
while node_remain is not None:
new_node = node_remain.copy()
new_chain.insert_node(new_node)
node_remain = node_remain.next
return new_chain
def intersection(self, chain):
'''Intersect two chains.
Args:
chain: The chain to be intersected.
Returns:
A new chain which intersect self and chain.
Raises:
ValueError: when the chain to be intersected is itself.
'''
if self.word == chain.word:
raise ValueError('the chain cannot be intersected with itself')
node_one = self.head
node_two = chain.head
new_word = '{0!s} AND {1!s}'.format(self.word, chain.word)
new_chain = WordChain(new_word)
while (node_one is not None) and (node_two is not None):
if node_one == node_two:
new_node = node_one.copy()
new_chain.insert_node(new_node)
node_one = node_one.next
node_two = node_two.next
elif node_one > node_two:
node_two = node_two.next
else:
node_one = node_one.next
return new_chain
def diff(self, chain):
'''Get a complement of chain in self.
Args:
chain: the compared chain.
Returns:
A new chain have elements which are in self but not in chain.
Raises:
ValueError: when the chain to be diffed is itself.
'''
if self.word == chain.word:
raise ValueError('the chain cannot be compared with itself')
node_one = self.head
node_two = chain.head
new_word = '{0!s} AND NOT {1!s}'.format(self.word, chain.word)
new_chain = WordChain(new_word)
while (node_one is not None) and (node_two is not None):
if node_one == node_two:
node_one = node_one.next
node_two = node_two.next
elif node_one < node_two:
new_node = node_one.copy()
new_chain.insert_node(new_node)
node_one = node_one.next
else:
node_two = node_two.next
while node_one is not None:
new_node = node_one.copy()
new_chain.insert_node(new_node)
node_one = node_one.next
return new_chain
def __str__(self):
chain_str = '({0!s}, freq:{1:d}) *'.format(self.word, self.freq)
if self.head is not None:
node_to_print = self.head
while node_to_print is not None:
chain_str += ' --> '
chain_str += str(node_to_print)
node_to_print = node_to_print.next
return chain_str
class Node(object):
'''The nested class acts as a node in the chain.
Attributes:
doc_id: The id the doc which contains the word.
next: The next node in the chain, if the node is at the end of the
chain, it will be None.
'''
def __init__(self, doc_id: int=0):
'''Inits the node with an integer doc id.
'''
self.doc_id = doc_id
self.next = None
def __str__(self):
return str(self.doc_id)
def __lt__(self, other):
return self.doc_id < other.doc_id
def __gt__(self, other):
return self.doc_id > other.doc_id
def __eq__(self, other):
return self.doc_id == other.doc_id
def __le__(self, other):
return self.doc_id <= other.doc_id
def __ge__(self, other):
return self.doc_id >= other.doc_id
def __ne__(self, other):
return self.doc_id != other.doc_id
def copy(self):
'''Return a new node with the same doc id.
'''
return WordChain.Node(self.doc_id)
def process_doc(doc_location, doc_id):
'''Process the single doc into pairs of word and doc id.
Args:
doc_location: The location of the doc.
doc_id: The id of the doc.
Yields:
word: The word appears in the doc.
doc_id: The id of the doc.
'''
word_list = set()
p = re.compile('[^\W_]+')
with open(doc_location) as doc:
for line in doc:
word_list.update(p.findall(line.lower()))
for word in word_list:
yield word, doc_id
def build_sitable(doc_list):
'''Generate sorted index table with multilple docs.
Args:
doc_list: A list contains several process_doc generator.
Yields:
row: The single row of the sorted index table.
'''
items = []
for doc in doc_list:
for word, doc_id in doc:
items.append((word, doc_id))
items.sort()
for item in items:
yield item[0], item[1]
def build_iitable(sorted_table):
'''Build the inverted index table with a sorted table.
Args:
sorted_table: The sorted table built before with docs.
Returns:
A dict whose keyis are words and the value of the
single key is a word chain.
'''
iv_table = {}
for word, doc_id in sorted_table:
if word not in iv_table:
iv_table[word] = WordChain(word)
node = WordChain.Node(doc_id)
iv_table[word].insert_node(node)
return iv_table
def fetch_iitable():
'''Get the iitable with specific documents.
'''
doc_path_list = [doc_loc(i+1) for i in range(3)]
doc_list = (process_doc(doc_path_list[i], i + 1) for i in range(3))
return build_iitable(build_sitable(doc_list))
def doc_loc(doc_id):
'''Get the location of the doc with certain id.
Args:
doc_id: The id of the doc, normally which is a number.
Returns:
A string of the absolute path to the doc file.
'''
file_name = '../data/pp_{0:d}.txt'.format(doc_id)
path = os.path.join(os.path.dirname(__file__), file_name)
return path
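# --- Editorial usage sketch (not in the original module) ---------------------
# A small, self-contained demonstration of the WordChain API defined above;
# the words and doc ids below are invented purely for illustration.
if __name__ == '__main__':
    apple = WordChain('apple')
    for doc_id in (1, 3, 5):
        apple.insert_node(WordChain.Node(doc_id))
    pear = WordChain('pear')
    for doc_id in (2, 3, 4):
        pear.insert_node(WordChain.Node(doc_id))
    print(apple.union(pear))         # docs containing 'apple' OR 'pear'
    print(apple.intersection(pear))  # docs containing both words
    print(apple.diff(pear))          # docs containing 'apple' but not 'pear'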
| mit | -9,006,407,240,480,431,000 | 28.327465 | 78 | 0.541001 | false |
mllnd/iot-sample | main.py | 1 | 1302 | import socket
from machine import Pin
led_pin = Pin(5, Pin.OUT)
CONTENT = """\
HTTP/1.0 200 OK
Content-Type: text/html
<html>
<head>
</head>
<body>
<p>Hello #%d from MicroPython!</p>
<a href="/toggle">Click here to toggle LED hooked to pin 5</a>
</body>
</html>
"""
def main():
s = socket.socket()
port = 8080
ai = socket.getaddrinfo("0.0.0.0", port)
print("Bind address info:", ai)
addr = ai[0][-1]
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(addr)
s.listen(5)
print("Listening, connect your browser to http://<host>:"+str(port)+"/")
counter = 0
while True:
sock, addr = s.accept()
print("Client address:", addr)
stream = sock.makefile("rwb")
req = stream.readline().decode("ascii")
method, path, protocol = req.split(" ")
print("Got", method, "request for", path)
if path == "/toggle":
led_pin.value(1-led_pin.value())
while True:
h = stream.readline().decode("ascii").strip()
if h == "":
break
print("Got HTTP header:", h)
stream.write((CONTENT % counter).encode("ascii"))
stream.close()
sock.close()
counter += 1
print()
main() # ctrl+c to exit
| mit | -9,214,082,512,754,436,000 | 24.038462 | 76 | 0.548387 | false |
tldavies/RackHD | test/tests/rackhd11/test_rackhd11_api_config.py | 1 | 3406 | '''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import os
import sys
import subprocess
# set path to common libraries
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/common")
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd11_api_config(fit_common.unittest.TestCase):
def test_api_11_config(self):
api_data = fit_common.rackhdapi('/api/1.1/config')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
# check required fields
self.assertIn('PATH', api_data['json'], 'PATH field error')
self.assertIn('amqp', api_data['json'], 'amqp field error')
self.assertIn('apiServerAddress', api_data['json'], 'apiServerAddress field error')
self.assertIn('apiServerPort', api_data['json'], 'apiServerPort field error')
self.assertIn('broadcastaddr', api_data['json'], 'broadcastaddr field error')
self.assertIn('CIDRNet', api_data['json'], 'CIDRNet field error')
self.assertIn('subnetmask', api_data['json'], 'subnetmask field error')
self.assertIn('mongo', api_data['json'], 'mongo field error')
def test_api_11_config_httpendpoints(self):
api_data = fit_common.rackhdapi('/api/1.1/config')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
self.assertIn('httpEndpoints', api_data['json'], 'httpEndpoints field list error')
# verify both northbound and southbound endpoints are configured (as a minimum)
for endpoint in api_data['json']['httpEndpoints']:
self.assertIn('address', endpoint, 'missing httpEndpoints address field')
self.assertIn('authEnabled', endpoint, 'missing httpEndpoints authEnabled field')
self.assertIn('httpsEnabled', endpoint, 'missing httpEndpoints httpsEnabled field')
self.assertIn('proxiesEnabled', endpoint, 'missing httpEndpoints proxiesEnabled field')
self.assertIn('routers', endpoint, 'missing httpEndpoints routers field')
self.assertIn(endpoint['routers'], ['northbound-api-router', 'southbound-api-router'], 'unexpected httpEndpoints routers field')
def test_api_11_config_patch(self):
api_data_save = fit_common.rackhdapi('/api/1.1/config')['json']
data_payload = {"CIDRNet": "127.0.0.1/22"}
api_data = fit_common.rackhdapi("/api/1.1/config", action="patch", payload=data_payload)
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
for item in api_data['json']:
if fit_common.VERBOSITY >= 2:
print "Checking:", item
self.assertNotEqual(item, '', 'Empty JSON Field:' + item)
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
api_data = fit_common.rackhdapi("/api/1.1/config", action="patch", payload=api_data_save)
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
api_data = fit_common.rackhdapi('/api/1.1/config')
self.assertEqual(api_data['json'], api_data_save)
if __name__ == '__main__':
fit_common.unittest.main()
| apache-2.0 | 6,926,227,789,278,480,000 | 53.935484 | 140 | 0.662067 | false |
stephenliu1989/msmbuilder | msmbuilder/featurizer/subset.py | 1 | 6212 | import mdtraj as md
import numpy as np
from . import Featurizer, TrajFeatureUnion
class BaseSubsetFeaturizer(Featurizer):
"""Base class for featurizers that have a subset of active features.
n_features refers to the number of active features. n_max refers to the
number of possible features.
Parameters
----------
reference_traj : mdtraj.Trajectory
Reference Trajectory for checking consistency
subset : np.ndarray, default=None, dtype=int
The values in subset specify which of all possible features are active.
Notes
-----
As an example, suppose we have an instance that has `n_max` = 5. This
means that the possible features are subsets of [0, 1, 2, 3, 4]. One possible
subset is then [0, 1, 3]. The allowed values of subset (i.e. values below `n_max`)
are determined by the subclass--for example, `n_max` might be
the number of phi backbone angles.
"""
def __init__(self, reference_traj, subset=None):
self.reference_traj = reference_traj
if subset is not None:
self.subset = subset
else:
self.subset = np.zeros(0, 'int')
@property
def n_features(self):
return len(self.subset)
class SubsetAtomPairs(BaseSubsetFeaturizer):
"""Subset featurizer based on atom pair distances.
Parameters
----------
possible_pair_indices : np.ndarray, dtype=int, shape=(n_max, 2)
These are the possible atom indices to use for calculating interatomic
distances.
reference_traj : mdtraj.Trajectory
Reference Trajectory for checking consistency
subset : np.ndarray, default=None, dtype=int
The values in subset specify which of all possible features are
to be enabled. Specifically, atom pair distances are calculated
for the pairs `possible_pair_indices[subset]`
periodic : bool, optional, default=False
if True, use periodic boundary condition wrapping
exponent : float, optional, default=1.0
Use the distances to this power as the output feature.
See Also
--------
See `get_atompair_indices` for how one might generate acceptable atom pair
indices.
"""
def __init__(self, possible_pair_indices, reference_traj, subset=None, periodic=False, exponent=1.0):
super(SubsetAtomPairs, self).__init__(reference_traj, subset=subset)
self.possible_pair_indices = possible_pair_indices
self.periodic = periodic
self.exponent = exponent
if subset is None:
self.subset = np.zeros(0, 'int')
else:
self.subset = subset
@property
def n_max(self):
return len(self.possible_pair_indices)
def partial_transform(self, traj):
if self.n_features > 0:
features = md.geometry.compute_distances(traj, self.pair_indices, periodic=self.periodic) ** self.exponent
else:
features = np.zeros((traj.n_frames, 0))
return features
@property
def pair_indices(self):
return self.possible_pair_indices[self.subset]
class SubsetTrigFeaturizer(BaseSubsetFeaturizer):
"""Base class for featurizer based on dihedral sine or cosine.
Notes
-----
Subsets must be a subset of 0, ..., n_max - 1, where n_max is determined
by the number of respective phi / psi dihedrals in your protein, as
calcualted by mdtraj.compute_phi and mdtraj.compute_psi
"""
def partial_transform(self, traj):
if self.n_features > 0:
dih = md.geometry.dihedral.compute_dihedrals(traj, self.which_atom_ind[self.subset])
features = self.trig_function(dih)
else:
features = np.zeros((traj.n_frames, 0))
return features
@property
def n_max(self):
return len(self.which_atom_ind)
class CosMixin(object):
def trig_function(self, dihedrals):
return np.cos(dihedrals)
class SinMixin(object):
def trig_function(self, dihedrals):
return np.sin(dihedrals)
class PhiMixin(object):
@property
def which_atom_ind(self):
atom_indices, dih = md.geometry.dihedral.compute_phi(self.reference_traj)
return atom_indices
class PsiMixin(object):
@property
def which_atom_ind(self):
atom_indices, dih = md.geometry.dihedral.compute_psi(self.reference_traj)
return atom_indices
class SubsetCosPhiFeaturizer(SubsetTrigFeaturizer, CosMixin, PhiMixin):
pass
class SubsetCosPsiFeaturizer(SubsetTrigFeaturizer, CosMixin, PsiMixin):
pass
class SubsetSinPhiFeaturizer(SubsetTrigFeaturizer, SinMixin, PhiMixin):
pass
class SubsetSinPsiFeaturizer(SubsetTrigFeaturizer, SinMixin, PsiMixin):
pass
class SubsetFeatureUnion(TrajFeatureUnion):
"""Mixtape version of sklearn.pipeline.FeatureUnion with feature subset selection.
Notes
-----
Works on lists of trajectories.
Has a hacky convenience method to set all subsets at once.
"""
@property
def subsets(self):
return [featurizer.subset for (_, featurizer) in self.transformer_list]
@subsets.setter
def subsets(self, value):
assert len(value) == len(self.transformer_list), "wrong len"
for k, (_, featurizer) in enumerate(self.transformer_list):
featurizer.subset = value[k]
@property
def n_max_i(self):
return np.array([featurizer.n_max for (_, featurizer) in self.transformer_list])
@property
def n_features_i(self):
return np.array([featurizer.n_features for (_, featurizer) in self.transformer_list])
@property
def n_featurizers(self):
return len(self.transformer_list)
@property
def n_max(self):
return np.sum([featurizer.n_max for (_, featurizer) in self.transformer_list])
@property
def n_features(self):
return sum([featurizer.n_features for (_, featurizer) in self.transformer_list])
class DummyCV(object):
"""A cross-validation object that returns identical training and test sets."""
def __init__(self, n):
self.n = n
def __iter__(self):
yield np.arange(self.n), np.arange(self.n)
def __len__(self):
return self.n
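# --- Hedged editorial sketch (not part of msmbuilder) ------------------------
# Rough illustration of SubsetAtomPairs; 'traj.pdb' and the pair indices are
# hypothetical placeholders, and real pairs would normally come from a helper
# such as the get_atompair_indices mentioned in the SubsetAtomPairs docstring.
#
#   import itertools
#   import numpy as np
#   import mdtraj as md
#
#   ref = md.load('traj.pdb')                       # hypothetical input file
#   pairs = np.array(list(itertools.combinations(range(10), 2)))
#   feat = SubsetAtomPairs(pairs, ref, subset=np.array([0, 1, 3]))
#   X = feat.partial_transform(ref)                 # shape (n_frames, 3)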
| lgpl-2.1 | -6,473,830,598,958,164,000 | 28.865385 | 118 | 0.661301 | false |
cornell-zhang/datuner | src/datuner.py | 1 | 15807 | #!/usr/bin/python
#==================================================
# run DATUNER
#===================================================
import uuid, platform
import sys, os, argparse, socket, pickle, subprocess, sqlite3, dispy, time
from threading import Thread
from datetime import datetime
DATUNER_HOME = os.environ['DATUNER_HOME']
sys.path.append(DATUNER_HOME + '/src')
pwd = os.getcwd()
sys.path.append(pwd)
from space_partition import *
#-------------------------------------------------
# parse parameters and read information from py
#-------------------------------------------------
parser = argparse.ArgumentParser(description='main script for DATuner')
parser.add_argument('-f', '--flow', type=str, dest='tool', choices=['vtr','vivado','quartus','custom'])
parser.add_argument('-b', '--budget', type=int, default=1, dest='limit')
parser.add_argument('-t', '--timeout', type=str, default='0.0d:0.0h:0.0m:7200.0s', dest='stop', help='format: 4d:2h:5m:9s')
parser.add_argument('-p', '--parallel', type=int, default=1, dest='pf')
args = parser.parse_args()
flow = args.tool #from -f
budget = args.limit #from -b
proc_num = args.pf #from -p maximum = # of available cpus
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
if os.path.exists(pwd + '/vtr.py') and flow == 'vtr':
from vtr import *
elif os.path.exists(pwd + '/vivado.py') and flow == 'vivado':
from vivado import *
elif os.path.exists(pwd + '/quartus.py') and flow == 'quartus':
from quartus import *
elif os.path.exists(pwd + '/custom.py') and flow == 'custom':
from custom import *
else:
print "missing [tool_name].py under current folder"
sys.exit(1)
print '[ 0s] INFO the current workdir is: ' + pwd
#-------parameters check------
if flow == '':
print "Please specify the tool name."
sys.exit(1)
if flow == "vtr":
if os.path.exists(vtrpath +"/scripts") == False:
print "vtr path is not correct. Please check. The path should to point to .../vtr/vtr_release/vtr_flow"
sys.exit(1)
elif flow == 'vivado':
if top_module == '':
print "Vivado is used. Please specify the top module."
sys.exit(1)
elif flow == 'quartus':
if top_module == '':
print "Quartus is used. Please specify the top module."
sys.exit(1)
tune_cst = 0
if flow == 'vivado':
if eval(flow + '.modify_cst') == 'y' or eval(flow + '.modify_cst') == 'yes':
tune_cst = 1
if eval(flow + '.tune_cst') == '':
print "Please specify the default timing constraint."
sys.exit(1)
# parser the time limit
timelist = args.stop.split(':')
minute = 0
day = 0
sec = 0
hour = 0
for timer in range(len(timelist)):
if timelist[timer].endswith('s'):
sec = float(timelist[timer][0:-1])
if timelist[timer].endswith('d'):
day = float(timelist[timer][0:-1])
if timelist[timer].endswith('m'):
minute = float(timelist[timer][0:-1])
if timelist[timer].endswith('h'):
hour = float(timelist[timer][0:-1])
stoptime = int(sec + 60.0 * minute + 3600.0 * hour + 86400.0 * day)
print '[ 0s] INFO time limit: ' + str(stoptime) + ' seconds'
# a function to update stoptime variable for timeout checks
def updatetime(stoptime):
delta = time.time()-start_time
return stoptime - delta
#---------------------------
# Run DATuner
#---------------------------
# a list of all design points explored so far. Format of a design point:
# [[['param1', value1], ['param2', value2], ...], qor]
global_result = []
# a list of all the subspaces, format of a subspace:
# [[type, name, ['param1', range], ['param2', range], ...], score, frequency]
# 'frequency': number of times the subspace has been explored so far
subspaces = []
# Generates a list of sweep param combinations; Recursive function
# The sweep param combos are stored in sweeplist
sweeplist = []
def sweep(sweepparams, sweepset):
if not sweepparams:
sweeplist.append(sweepset)
return
else:
for param in sweepparams[0][1]:
next_sp = sweepparams[:]
del next_sp[0]
next_sweepset = sweepset[:]
next_sweepset.append([sweepparams[0][0], param])
sweep(next_sp, next_sweepset)
def sweep_function(run_id, flow, sweep, enums, genfile, top_module):
import subprocess, sys, os, time, pickle
os.system('mkdir -p ' + str(run_id))
os.system('cp package.zip ' + str(run_id))
os.chdir('./' + str(run_id))
os.system('unzip -o package.zip')
os.system('rm package.zip')
pickle.dump([enums, top_module], open('space.p', 'wb'))
pickle.dump([sweep, genfile], open('sweep.p', 'wb'))
os.system('python tune.py --test-limit=1')
msg, metadata, res = pickle.load(open('result.p', 'rb'))
return [sweep, res, metadata]
def tune_function(i, space):
import subprocess, sys, os, time, pickle
os.system('mkdir -p ' + str(i))
os.system('cp package.zip ' + str(i))
os.chdir('./' + str(i))
os.system('unzip -o package.zip')
os.system('rm package.zip')
pickle.dump(space, open('space.p', 'wb'))
os.system('python tune.py --test-limit=1 --parallelism=1')
msg, metadata, res = pickle.load(open('result.p', 'rb'))
return [msg, metadata, res]
# Extract the sweepparams
# Also checks to make sure that all other non sweep params are constant
sweepparams = []
sweepcnt = 1;
enum_not_fixed = False;
for param in space:
if param[0] == 'SweepParameter':
temp_param = copy.deepcopy(param)
del temp_param[0]
sweepparams.append(temp_param)
sweepcnt *= len(temp_param[1])
else:
if len(param[2]) != 1:
enum_not_fixed = True;
if enum_not_fixed and sweepcnt > 1:
print 'Both enum parameters and sweep parameters not fixed. One must be fixed!'
sys.exit()
if sweepcnt > 1:
start_time = datetime.now()
start_time_str = str(start_time.date()) + ' ' + str(start_time.time())
# Generate the list of sweep param combos
# Combos are stored in sweeplist
sweep(sweepparams, [])
numsweeps = len(sweeplist)
print 'Number of sweeps: ' + str(numsweeps)
sweeps_completed = 0
# Setup the results database and sweep points
# connect to database and create table is results table doesn't exist
dbconn = sqlite3.connect(dbfilename + '.db')
c = dbconn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=\'" + dbtablename + "\'")
table_exists = c.fetchone()
if table_exists is None:
# Generate the sweep parameter list string
sweepparam_names = ""
for param in sweepparams:
sweepparam_names = sweepparam_names + param[0] + ' int, '
c.execute('CREATE TABLE ' + dbtablename + ' (' + sweepparam_names + '''slack real,
comb_alut int, mem_alut int, reg int, bram int, dsp int, start_time text)''')
# Remove sweep points that have already been computed
# This is broken after implementation of variable number of sweepparams
# if not overwrite:
# for swept in c.execute('SELECT sweepparam FROM ' + dbtablename):
# if str(swept[0]) in space[-1][2]:
# space[-1][2].remove(str(swept[0]))
dbconn.close()
# Create a zip package with the necessary design and python files
os.system('rm package.zip')
os.system('rm -rf files')
os.system('mkdir files')
os.system('cp ' + DATUNER_HOME + '/src/tune.py files')
os.system('cp ' + flow + '.py files')
if (flow == "custom"):
os.system('cp -R ' + designdir + '/* files')
else:
os.system('cp ' + DATUNER_HOME + '/flows/' + flow + '/* files')
os.system('mkdir files/design')
os.system('cp -R ' + designdir + '/* files/design')
os.system('cd files; zip -r ../package.zip *')
# Initialize the job scheduler
# The dependence file is automatically sent to remote servers
cluster = dispy.JobCluster(sweep_function, depends = ['package.zip'],cleanup = False,loglevel=dispy.logger.DEBUG)
# copy files to and start up dispynode.py on worker machines
# this can be removed from release code if we assume users manually start dispy
for i in range(len(machines)):
machine_addr = machines[i % len(machines)]
subprocess.call(['scp', DATUNER_HOME + '/releases/Linux_x86_64/install/bin/dispynode.py', machine_addr + ':' +workspace]);
subprocess.Popen(['ssh', machine_addr, 'cd ' + workspace + \
'; python dispynode.py --serve 1 --clean --dest_path_prefix dispytmp_' + str(i)])
# Wait for the last node to be ready
time.sleep(3)
# send jobs to queue
jobs = []
for i in range(numsweeps):
selected_sweep = sweeplist[i]
job = cluster.submit(i, flow, selected_sweep, space, genfile, top_module)
job.id = i
jobs.append(job)
cluster.print_status()
cluster.wait() # waits for all scheduled jobs to finish
# Generate the sweep parameter list name string
# We are assuming that the ordering of sweep params are the same in sweepparams
# and elements of sweelist. This should always hold true unless the worker
# function modifies the sweep
# This can be fixed using pattern matching
sweepparam_names = ""
for param in sweepparams:
sweepparam_names = sweepparam_names + param[0] + ', '
column_names = '(' + sweepparam_names + '''slack, comb_alut, mem_alut, reg,
bram, dsp, start_time)'''
# reconnect to database to save results
dbconn = sqlite3.connect(dbfilename + '.db')
c = dbconn.cursor()
# Save results
for job in jobs:
sweep, res, metadata = job.result
comb_alut = str(metadata[0]).replace(',','')
mem_alut = str(metadata[1]).replace(',','')
reg = str(metadata[2]).replace(',','')
bram = str(metadata[3]).replace(',','')
dsp = str(metadata[4]).replace(',','')
# Generate the sweep parameter list value strings
sweepparam_vals = ""
for param in sweep:
sweepparam_vals = sweepparam_vals + param[1] + ', '
c.execute('INSERT INTO ' + dbtablename + ' ' + column_names + ''' VALUES
(''' + sweepparam_vals + str(res) + ''',
''' + str(comb_alut) + ''',''' + str(mem_alut) + ''',
''' + str(reg) + ''',''' + str(bram) + ''',
''' + str(dsp) + ''',''' + "'" + start_time_str + "'" + ''')''')
print("Sweepparam complete: " + str(sweep) + '\n')
dbconn.commit()
# Print results
t = (start_time_str,)
for result in c.execute('SELECT * FROM ' + dbtablename + ' WHERE start_time=? ORDER BY ' + sweepparams[0][0], t):
print result
dbconn.close()
cluster.print_status()
cluster.close()
else: #if not sweeping, datuner is tuning
start_time = time.time() #set start time
dbconn = sqlite3.connect(dbfilename + '.db')
c = dbconn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=\'res\'")
table_exists = c.fetchone()
if table_exists is None:
cfg_list = []
for i in space:
cfg_list.append(i[1] + ' text')
c.execute('CREATE TABLE res (' + ','.join(cfg_list) + ', QoR real)')
os.system('rm package.zip')
os.system('rm -rf files')
os.system('mkdir files')
os.system('cp ' + DATUNER_HOME + '/src/tune.py files')
os.system('cp ' + flow + '.py files')
if (flow == "custom"):
os.system('cp -R ' + designdir + '/* files')
else:
os.system('cp ' + DATUNER_HOME + '/flows/' + flow + '/* files')
os.system('mkdir files/design')
os.system('cp -R ' + designdir + '/* files/design')
os.system('cd files; zip -r ../package.zip *')
secret = uuid.uuid4()
cluster = dispy.JobCluster(tune_function,
depends = ['package.zip'],
secret = str(secret),
cleanup = False)
#dispy.jobCluster() creates and returns cluster
#sends tune_function to the given nodes (no nodes given). also broadcasts ID request to find nodes if none given.
#needs package.zip to run tune_function.
# copy files to and start up dispynode.py on worker machines
# this can be removed from release code if we assume users manually start dispy
for i in range(len(machines)):
machine_addr = machines[i % len(machines)]
platformArch = platform.system() + '_' + platform.machine()
# For Tesiting purpose on CircleCI (python -O datuner.py)
if not __debug__:
remoteCpy, remoteSsh = 'sshpass -p docker scp', 'sshpass -p docker ssh'
machineAddr = 'root@' + machine_addr
warningDisabler = ' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
remoteCpy += warningDisabler
remoteSsh += warningDisabler
else:
remoteCpy, remoteSsh = 'scp', 'ssh'
machineAddr = machine_addr
scpComm = (' '.join([remoteCpy, DATUNER_HOME + '/releases/' + platformArch + '/install/bin/dispynode.py', machineAddr + ':' +workspace])).split()
subprocess.call(scpComm)
#subprocess.call([remoteCpy, DATUNER_HOME + '/releases/' + platformArch + '/install/bin/dispynode.py', machineAddr + ':' +workspace]);
sshComm = (' '.join([remoteCpy, DATUNER_HOME + '/releases/' + platformArch + '/install/bin/dispynode.py', machineAddr + ':' +workspace])).split()
sshProcess = subprocess.Popen(sshComm,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
bufsize=0)
sshProcess.stdin.write("cd " + workspace + "\n")
# sshProcess.stdin.write("export PATH=" + DATUNER_HOME + "/releases/" + platformArch + "/install/bin" + ":$PATH" + "\n")
# sshProcess.stdin.write("python dispynode.py --serve 1 --clean --secret " + \
# str(secret) + " --dest_path_prefix dispytmp_" + str(i) + "\n")
sshProcess.stdin.close()
subprocess.Popen(["python dispynode.py --serve 1 --clean --secret " + \
str(secret) + " --dest_path_prefix dispytmp_" + str(i) + "\n"], shell=True)
# Wait for the last node to be ready
time.sleep(3)
#Set parallelization numbers
if budget < proc_num:
runs_per_epoch = budget
epoch = 1
elif budget % proc_num > 0:
runs_per_epoch = proc_num
epoch = (budget / runs_per_epoch)+1
else:
runs_per_epoch = proc_num
epoch = budget / runs_per_epoch
# add the initial space and a score of 0 and a frequency of 1
subspaces.append([space, 0, 1])
best_res, total_search_count = 1e9, 0
for e in range(epoch):
#Time check before new jobs are submitted
stoptime = updatetime(stoptime)
if stoptime <= 0:
print 'Timeout has been reached.'
sys.exit(1)
jobs = []
for i in range(runs_per_epoch):
total_search_count += 1
job = cluster.submit(i, select_space(total_search_count, subspaces, global_result))
job.id = i
jobs.append(job)
stoptime = updatetime(stoptime)
cluster.wait(timeout=updatetime(stoptime)) #start termination if jobs are in progress when timeout is reached.
if (budget % proc_num) > 0:
budget = budget - runs_per_epoch
if budget < runs_per_epoch:
runs_per_epoch = budget
stoptime = updatetime(stoptime)
#Start termination if necessary
if stoptime <= 0:
print 'Timeout has been reached. Do not forget to clear port 51348.'
for job in jobs:
cluster.cancel(job)
sys.exit(1)
# Save results
for job in jobs:
cfg, metadata, res = job.result
if res < best_res:
best_res = res
global_result.append([cfg, res])
cfg_val = []
for space_iter in space:
cfg_name = space_iter[1]
for i in cfg:
if cfg_name == i[0]:
cfg_val.append(str(i[1]))
c.execute("INSERT INTO res VALUES ('" + "','".join(cfg_val) + "'," + str(res) + ")")
dbconn.commit()
with open("global_result.txt", "a") as f:
f.write(','.join(str(i) for i in (cfg + metadata)) + ',' + str(best_res) + '\n')
# send request to host to partition the space
partition_space(subspaces, global_result)
# terminate the host
dbconn.close()
| bsd-3-clause | 382,880,850,861,905,300 | 35.505774 | 154 | 0.621687 | false |
bnose/cli | bnose.py | 1 | 2755 | import stat
import os
import click
import requests
BOTTLENOSE_API_URL = 'http://127.0.0.1:8000/api/'
def has_valid_permissions(path):
perm = oct(stat.S_IMODE(os.lstat(path).st_mode))
return perm == oct(0o600)
def get_token():
try:
tokenfile = os.path.join(os.environ['HOME'], '.bnose')
token = None
if os.path.isfile(tokenfile):
with open(tokenfile, 'r') as outfile:
token = outfile.read().rstrip()
if not has_valid_permissions(tokenfile):
# invalidate token
token = None
return token
except KeyError:
raise OSError('Could not find .bnose: $HOME is not set')
def get_headers(**kwargs):
headers = {
'user-agent': 'bnose/1.0'
}
headers.update(**kwargs)
return headers
def get_auth_headers():
return get_headers(**{'Authorization': 'Token %s' % get_token()})
def _request(endpoint, **kwargs):
url = '{base_api_url}{endpoint}'.format(
base_api_url=BOTTLENOSE_API_URL,
endpoint=endpoint
)
response = requests.post(url, headers=get_auth_headers(), data=kwargs)
output = response.json()
message = output['message']
if 'color' in output.keys():
color = output['color']
else:
color = 'green'
click.secho(message, fg=color)
@click.group()
def cli():
pass
@cli.command()
def login():
username = click.prompt('Username')
password = click.prompt('Password', hide_input=True)
endpoint = '%sauth/' % BOTTLENOSE_API_URL
response = requests.post(endpoint, data={'username': username, 'password': password}, headers=get_headers())
output = response.json()
if 'token' in output.keys():
try:
tokenfile = os.path.join(os.environ['HOME'], '.bnose')
with open(tokenfile, 'w') as outfile:
outfile.write(output['token'])
os.chmod(tokenfile, 0o600)
except KeyError:
raise OSError('Could not find .bnose: $HOME is not set')
click.echo(output)
@cli.command()
@click.option('--memo', '-m', default='')
@click.option('--project', '-p', default='')
def start(memo, project):
"""Start worklog tracking."""
_request('worklog/start/', **{'memo': memo, 'project__slug': project})
@cli.command()
def pause():
"""Pause worklog tracking."""
_request('worklog/pause/')
@cli.command()
def resume():
"""Resume worklog tracking."""
_request('worklog/resume/')
@cli.command()
def status():
"""Status of the current tracking session."""
_request('worklog/status/')
@cli.command()
def stop():
"""Stop worklog tracking."""
_request('worklog/stop/')
@cli.command()
def log():
click.echo('Log')
| apache-2.0 | -9,016,452,144,250,770,000 | 22.347458 | 112 | 0.598185 | false |
zippy84/vtkbool | examples/python/csg_model_making/zusammen.py | 1 | 5667 | #!/usr/bin/env python
# *-* coding: UTF-8 *-*
# Copyright 2012-2020 Ronald Römer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# export LD_LIBRARY_PATH=/home/zippy/VTK8/lib
import sys
sys.path.extend(['/home/zippy/VTK8/lib/python3.6/site-packages',
'/home/zippy/vtkbool/build'])
import vtkboolPython
import vtk
import math
import os
import re
from teile import Alignment, extrude, repeat, add_frame
def merge1():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test4.vtk')
fr = add_frame(r, [(-9.858333, 2.5, 'top'), (-29.575, 2.5, 'top'), (-49.291666, 2.5, 'top'), (-69.008333, 2.5, 'top'), (-88.725, 2.5, 'top')])
return fr
def merge2():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test5.vtk')
fr = add_frame(r, [(-9.208333, 2.5, 'top'), (-27.625, 2.5, 'top'), (-46.041666, 2.5, 'top'), (-64.458333, 2.5, 'top'), (-82.875, 2.5, 'top')])
return fr
def merge3():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test0.vtk')
r2 = vtk.vtkPolyDataReader()
r2.SetFileName('einzeln/test1.vtk')
tr = vtk.vtkTransform()
tr.RotateZ(90)
tp = vtk.vtkTransformPolyDataFilter()
tp.SetTransform(tr)
tp.SetInputConnection(r2.GetOutputPort())
bf = vtkboolPython.vtkPolyDataBooleanFilter()
bf.SetInputConnection(r.GetOutputPort())
bf.SetInputConnection(1, tp.GetOutputPort())
bf.DecPolysOff()
fr = add_frame(bf, [(-10.8333, 2.5, 'top'), (-32.5, 2.5, 'top')])
return fr
def merge4():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test6.vtk')
r2 = vtk.vtkPolyDataReader()
r2.SetFileName('einzeln/test7.vtk')
tr = vtk.vtkTransform()
tr.RotateZ(270)
tp = vtk.vtkTransformPolyDataFilter()
tp.SetTransform(tr)
tp.SetInputConnection(r2.GetOutputPort())
bf = vtkboolPython.vtkPolyDataBooleanFilter()
bf.SetInputConnection(r.GetOutputPort())
bf.SetInputConnection(1, tp.GetOutputPort())
bf.DecPolysOff()
fr = add_frame(bf, [(10.8333, 2.5, 'top'), (32.5, 2.5, 'top')])
return fr
def merge5():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test2.vtk')
fr = add_frame(r, [(-10.8333, 2.5, 'top'), (-32.5, 2.5, 'top')])
return fr
def merge6():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test8.vtk')
fr = add_frame(r, [(17.333, 2.5, 'top'), (34.666, 2.5, 'top')])
return fr
def merge7():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test14.vtk')
fr = add_frame(r, [(6.2291666, 2.5, 'top')])
return fr
def merge8():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test12.vtk')
r2 = vtk.vtkPolyDataReader()
r2.SetFileName('einzeln/test13.vtk')
tr = vtk.vtkTransform()
tr.RotateZ(90)
tp = vtk.vtkTransformPolyDataFilter()
tp.SetTransform(tr)
tp.SetInputConnection(r.GetOutputPort())
bf = vtkboolPython.vtkPolyDataBooleanFilter()
bf.SetInputConnection(r2.GetOutputPort())
bf.SetInputConnection(1, tp.GetOutputPort())
bf.DecPolysOff()
fr = add_frame(bf, [(-6.2291666, 2.5, 'top')])
return fr
def merge9():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test10.vtk')
fr = add_frame(r, [(-8.486111, 2.5, 'top'), (-25.458333, 2.5, 'top'), (-42.430555, 2.5, 'top')])
return fr
def merge10():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test11.vtk')
fr = add_frame(r, [(-9.1924, 2.5, 'top')])
return fr
def merge11():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test9.vtk')
fr = add_frame(r, [(4.291666, 2.5, 'top')])
return fr
def merge12():
r = vtk.vtkPolyDataReader()
r.SetFileName('einzeln/test3.vtk')
fr = add_frame(r, [(5.6875, 2.5, 'top')])
return fr
def create_rest():
pr = [(0, 0), (1.5, 0), (1.5, 1.875/3), (1.75, 1.875/3), (1.75, 2*1.875/3), (2, 2*1.875/3), (2, 1.875), (0, 1.875)]
extr = extrude(pr, 1.625)
tr = vtk.vtkTransform()
tr.Translate(1.875/2, 0, 0)
tr.RotateZ(90)
tp = vtk.vtkTransformPolyDataFilter()
tp.SetTransform(tr)
tp.SetInputConnection(extr.GetOutputPort())
return tp
if __name__ == '__main__':
p1 = merge1()
p2 = merge2()
p3 = merge3()
p4 = merge4()
p5 = merge5()
p6 = merge6()
p7 = merge7()
p8 = merge8()
p9 = merge9()
p10 = merge10()
p11 = merge11()
p12 = merge12()
A = Alignment(p1)
a2 = A.add_top(p2, p1)
a3 = A.add_top(p3, a2)
a4 = A.add_right(p4, a3)
a5 = A.add_top(p5, a3)
a6 = A.add_right(p6, a5)
a7 = A.add_top(p7, a5)
a8 = A.add_right(p8, a7)
a9 = A.add_right(p9, a8)
a10 = A.add_top(p10, a7)
a11 = A.add_right(p11, a10)
a12 = A.add_right(p12, a11)
holds = [ (i*2.875, 0, 'bottom') for i in range(12) ]
a13 = A.add_top(add_frame(repeat(create_rest(), 12), holds), a10)
A.write('band.vtk', 'band.stl')
os.makedirs('frames2', exist_ok=True)
for k, v in dict(locals()).items():
if k == 'p1' or re.match(r'a\d+', k):
w = vtk.vtkPolyDataWriter()
w.SetInputConnection(v.GetOutputPort())
w.SetFileName(f'frames2/{k}.vtk')
w.Update()
| apache-2.0 | -1,804,313,216,860,224,500 | 25.853081 | 146 | 0.616131 | false |
lol/BCI-BO-old | test2.py | 1 | 2067 | # import os
# # delete jobs on westgrid
# command = 'qdel '
# for i in range(16878874, 16880029):
# print command + str(i) + '.b0'
# # os.system(command + str(i) + '.b0')
# file_list = os.listdir('F:/results')
# for file in file_list:
# print 'java Friedman F:/results/'+file +' > F:/results/' + file[:-4]+'.tex'
# import os
# import paramiko
#
# ssh = paramiko.SSHClient()
# ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# # ssh.load_host_keys(os.path.expanduser(os.path.join("~", ".ssh", "known_hosts")))
# ssh.connect('bugaboo.westgrid.ca', username='hosseinb', password='hbs25418')
# sftp = ssh.open_sftp()
# sftp.put('C:/Users/hoss/Desktop/test.pdf', 'test.pdf')
# stdin, stdout, stderr = ssh.exec_command('ls')
# for line in stdout:
# print '... ' + line.strip('\n')
# sftp.close()
# ssh.close()
# import paramiko, base64
# key = paramiko.RSAKey(data=base64.decodestring('AAA...'))
# client = paramiko.SSHClient()
# client.get_host_keys().add('bugaboo.westgrid.ca', 'ssh-rsa', key)
# client.connect('bugaboo.westgrid.ca', username='hosseinb', password='hbs25418')
# stdin, stdout, stderr = client.exec_command('ls')
# for line in stdout:
# print '... ' + line.strip('\n')
# client.close()
import os
res_path = "~/BCI_Framework/results/BCICIV2a"
res_path = "../results/BCICIV2a"
dir_list = os.listdir(res_path)
dir_list = map((lambda x: os.path.join(res_path, x)), dir_list)
print dir_list
n_files = len(dir_list)
while n_files > 0:
cur_file = list.pop(dir_list)
if os.path.isdir(cur_file):
dir_list = map((lambda x: os.path.join(cur_file, x)), os.listdir(cur_file)) + dir_list
else:
# new_name = cur_file
# last_ind = new_name.rfind('_')
# new_name = new_name[0:last_ind] + '_ALL-1' + new_name[last_ind:]
# print cur_file, new_name
# os.system('mv ' + cur_file + " " + new_name)
#
if 'ALL-1' in cur_file:
print cur_file
os.system('rm ' + cur_file)
n_files = len(dir_list)
| gpl-3.0 | 4,893,311,164,411,763,000 | 30.318182 | 96 | 0.602322 | false |
vlegoff/tsunami | src/secondaires/navigation/editeurs/eltedit/__init__.py | 1 | 4581 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur 'eltedit'.
Si des redéfinitions de contexte-éditeur standard doivent être faites, elles
seront placées dans ce package
Note importante : ce package contient la définition d'un éditeur, mais
celui-ci peut très bien être étendu par d'autres modules. Au quel cas,
les extensions n'apparaîtront pas ici.
"""
from primaires.interpreteur.editeur import Editeur
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
from secondaires.navigation.elements import types
from primaires.format.fonctions import supprimer_accents, contient
from .presentation import EdtPresentation
class EdtEltedit(Editeur):
"""Classe définissant l'éditeur d'élément 'eltedit'.
"""
nom = "eltedit"
def __init__(self, personnage, identifiant="", attribut=None):
"""Constructeur de l'éditeur"""
if personnage:
instance_connexion = personnage.instance_connexion
else:
instance_connexion = None
Editeur.__init__(self, instance_connexion, identifiant)
self.personnage = personnage
self.identifiant = identifiant
def __getnewargs__(self):
return (None, None)
def accueil(self):
"""Message d'accueil de l'éditeur.
On affiche les types disponibles.
"""
identifiant = self.identifiant
noms_types = tuple(
type(self).importeur.navigation.types_elements.keys())
noms_types = sorted(noms_types)
return "|tit|Création du prototype {}|ff|\n\n".format(identifiant) + \
"Entrez |cmd|le type d'élément|ff| que vous souhaitez créer " \
"ou |cmd|a|ff| pour annuler.\n" \
"Le type choisi ne pourra pas être modifié par la suite, " \
"soyez prudent.\n\n" \
"Liste des types existants : |cmd|" + "|ff|, |cmd|".join(
noms_types) + "|ff|"
def get_prompt(self):
return "-> "
def interpreter(self, msg):
"""Interprétation du message"""
msg = msg.lower()
if msg == "a":
self.pere.joueur.contextes.retirer()
self.pere.envoyer("Opération annulée.")
else:
type_choisi = ""
p_types = type(self).importeur.navigation.types_elements
for nom in p_types.keys():
if contient(nom, msg):
type_choisi = nom
if not type_choisi:
self.pere << "|err|Ce type est inconnu.|ff|"
else:
choix = types[type_choisi]
self.element = type(self).importeur.navigation.creer_element(
self.identifiant, choix)
enveloppe = EnveloppeObjet(EdtPresentation, self.element, "")
contexte = enveloppe.construire(self.personnage)
self.migrer_contexte(contexte)
| bsd-3-clause | -7,056,274,713,615,746,000 | 38.95614 | 79 | 0.653128 | false |
maas/maas | src/maasserver/migrations/maasserver/0107_chassis_to_pods.py | 1 | 4048 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
import maasserver.models.cleansave
class Migration(migrations.Migration):
dependencies = [("maasserver", "0106_testing_status")]
operations = [
migrations.CreateModel(
name="PodHints",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
auto_created=True,
serialize=False,
primary_key=True,
),
),
("cores", models.IntegerField(default=0)),
("memory", models.IntegerField(default=0)),
("local_storage", models.BigIntegerField(default=0)),
("local_disks", models.IntegerField(default=-1)),
],
bases=(maasserver.models.cleansave.CleanSave, models.Model),
),
migrations.RemoveField(model_name="chassishints", name="chassis"),
migrations.DeleteModel(name="Chassis"),
migrations.DeleteModel(name="Storage"),
migrations.CreateModel(
name="Pod",
fields=[],
options={"proxy": True},
bases=("maasserver.bmc",),
),
migrations.AddField(
model_name="bmc",
name="architectures",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.TextField(),
size=None,
null=True,
default=list,
blank=True,
),
),
migrations.AddField(
model_name="bmc",
name="bmc_type",
field=models.IntegerField(
editable=False, choices=[(0, "BMC"), (1, "POD")], default=0
),
),
migrations.AddField(
model_name="bmc",
name="capabilities",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.TextField(),
size=None,
null=True,
default=list,
blank=True,
),
),
migrations.AddField(
model_name="bmc",
name="cores",
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name="bmc",
name="cpu_speed",
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name="bmc",
name="local_disks",
field=models.IntegerField(default=-1),
),
migrations.AddField(
model_name="bmc",
name="local_storage",
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name="bmc",
name="memory",
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name="bmc",
name="name",
field=models.CharField(
max_length=255, default="", unique=False, blank=True
),
),
migrations.AlterField(
model_name="node",
name="node_type",
field=models.IntegerField(
editable=False,
choices=[
(0, "Machine"),
(1, "Device"),
(2, "Rack controller"),
(3, "Region controller"),
(4, "Region and rack controller"),
],
default=0,
),
),
migrations.DeleteModel(name="ChassisHints"),
migrations.AddField(
model_name="podhints",
name="pod",
field=models.OneToOneField(
related_name="hints",
to="maasserver.BMC",
on_delete=models.CASCADE,
),
),
]
| agpl-3.0 | -1,416,959,160,371,569,200 | 30.379845 | 75 | 0.467638 | false |
datapythonista/pandas | pandas/tests/frame/methods/test_select_dtypes.py | 3 | 14319 | import numpy as np
import pytest
from pandas.core.dtypes.dtypes import ExtensionDtype
import pandas as pd
from pandas import (
DataFrame,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import ExtensionArray
class DummyDtype(ExtensionDtype):
type = int
def __init__(self, numeric):
self._numeric = numeric
@property
def name(self):
return "Dummy"
@property
def _is_numeric(self):
return self._numeric
class DummyArray(ExtensionArray):
def __init__(self, data, dtype):
self.data = data
self._dtype = dtype
def __array__(self, dtype):
return self.data
@property
def dtype(self):
return self._dtype
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, item):
pass
def copy(self):
return self
class TestSelectDtypes:
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6, dtype="u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
@pytest.mark.parametrize(
"include", [(np.bool_, "int"), (np.bool_, "integer"), ("bool", int)]
)
def test_select_dtypes_exclude_include_int(self, include):
# Fix select_dtypes(include='int') for Windows, FYI #36596
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6, dtype="int32"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
result = df.select_dtypes(include=include, exclude=exclude)
expected = df[["b", "c", "e"]]
tm.assert_frame_equal(result, expected)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
df = DataFrame(
{
"a": ["a", "b", "c"],
"b": [1, 2, 3],
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
{
"A": Timestamp("20130102", tz="US/Eastern"),
"B": Timestamp("20130603", tz="CET"),
},
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
@pytest.mark.parametrize(
"arr,expected",
(
(np.array([1, 2], dtype=np.int32), True),
(pd.array([1, 2], dtype="Int32"), True),
(DummyArray([1, 2], dtype=DummyDtype(numeric=True)), True),
(DummyArray([1, 2], dtype=DummyDtype(numeric=False)), False),
),
)
def test_select_dtypes_numeric(self, arr, expected):
# GH 35340
df = DataFrame(arr)
is_selected = df.select_dtypes(np.number).shape == df.shape
assert is_selected == expected
def test_select_dtypes_numeric_nullable_string(self, nullable_string_dtype):
arr = pd.array(["a", "b"], dtype=nullable_string_dtype)
df = DataFrame(arr)
is_selected = df.select_dtypes(np.number).shape == df.shape
assert not is_selected
| bsd-3-clause | -2,610,222,803,520,899,000 | 34.00978 | 86 | 0.482995 | false |
indexofire/gork | src/gork/contrib/gauth/templatetags/patch.py | 1 | 3642 | # -*- coding: utf-8 -*-
from distutils.version import StrictVersion
from django import get_version
if StrictVersion(get_version()) < '1.4':
from django.template import Node
from django.template import NodeList
from django.template import VariableDoesNotExist
from django.template import TemplateSyntaxError
from django.template.base import TextNode
# copied from django 1.4b
class IfNode(Node):
def __init__(self, conditions_nodelists):
self.conditions_nodelists = conditions_nodelists
def __repr__(self):
return "<IfNode>"
def __iter__(self):
for _, nodelist in self.conditions_nodelists:
for node in nodelist:
yield node
@property
def nodelist(self):
return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist)
def render(self, context):
for condition, nodelist in self.conditions_nodelists:
if condition is not None: # if / elif clause
try:
match = condition.eval(context)
except VariableDoesNotExist:
match = None
else: # else clause
match = True
if match:
return nodelist.render(context)
return ''
# copied from django 1.4b
def parse(self, parse_until=None):
if parse_until is None:
parse_until = []
nodelist = self.create_nodelist()
while self.tokens:
token = self.next_token()
# Use the raw values here for TOKEN_* for a tiny performance boost.
if token.token_type == 0: # TOKEN_TEXT
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token.token_type == 1: # TOKEN_VAR
if not token.contents:
self.empty_variable(token)
filter_expression = self.compile_filter(token.contents)
var_node = self.create_variable_node(filter_expression)
self.extend_nodelist(nodelist, var_node, token)
elif token.token_type == 2: # TOKEN_BLOCK
try:
command = token.contents.split()[0]
except IndexError:
self.empty_block_tag(token)
if command in parse_until:
# put token back on token list so calling
# code knows why it terminated
self.prepend_token(token)
return nodelist
# execute callback function for this tag and append
# resulting node
self.enter_command(command, token)
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command, parse_until)
try:
compiled_result = compile_func(self, token)
except TemplateSyntaxError, e:
if not self.compile_function_error(token, e):
raise
self.extend_nodelist(nodelist, compiled_result, token)
self.exit_command()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist
def parser_patch(instance):
instance.__class__.parse = parse
return instance
else:
from django.template.defaulttags import IfNode
parser_patch = lambda instance: instance
| mit | -6,789,246,199,401,640,000 | 41.847059 | 99 | 0.546952 | false |
hiteshagrawal/python | my-object.py | 1 | 1424 | #!/usr/bin/python
class PartyAnimal:
#x = 0
#name = ""
def __init__(self,nam):
self.x = 0
self.name = nam
print self.name, "I am constructed"
def party(self):
self.x += 1
print self.name,"So far", self.x
def __del__(self):
print self.name, "I am destructed", self.x
an = PartyAnimal("Hitesh")
an.party()
an.party()
an.party()
ab = PartyAnimal("Naresh")
ab.party()
an.party()
ab.party()
#Extending, Inheriting the partyanimal class
class FootballFan(PartyAnimal):
points = 0
def touchdown(self):
self.points += 7
self.party()
print self.name, "points", self.points
if __name__ == '__main__':
ac = FootballFan("Nidhi")
ac.touchdown()
class CricketFan(PartyAnimal):
def __init__(self,nam,score):
print("CricketFan Constructed")
PartyAnimal.__init__(self,nam)
self.score = score
def cricketscore(self):
self.score += 1
print(self.name, "runs", self.score)
Avni = CricketFan("Avni",5)
Avni.cricketscore()
class HockeyFan(PartyAnimal):
#Exception AttributeError: "HockeyFan instance has no attribute 'name'"
#in <bound method HockeyFan.__del__ of <__main__.HockeyFan instance at 0x1004dbdd0>> ignored
def __init__(self, nam, hscore):
print("HockeyFan Constructed")
PartyAnimal.__init__(self,nam)
self.hscore = int(hscore)
def hockeyscore(self):
self.hscore += 1
print "hockeyscore", self.hscore
yashvi = HockeyFan("Yashvi", 2)
yashvi.hockeyscore()
| gpl-2.0 | 3,900,743,497,929,278,500 | 19.342857 | 93 | 0.672753 | false |
dknlght/dkodi | src/script.module.urlresolver/lib/urlresolver/plugins/thevid.py | 1 | 2253 | """
Plugin for UrlResolver
Copyright (C) 2017 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from six.moves import reload_module
import urlresolver.plugins.thevid_gmu
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
logger = common.log_utils.Logger.get_logger(__name__)
logger.disable()
VID_SOURCE = r'https://raw.githubusercontent.com/jsergio123/script.module.urlresolver/master/lib/urlresolver/plugins/thevid_gmu.py'
VID_PATH = os.path.join(common.plugins_path, 'thevid_gmu.py')
class TheVidResolver(UrlResolver):
name = "TheVid"
domains = ["thevid.net", "thevid.tv", "thevid.live"]
pattern = r'(?://|\.)(thevid\.(?:net|tv|live))/(?:video|e|v)/([A-Za-z0-9]+)'
def get_media_url(self, host, media_id):
try:
self._auto_update(VID_SOURCE, VID_PATH)
reload_module(urlresolver.plugins.thevid_gmu)
web_url = self.get_url(host, media_id)
return urlresolver.plugins.thevid_gmu.get_media_url(web_url)
except Exception as e:
raise ResolverError('Exception during thevid.net resolve parse: %s' % e)
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='http://{host}/e/{media_id}/')
@classmethod
def get_settings_xml(cls):
xml = super(cls, cls).get_settings_xml()
xml.append('<setting id="%s_auto_update" type="bool" label="Automatically update resolver" default="true"/>' % (cls.__name__))
xml.append('<setting id="%s_etag" type="text" default="" visible="false"/>' % (cls.__name__))
return xml
| gpl-2.0 | -7,828,071,276,280,572,000 | 41.509434 | 134 | 0.683977 | false |
trabucayre/gnuradio | gr-analog/examples/fmtest.py | 1 | 7223 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import channels
import sys, math, time
import numpy
try:
import pylab
except ImportError:
print("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).")
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
fmtx = analog.nbfm_tx(audio_rate, if_rate, max_dev=5e3,
tau=75e-6, fh=0.925*if_rate/2.0)
# Local oscillator
lo = analog.sig_source_c(if_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
lo_freq, # frequency
1.0, # amplitude
0) # DC Offset
mixer = blocks.multiply_cc()
self.connect(self, fmtx, (mixer, 0))
self.connect(lo, (mixer, 1))
self.connect(mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = blocks.add_cc()
for n in range(self._N):
sig = analog.sig_source_f(self._audio_rate, analog.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = blocks.vector_sink_c()
self.channel = channels.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
# Design the channlizer
self._M = 10
bw = chspacing / 2.0
t_bw = chspacing / 10.0
self._chan_rate = self._if_rate / self._M
self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print("Number of taps: ", len(self._taps))
print("Number of channels: ", self._M)
print("Taps per channel: ", tpc)
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
# Create a file sink for each of M output channels of the filter and connect it
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in range(self._M):
self.fmdet.append(analog.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(analog.standard_squelch(self._audio_rate*10))
self.snks.append(blocks.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = numpy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_in = numpy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0 / fs
Tmax = len(d)*Ts
t_in = numpy.arange(0, Tmax, Ts)
x_in = numpy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(numpy.floor(numpy.sqrt(fm.num_rx_channels())))
Nrows = int(numpy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in range(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
X_o = 10.0*numpy.log10(abs(X))
#f_o = numpy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = numpy.arange(0, fs_o / 2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0 / fs_o
Tmax = len(d)*Ts
t_o = numpy.arange(0, Tmax, Ts)
x_t = numpy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 | 2,754,687,838,082,040,000 | 33.070755 | 89 | 0.524574 | false |
esdalmaijer/PyGaze | pygaze/_sound/basesound.py | 1 | 5113 | # -*- coding: utf-8 -*-
#
# This file is part of PyGaze - the open-source toolbox for eye tracking
#
# PyGaze is a Python module for easily creating gaze contingent experiments
# or other software (as well as non-gaze contingent experiments/software)
# Copyright (C) 2012-2013 Edwin S. Dalmaijer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# The BaseClasses are meant to store the documentation on all methods of a
# class, but not to contain any functionality whatsoever. BaseClass is
# inherited by all of the subclasses, and the documentation is copied using
# pygaze.copy_docstr. If you intend to make your own subclass for the current
# baseclass, be sure to inherit BaseClass, copy the documentation, and
# redefine the methods as you see fit, e.g.:
#
#import pygaze
#from pygaze._display.basedisplay import BaseDisplay
#
#class DummyDisplay(BaseDisplay):
#
# """An example child of BaseDisplay"""
#
# def __init__(self, *args, **kwargs):
#
# """Initializes a DummyDisplay instance"""
#
# pygaze.copy_docstring(BaseDisplay,DummyDisplay)
#
# def show(self):
#
# # note that here no docstring is provided, as it is copied from
# # the parent class
#
# print("Display.show call at %d" % int(pygaze.clock.get_time()))
#
class BaseSound:
"""A Sound class for creating and playing sounds"""
def __init__(self):
"""
Initializes a Sound Instance
arguments
None
keyword arguments
osc -- type of oscillator; allowed: "sine", "saw", "square",
"whitenoise" (default = SOUNDOSCILLATOR)
freq -- sound frequency in Herz, either float or integer
(default = SOUNDFREQUENCY)
length -- sound length in milliseconds (default =
SOUNDLENGTH)
attack -- sound attack ('fade in') in milliseconds (default =
SOUNDATTACK)
decay -- sound decay ('fade out') in milliseconds (default =
SOUNDDECAY)
soundfile -- full path to soundfile with .ogg or .wav extension
or None for no file; if a file is specified, all
other keyword arguments will be ignored (default =
None)
"""
pass
def pan(self):
"""
Sets the panning of a sound (the volume of the 'unpanned'
channel decreases, while the other channel remaines the same)
arguments
panning -- either a float between -1 and 1, "left" or "right":
"left": full panning to left (same as -1)
< 0: panning to left
0: no panning
> 0: panning to right
"right": full panning to left (same as 1)
keyword arguments
None
returns
None -- self.sound is panned
"""
pass
def play(self):
"""
Plays specified sound (keyword argument loops specifies how many
repeats after being played once, -1 is infinite); function does not
wait for playback end, but returns immediately
arguments
None
keyword arguments
repeats -- specifies the amount of repeats after being played
once (-1 is infinite) (default = 0)
returns
None -- self.sound is played
"""
pass
def stop(self):
"""
Stops sound playback
arguments
None
keyword arguments
None
returns
None -- self.sound stops playing
"""
pass
def set_volume(self):
"""
Set the playback volume (loudness) to specified value
arguments
volume -- float between 0 and 1
keyword arguments
None
returns
None -- sets self.sound volume to specified value
"""
pass | gpl-3.0 | 4,793,895,376,749,514,000 | 27.228571 | 78 | 0.539409 | false |
makokal/crowdsim | entities/agents.py | 1 | 11997 |
from random import randint, choice
from math import sin, cos, radians, exp, sqrt, fabs
import pygame
from pygame.sprite import Sprite
# from pygame.math import vec2d
from utils import SIM_COLORS, SCALE, SIGN
from utils import euclidean_distance, vec2d, Rotate2D
import numpy as np
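# Illustrative usage sketch (not executed here); the hosting simulation is
# assumed to provide the pygame screen, the game object, the agent image,
# the playing-field Rect and the waypoint list:
#
#   agent = Agent(agent_id=0, screen=screen, game=game, agent_image=img,
#                 field=field_rect, init_position=(5.0, 5.0),
#                 init_direction=(1.0, 0.0), max_speed=1.4,
#                 waypoints=waypoint_list)
#   agent.draw()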
class Agent(Sprite):
""" A agent sprite that bounces off walls and changes its
direction from time to time.
"""
# __slots__ = ('id', 'screen', 'game', 'field', 'image', \
# 'vmax', 'position', 'velocity', 'acceleration'\
# 'radius', 'relaxation_time', 'direction', 'neighbors'\
# 'forces, force_factors', 'waypoints')
def __init__(self, agent_id, screen, game, agent_image,
field, init_position, init_direction, max_speed, waypoints,
radius = 0.2, relaxation_time = 0.5, atype = 0):
""" Create a new Agent.
screen:
The screen on which the agent lives (must be a
pygame Surface object, such as pygame.display)
game:
The game object that holds information about the
game world.
agent_image:
                Image representing the agent in the simulation
field:
A Rect specifying the 'playing field' boundaries.
The agent will bounce off the 'walls' of this
field.
init_position:
A vec2d or a pair specifying the initial position
of the agent on the screen in metres
init_direction:
A vec2d or a pair specifying the initial direction
of the agent. Must have an angle that is a
                multiple of 45 degrees.
vmax:
maximum agent speed, in (m/s)
waypoints:
a list of waypoints for the agent to follow
"""
Sprite.__init__(self)
self._id = agent_id
self.screen = screen
self.game = game
self._vmax = max_speed
self._field = field
self._radius = radius
self._relaxation_time = relaxation_time
self._type = atype
# the current image representing the agent
self._image = agent_image
# A vector specifying the agent's position on the screen
self._position = vec2d(init_position)
self.prev_pos = vec2d(self._position)
# The direction is a normalized vector
self._direction = vec2d(init_direction).normalized()
self._velocity = vec2d(init_direction)
self._acceleration = vec2d(0.0, 0.0)
self._waypoints = waypoints
self._waypoint_index = 0
self._neighbors = []
# # default no forces
self._social_force = vec2d(0.0, 0.0)
self._desired_force = vec2d(0.0, 0.0)
self._obstacle_force = vec2d(0.0, 0.0)
self._lookahead_force = vec2d(0.0, 0.0)
def draw(self):
"""
Draw the agent onto the screen that is set in the constructor
"""
x, y = int(self._position.x*SCALE), int(self._position.y*SCALE)
r = int(self._radius*SCALE)
# poly = [(x-r/2, y), (x, y-40), (x+r/2, y), (x, y+r/2)]
poly = np.array([[x-r/2, y], [x, y-30], [x+r/2, y], [x, y+r/2]])
rpoly = Rotate2D(poly, (x,y), radians(self._direction.get_angle()))
# self.draw_rect = self._image.get_rect().move(
# self._position.x - self._image_w / 2,
# self._position.y - self._image_h / 2)
# self.screen.blit(self._image, self.draw_rect)
# agent representation
if self._type == 0:
pygame.draw.circle(self.screen, SIM_COLORS['yellow'], (x, y), r, int(0))
# pygame.draw.ellipse(self.screen, SIM_COLORS['yellow'], (x, y, 20, 50), int(0))
elif self._type == 1:
pygame.draw.circle(self.screen, SIM_COLORS['aqua'], (x, y), r, int(0))
# pygame.draw.polygon(self.screen, SIM_COLORS['white'], rpoly, int(0))
# pygame.draw.ellipse(self.screen, SIM_COLORS['white'], self._get_ellipse_params(x, y, r, r/2), int(0))
# draw the forces on the agent
self.draw_forces()
def draw_forces(self):
# desired force
pygame.draw.line(self.screen, SIM_COLORS['red'],
((self._position.x*SCALE), (self._position.y*SCALE)),
((self._position.x*SCALE) + self.desired_force[0]*SCALE, (self._position.y*SCALE) + self.desired_force[1]*SCALE), 2)
# social force
pygame.draw.line(self.screen, SIM_COLORS['lime'],
((self._position.x*SCALE), (self._position.y*SCALE)),
((self._position.x*SCALE) + self.social_force[0]*SCALE, (self._position.y*SCALE) + self.social_force[1]*SCALE), 2)
# obstacle force
pygame.draw.line(self.screen, SIM_COLORS['blue'],
((self._position.x*SCALE), (self._position.y*SCALE)),
((self._position.x*SCALE) + self.obstacle_force[0]*SCALE, (self._position.y*SCALE) + self.obstacle_force[1]*SCALE), 2)
def reached_waypoint(self, waypoint):
""" Check if the agent has reached the given waypoint so we
advance to the next one. Reaching means being in the
waypoint circle
"""
if euclidean_distance((self._position.x, self._position.y), waypoint.position) <= waypoint.radius:
return True
else:
return False
def update(self, time_passed):
# cim = Image.open('assets/blueagent.bmp')
# rim = cim.rotate(self._direction.get_angle(), expand=1)
# self._image = pygame.image.fromstring(rim.tostring(), rim.size, rim.mode)
# When the image is rotated, its size is changed.
# self._image_w, self._image_h = self._image.get_size()
# bounds_rect = self.screen.get_rect().inflate(-self._image_w, -self._image_h)
bounds_rect = self.game.field_box.get_internal_rect()
self._direction = vec2d(self._velocity.x, -self._velocity.y)
if self._position.x*SCALE < bounds_rect.left:
self._position.x = bounds_rect.left/SCALE
self._direction.x *= -1
elif self._position.x*SCALE > bounds_rect.right:
self._position.x = bounds_rect.right/SCALE
self._direction.x *= -1
elif self._position.y*SCALE < bounds_rect.top:
self._position.y = bounds_rect.top/SCALE
self._direction.y *= -1
elif self._position.y*SCALE > bounds_rect.bottom:
self._position.y = bounds_rect.bottom/SCALE
self._direction.y *= -1
def social_move(self, time_passed):
# force is computed over neighbors with 0.5m radius (= 0.5*100 px)
self._neighbors = self.game.get_agent_neighbors(self, (0.5*SCALE))
# compute the forces
self._social_force = self._compute_social_force()
self._desired_force = self._compute_desired_force()
self._obstacle_force = self._compute_obstacle_force()
self._lookahead_force = self._compute_lookahead_force()
# =================================================================
# Properties and how to compute them
# =================================================================
@property
def social_force(self):
return self._social_force
@property
def obstacle_force(self):
return self._obstacle_force
@property
def desired_force(self):
return self._desired_force
@property
def lookahead_force(self):
return self._lookahead_force
@property
def id(self):
return self._id
@property
def position(self):
return self._position
@position.setter
def position(self, newpos):
self._position = newpos
@property
def velocity(self):
return self._velocity
@property
def acceleration(self):
return self._acceleration
@property
def vmax(self):
return self._vmax
@property
def relaxation_time(self):
return self._relaxation_time
@property
def next_waypoint(self):
return self._waypoints[self._waypoint_index]
def _compute_social_force(self):
# variables according to Moussaid-Helbing paper
lambda_importance = 2.0
gamma = 0.35
n, n_prime = 2, 3
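        # In the Moussaid-Helbing specification model (as assumed here) these
        # constants play the following roles:
        #   lambda_importance - relative weight of the velocity difference
        #                       when building the interaction vector
        #   gamma             - scales the interaction range B = gamma * ||D||
        #   n, n_prime        - control the angular widths of the directional
        #                       (n) and deceleration (n_prime) terms below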
social_force = vec2d(0, 0)
for neighbor in self._neighbors:
# no social force with oneself
if neighbor.id == self.id:
continue
else:
# position difference
diff = neighbor.position - self.position
diff_direction = diff.normalized()
# velocity difference
vel_diff = self.velocity - neighbor.velocity
# interaction direction t_ij
interaction_vector = lambda_importance * vel_diff + diff_direction
if (interaction_vector.get_length()) == 0:
continue;
interaction_direction = interaction_vector / interaction_vector.get_length()
# theta (angle between interaction direction and position difference vector)
theta = interaction_direction.get_angle_between(diff_direction)
# model parameter B = gamma * ||D||
B = gamma * interaction_vector.get_length()
theta_rad = radians(theta)
force_vel_amount = -exp(-diff.get_length() / B - (n_prime * B * theta_rad)**2)
force_angle_amount = (-1 * SIGN(theta)) * exp(-diff.get_length() / B - (n * B * theta_rad)**2)
force_vel = force_vel_amount * interaction_direction
force_angle = force_angle_amount * interaction_direction.left_normal_vector()
# social_force[0] += force_vel.x + force_angle.x
# social_force[1] += force_vel.y + force_angle.y
social_force += force_vel + force_angle
return social_force
def _compute_desired_force(self):
if self.reached_waypoint(self.next_waypoint):
self._waypoint_index += 1
# if all waypoints are covered, go back to the beginning
# NOTE - this does not take into account birth and death waypoints yet
if self._waypoint_index == len(self._waypoints):
self._waypoint_index = 0
wp_force = self.next_waypoint.force_towards(self)
desired_force = wp_force
return desired_force
def _compute_obstacle_force(self):
obstacle_force = vec2d(0.0, 0.0)
# if there are no obstacles, there is no obstacle force
if len(self.game.obstacles) == 0:
return obstacle_force
# find the closest obstacle and the closest point on it
closest_distance, closest_point = self.game.obstacles[0].agent_distance(self)
for obstacle in self.game.obstacles:
other_distance, other_point = obstacle.agent_distance(self)
if other_distance < closest_distance:
closest_distance, closest_point = other_distance, other_point
distance = closest_distance - self._radius
if closest_distance > self._radius*5:
return obstacle_force
force_amount = exp(-distance)
min_diffn = (self._position - vec2d(closest_point)).normalized()
obstacle_force.x = (force_amount * min_diffn).x
obstacle_force.y = (force_amount * min_diffn).y
return obstacle_force
def _compute_lookahead_force(self):
lookahead_force = vec2d(0, 0)
return lookahead_force
def _get_ellipse_params(self, x, y, w, h):
return ((x-w/2), (y-h/2), w, h)
| bsd-3-clause | 6,701,376,794,375,503,000 | 33.875 | 134 | 0.563141 | false |
imanolarrieta/RL | examples/tutorial/ChainMDPTut_example.py | 1 | 1953 | #!/usr/bin/env python
"""
Domain Tutorial for RLPy
=================================
Assumes you have created the ChainMDPTut.py domain according to the
tutorial and placed it in the Domains/ directory.
Tests the agent using SARSA with a tabular representation.
"""
__author__ = "Robert H. Klein"
from rlpy.Domains import ChainMDPTut
from rlpy.Agents import SARSA
from rlpy.Representations import Tabular
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import os
import logging
def make_experiment(exp_id=1, path="./Results/Tutorial/ChainMDPTut-SARSA"):
"""
Each file specifying an experimental setup should contain a
make_experiment function which returns an instance of the Experiment
class with everything set up.
    @param exp_id: number used to seed the random number generators
@param path: output directory where logs and results are stored
"""
opt = {}
opt["exp_id"] = exp_id
opt["path"] = path
## Domain:
chainSize = 50
domain = ChainMDPTut(chainSize=chainSize)
opt["domain"] = domain
## Representation
# discretization only needed for continuous state spaces, discarded otherwise
representation = Tabular(domain)
## Policy
policy = eGreedy(representation, epsilon=0.2)
## Agent
opt["agent"] = SARSA(representation=representation, policy=policy,
                         discount_factor=domain.discount_factor,
learn_rate=0.1)
opt["checks_per_policy"] = 100
opt["max_steps"] = 2000
opt["num_policy_checks"] = 10
experiment = Experiment(**opt)
return experiment
if __name__ == '__main__':
experiment = make_experiment(1)
experiment.run(visualize_steps=False, # should each learning step be shown?
visualize_learning=True, # show policy / value function?
visualize_performance=1) # show performance runs?
experiment.plot()
experiment.save()
| bsd-3-clause | 1,422,288,029,209,603,600 | 31.016393 | 81 | 0.674859 | false |
feranick/MultiFit | Archive/multifit_3/multifit.py | 1 | 25354 | #! /usr/bin/env python
###=============================================================
### Multifit 3
### Nicola Ferralis <[email protected]>
### The entire code is covered by GNU Public License (GPL) v.3
###=============================================================
### Uncomment this if for headless servers.
#import matplotlib
#matplotlib.use('Agg')
### ---------------------------------------
from numpy import *
from lmfit.models import GaussianModel, LorentzianModel, PseudoVoigtModel, VoigtModel
import matplotlib.pyplot as plt
import sys, os.path, getopt, glob, csv
from os.path import exists
from multiprocessing import Pool
import multiprocessing as mp
####################################################################
''' Program definitions and configuration variables '''
####################################################################
class defPar:
version = '3-20151104a'
### Define number of total peaks (do not change: this is read from file)
numPeaks = 0
### Name input paramter file
inputParFile = 'input_parameters.csv'
# Save summary fitting results
summary = 'summary.csv'
# max reduced chi square for reliable results
redchi = 10
    # value assigned to D5G when fit is wrong
outliar = 0
### Plot initial fitting curve
initCurve = True
### Multiprocessing?
multiproc = True
### Max number of processor. Uncomment:
### first: max allowed by hardware
### second: max specified.
numProc = mp.cpu_count()
#numProc = 4
### Peak search optimization
peakPosOpt = False
### Resolution for plots
dpiPlot = 150
###Format Plot
formatPlot = 0 #png
#formatPlot = 1 #svg
### Parameters for H:C conversion - 2015-09-25
mHC = 0.8824
bHC = -0.0575
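    # (Used further down in calculate() as a linear estimate of the atomic
    #  H:C ratio from the fitted D5/G band ratio: H:C = mHC * (D5/G) + bHC.)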
####################################################################
''' Main routine to perform and plot the fit '''
####################################################################
def calculate(x, y, x1, y1, file, type, processMap, showPlot, lab):
### Load initialization parameters from csv file.
with open(defPar.inputParFile, 'rU') as inputFile:
input = csv.reader(inputFile)
numRows = 0
inval=[]
for row in input:
defPar.numPeaks = len(row)-1
row = [nulStrConvDigit(entry) for entry in row]
inval.append(row)
numRows +=1
inputFile.close()
inv = resize(inval, [numRows, defPar.numPeaks+1])
fpeak = []
# define active peaks
for i in range(1, defPar.numPeaks+1):
fpeak.extend([int(inv[1,i])])
p = Peak(type)
numActivePeaks = 0
for i in range (0, defPar.numPeaks):
if fpeak[i] != 0:
numActivePeaks +=1
print (' Fitting with ' + str(numActivePeaks) + ' (' + p.typec + ') peaks')
if(defPar.peakPosOpt==True):
print(' Peak Search Optimization: ON\n')
else:
print(' Peak Search Optimization: OFF\n')
### Initialize parameters for fit.
pars = p.peak[0].make_params()
for i in range (0, defPar.numPeaks):
if fpeak[i]!=0:
fac1 = 2
if (defPar.peakPosOpt == True):
fac = fac1 - abs(y[ix(x,inv[2,i+1]+fac1*inv[5,i+1])] - y[ix(x,inv[2,i+1]-fac1*inv[5,i+1])]) / \
max(y[ix(x,inv[2,i+1]-fac1*inv[5,i+1]):ix(x,inv[2,i+1]+fac1*inv[5,i+1])])
else:
fac = fac1
print(' Peak {:}'.format(str(i)) +': [' + str(inv[2,i+1]-fac*inv[5,i+1]) + ', ' + \
str(inv[2,i+1]+fac*inv[5,i+1]) + ']')
pars += p.peak[i].guess(y[ix(x,inv[2,i+1]-fac*inv[5,i+1]):ix(x,inv[2,i+1]+fac*inv[5,i+1])] , \
x=x[ix(x,inv[2,i+1]-fac*inv[5,i+1]):ix(x,inv[2,i+1]+fac*inv[5,i+1])])
pars['p{:}_center'.format(str(i))].set(min = inv[3,i+1], max = inv[4,i+1])
pars['p{:}_sigma'.format(str(i))].set(min = inv[6,i+1], max = inv [7,i+1])
pars['p{:}_amplitude'.format(str(i))].set(min=inv[9,i+1], max = inv[10,i+1])
if (type ==0):
pars['p{:}_fraction'.format(str(i))].set(min = inv[12,i+1], max = inv[13,i+1])
if (type ==3):
pars['p{:}_gamma'.format(str(i))].set(inv[5,i+1])
### Add relevant peak to fitting procedure.
mod = p.peak[1]
for i in range (2,defPar.numPeaks):
if fpeak[i]!=0:
mod += p.peak[i]
if fpeak[0] != 0:
mod += p.peak[0]
### Initialize prefitting curves
init = mod.eval(pars, x=x)
### Perform fitting and display report
print('\n************************************************************')
print(' Running fit on file: ' + file + ' (' + str(x1) + ', ' + str(y1) + ')')
out = mod.fit(y, pars,x=x)
print(' Done! \n')
print(' Showing results for: ' + file + ' (' + str(x1) + ', ' + str(y1) + ')')
print(out.fit_report(min_correl=0.25))
### Output file names.
outfile = 'fit_' + file # Save individual fitting results
plotfile = os.path.splitext(file)[0] + '_fit' # Save plot as image
if os.path.isfile(defPar.summary) == False:
header = True
else:
header = False
print('\nFit successful: ' + str(out.success))
d5g = out.best_values['p2_amplitude']/out.best_values['p6_amplitude']
d4d5g = (out.best_values['p1_amplitude']+out.best_values['p2_amplitude'])/out.best_values['p6_amplitude']
d1g = out.best_values['p3_amplitude']/out.best_values['p6_amplitude']
d1d1g = out.best_values['p3_amplitude']/(out.best_values['p3_amplitude']+out.best_values['p6_amplitude'])
hc = defPar.mHC*d5g + defPar.bHC
wG = out.best_values['p6_sigma']*2
if (processMap == False):
if (fpeak[2] == 1 & fpeak[3] == 1 & fpeak[6] == 1):
print('D5/G = {:f}'.format(d5g))
print('H:C = {:f}'.format(hc))
print('(D4+D5)/G = {:f}'.format(d4d5g))
print('D1/G = {:f}'.format(d1g))
if type ==0:
print('G: {:f}% Gaussian'.format(out.best_values['p6_fraction']*100))
print('Fit type: {:}'.format(p.typec))
print('Chi-square: {:}'.format(out.chisqr))
print('Reduced Chi-square: {:}\n'.format(out.redchi))
### Uncomment to enable saving results of each fit in a separate file.
'''
with open(outfile, "a") as text_file:
text_file.write('\nD5/G = {:f}'.format(d5g))
text_file.write('\H:C = {:f}'.format(hc))
text_file.write('\n(D4+D5)/G = {:f}'.format(d4d5g))
text_file.write('\nD1/G = {:f}'.format(d1g))
if type ==0:
text_file.write('\nG %Gaussian: {:f}'.format(out.best_values['p5_fraction']))
text_file.write('\nFit type: {:}'.format(p.typec))
text_file.write('\nChi-square: {:}'.format(out.chisqr))
text_file.write('\nReduced Chi-square: {:}\n'.format(out.redchi))
'''
### Write Summary
summaryFile = [file, \
d5g, d4d5g, hc, d1g, d1d1g, \
out.best_values['p3_amplitude'], \
out.best_values['p1_amplitude'], \
out.best_values['p2_amplitude'], \
out.best_values['p6_amplitude'], \
out.best_values['p6_sigma']*2, \
out.best_values['p6_center']]
if type ==0:
summaryFile.extend([out.best_values['p2_fraction'], \
out.best_values['p3_fraction'], \
out.best_values['p6_fraction']])
else:
for i in range(0,3):
summaryFile.extend([type-1])
summaryFile.extend([out.chisqr, out.redchi, p.typec, out.success, \
x1, y1, lab])
with open(defPar.summary, "a") as sum_file:
csv_out=csv.writer(sum_file)
csv_out.writerow(summaryFile)
sum_file.close()
if(processMap == True):
saveMap(file, out, 'D5G', d5g, x1, y1)
saveMap(file, out, 'D4D5G', d4d5g, x1, y1)
saveMap(file, out, 'D1G', d1g, x1, y1)
saveMap(file, out, 'D1GD1', d1d1g, x1, y1)
saveMap(file, out, 'HC', hc, x1, y1)
saveMap(file, out, 'wG', wG, x1, y1)
saveMapMulti(file, out, hc, wG, d5g, d1g, d4d5g, d4d5g+d1g, x1, y1, lab,1)
saveMapMulti(file, out, hc, wG, out.best_values['p6_amplitude'], \
out.best_values['p3_amplitude'], \
out.best_values['p2_amplitude'], \
out.best_values['p1_amplitude'], x1, y1, lab, 2)
else:
        ### Plot optimal fit and individual components
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, label='data')
if(defPar.initCurve == True):
ax.plot(x, init, 'k--', label='initial')
ax.plot(x, out.best_fit, 'r-', label='fit')
y0 = p.peak[0].eval(x = x, **out.best_values)
#ax.plot(x,y0,'g')
y = [None]*(defPar.numPeaks + 1)
for i in range (0,defPar.numPeaks):
if (fpeak[i] ==1):
y[i] = p.peak[i].eval(x = x, **out.best_values)
if (i==2 or i==6):
ax.plot(x,y[i],'g',linewidth=2.0)
else:
ax.plot(x,y[i],'g')
ax.text(0.05, 0.875, 'Fit type: {:}\nD5/G = {:f}\nRed. Chi sq: {:}'.format( \
p.typec, \
d5g, out.redchi), transform=ax.transAxes)
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Intensity [arb. units]')
plt.title(file)
plt.legend()
plt.grid(True)
plt.xlim([min(x), max(x)])
if(defPar.formatPlot == 0):
plt.savefig(plotfile + '.png', dpi = defPar.dpiPlot, format = 'png') # Save plot
if(defPar.formatPlot == 1):
plt.savefig(plotfile + '.svg', dpi = defPar.dpiPlot, format = 'svg') # Save plot
if(showPlot == True):
print('*** Close plot to quit ***\n')
plt.show()
plt.close()
del p
del out
####################################################################
''' Main program '''
####################################################################
def main():
print('\n******************************')
print(' MultiFit v.' + defPar.version)
print('******************************')
try:
opts, args = getopt.getopt(sys.argv[1:], "bftmitph:", ["batch", "file", "type", "map", "input-par", "test", "plot", "help"])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
# If parameter file not present, make one
if not exists(defPar.inputParFile):
print ('\n Init parameter not found. Generating a new one...')
genInitPar()
# If summary file is not present, make it and fill header
makeHeaderSummary()
if(defPar.multiproc == True):
print('\n Multiprocessing enabled: ' + str(defPar.numProc) + '/' + str(mp.cpu_count()) + ' CPUs\n')
else:
print('\n Multiprocessing disabled\n')
for o, a in opts:
if o in ("-b" , "--batch"):
try:
type = sys.argv[2]
except:
usage()
sys.exit(2)
type = int(sys.argv[2])
i = 0
if(defPar.multiproc == True):
p = Pool(defPar.numProc)
for f in glob.glob('*.txt'):
if (f != 'summary.txt'):
rs = readSingleSpectra(f)
p.apply_async(calculate, args=(rs.x, rs.y, '0', '0', f, type, False, False, i))
i += 1
p.close()
p.join()
else:
for f in glob.glob('*.txt'):
if (f != 'summary.txt'):
rs = readSingleSpectra(f)
calculate(rs.x, rs.y, '0', '0', f, type, False, False, i)
i += 1
addBlankLine(defPar.summary)
elif o in ("-f", "--file"):
try:
type = sys.argv[3]
except:
usage()
sys.exit(2)
file = str(sys.argv[2])
type = int(sys.argv[3])
rs = readSingleSpectra(file)
calculate(rs.x, rs.y, '0', '0', file, type, False, True, '')
elif o in ("-p", "--plot"):
if(len(sys.argv) < 3):
if(defPar.multiproc == True):
p = Pool(defPar.numProc)
for f in glob.glob('*.txt'):
if (f != 'summary.txt'):
rs = readSingleSpectra(f)
print ("Saving plot for: " + f)
p.apply_async(plotData, args=(rs.x, rs.y, f, False))
p.close()
p.join()
else:
for f in glob.glob('*.txt'):
if (f != 'summary.txt'):
rs = readSingleSpectra(f)
print ("Saving plot for: " + f)
plotData(rs.x, rs.y, f, False)
else:
file = str(sys.argv[2])
rs = readSingleSpectra(file)
plotData(rs.x, rs.y, file, True)
elif o in ("-m", "--map"):
try:
type = sys.argv[3]
except:
usage()
sys.exit(2)
file = str(sys.argv[2])
type = int(sys.argv[3])
rm = readMap(file)
map = Map()
i=0
if(defPar.multiproc == True):
p = Pool(defPar.numProc)
for i in range (1, rm.num_lines):
p.apply_async(calculate, args=(rm.x, rm.y[i], rm.x1[i], rm.y1[i], file, type, True, False, i))
p.close()
p.join()
#map.draw(os.path.splitext(file)[0] + '_map.txt', True)
else:
for i in range (1, rm.num_lines):
calculate(rm.x, rm.y[i], rm.x1[i], rm.y1[i], file, type, True, False, i)
#map.draw(os.path.splitext(file)[0] + '_map.txt', True)
elif o in ("-t", "--test"):
file = str(sys.argv[2])
map = Map()
#map.readCoord(os.path.splitext(file)[0] + '_map.txt')
map.draw(os.path.splitext(file)[0] + '_map.txt', True)
elif o in ("-i", "--input-par"):
genInitPar()
else:
usage()
sys.exit(2)
####################################################################
''' Class to read map files (Horiba LabSpec5) '''
####################################################################
class readMap:
def __init__(self, file):
try:
with open(file) as openfile:
### Load data
self.num_lines = sum(1 for line in openfile)-1
data = loadtxt(file)
self.x1 = [None]*(self.num_lines)
self.y1 = [None]*(self.num_lines)
self.y = [None]*(self.num_lines)
self.x = data[0, 2:]
for i in range(0, self.num_lines):
self.x1[i] = data[i+1, 1]
self.y1[i] = data[i+1, 0]
self.y[i] = data[i+1, 2:]
except:
print(' File: ' + file + ' not found\n')
sys.exit(2)
####################################################################
''' Class to read individual spectra '''
####################################################################
class readSingleSpectra:
def __init__(self, file):
try:
with open(file):
data = loadtxt(file)
self.x = data[:, 0]
self.y = data[:, 1]
except:
print(' File: ' + file + ' not found\n')
sys.exit(2)
####################################################################
''' Class to define peaks and their properties '''
####################################################################
class Peak:
### Define the typology of the peak
def __init__(self, type):
self.peak= [None]*(defPar.numPeaks)
if type==0:
for i in range (0,defPar.numPeaks):
self.peak[i] = PseudoVoigtModel(prefix="p"+ str(i) +"_")
self.typec = "PseudoVoigt"
elif type == 1:
for i in range (0,defPar.numPeaks):
self.peak[i] = GaussianModel(prefix="p"+ str(i) +"_")
self.typec = "Gauss"
elif type == 2:
for i in range (0,defPar.numPeaks):
self.peak[i] = LorentzianModel(prefix="p"+ str(i) +"_")
self.typec = "Lorentz"
elif type ==3:
for i in range (0,defPar.numPeaks):
self.peak[i] = VoigtModel(prefix="p"+ str(i) +"_")
self.typec = "Voigt"
else:
print("Warning: type undefined. Using PseudoVoigt")
for i in range (0,defPar.numPeaks):
self.peak[i] = PseudoVoigtModel(prefix="p"+ str(i) +"_")
self.typec = "PVoigt"
####################################################################
''' Routine to generate initialization parameter file '''
####################################################################
def genInitPar():
if exists(defPar.inputParFile):
print(' Input parameter file: ' + defPar.inputParFile + ' already exists\n')
sys.exit(2)
else:
initPar = [('name', 'Base', 'D4', 'D5', 'D1', 'D3a', 'D3b', 'G', 'D2'), \
('activate peak',1,1,1,1,1,1,1,1), \
('center',1080,1160,1250,1330,1400,1470,1590,1710), \
('center min','','',1240,'','','','',''), \
('center max','','',1275,'','','','',''), \
('sigma',20,20,20,40,20,10,20,20), \
('sigma min',10,10,10,10,10,5,10,10), \
('sigma max',50,50,50,50,50,50,50,50), \
('amplitude','','','','','','','',''), \
('ampl. min',0,0,0,0,0,0,0,0), \
('ampl. max','','','','','','','',''), \
('fraction',0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5), \
('fraction min',0,0,0,0,0,0,0,0), \
('fraction max',1,1,1,1,1,1,1,1)]
with open(defPar.inputParFile, "a") as inputFile:
csv_out=csv.writer(inputFile)
for row in initPar:
csv_out.writerow(row)
inputFile.close()
        print(' Input parameters saved in: ' + defPar.inputParFile)
####################################################################
''' Make header, if absent, for the summary file '''
####################################################################
def makeHeaderSummary():
if os.path.isfile(defPar.summary) == False:
summaryHeader = ['File','D5G','(D4+D5)/G','HC','D1/G', 'D1/(D1+G)',
'iD1','iD4','iD5','iG','wG','pG','D5%Gaussian', \
'D1%Gaussian','G%Gaussianfit','Chi-square',\
'red-chi-sq','Fit-type','Fit-OK','x1','y1', \
'label']
with open(defPar.summary, "a") as sum_file:
csv_out=csv.writer(sum_file)
csv_out.writerow(summaryHeader)
sum_file.close()
####################################################################
''' Lists the program usage '''
####################################################################
def usage():
print('Usage: \n\n Single file:')
print(' python multifit.py -f filename n\n')
print(' Batch processing:')
print(' python multifit.py -b n\n')
print(' Map (acquired with Horiba LabSpec5): ')
print(' python multifit.py -m filename n\n')
print(' Create and save plot of data only (no fit): ')
print(' python multifit.py -p filename \n')
print(' Create and save plot of batch data (no fit): ')
print(' python multifit.py -p \n')
    print(' Create new input parameter file (csv): ')
print(' python multifit.py -i \n')
print(' n = 0: PseudoVoigt 1: Gaussian 2: Lorentzian 3: Voigt\n')
print(' Important note: The first two entries in the map file from Labspec are empty.')
print(' For MultiFit.py to work, please add 1 to the first two entries. For example,')
print(' for the first line of the file looking like:\n')
print(' 1000.4694 1001.6013 1002.7333...')
print(' Change it to:\n')
print(' 1 1 1000.4694 1001.6013 1002.7333...\n\n')
####################################################################
''' Add blank line at the end of the summary spreadsheet '''
####################################################################
def addBlankLine(file):
try:
with open(file, "a") as sum_file:
sum_file.write('\n')
except:
print ('File busy!')
####################################################################
''' Finds data index for a given x value '''
####################################################################
def ix(arrval, value):
#return index of array *at or below* value
if value < min(arrval): return 0
return (where(arrval<=value)[0]).max()
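# Illustrative example: with arrval = array([1000., 1001., 1002.]) and
# value = 1001.5, ix() returns 1, i.e. the index of the last element <= value.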
####################################################################
''' Convert null or strings into floats '''
####################################################################
def nulStrConvDigit(x):
if (not x or not x.isdigit()):
return None
else:
return float(x)
####################################################################
''' Drawing only routine '''
####################################################################
def plotData(x, y, file, showPlot):
### Plot initial data
pngData = os.path.splitext(file)[0] # Save plot as image
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, label='data')
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Intensity [arb. units]')
plt.title(file)
#plt.legend()
plt.grid(True)
if(defPar.formatPlot == 0):
plt.savefig(pngData + '.png', dpi = defPar.dpiPlot, format = 'png') # Save plot
if(defPar.formatPlot == 1):
plt.savefig(pngData + '.svg', dpi = defPar.dpiPlot, format = 'svg') # Save plot
if(showPlot == True):
print('*** Close plot to quit ***\n')
plt.show()
plt.close()
####################################################################
''' Save map files '''
####################################################################
def saveMap(file, out, extension, s, x1, y1):
inputFile = os.path.splitext(file)[0] + '_' + extension + '_map.csv'
with open(inputFile, "a") as coord_file:
coord_file.write('{:},'.format(x1))
coord_file.write('{:},'.format(y1))
if (out.success == True and out.redchi < defPar.redchi):
coord_file.write('{:}\n'.format(s))
else:
coord_file.write('{:}\n'.format(defPar.outliar))
coord_file.close()
def saveMapMulti(file, out, s1, s2, s3, s4, s5, s6, x1, y1, lab, mtype):
if(mtype == 1):
inputFile = os.path.splitext(file)[0] + '_map-ratio.csv'
else:
inputFile = os.path.splitext(file)[0] + '_map-int.csv'
if (os.path.exists(inputFile) == False):
with open(inputFile, "a") as coord_file:
if(mtype == 1):
coord_file.write(',HC,wG,D5G,D1G,D4D5G,DG,X,Y\n')
else:
coord_file.write(',HC,wG,G,D1,D5,D4,X,Y\n')
coord_file.close()
with open(inputFile, "a") as coord_file:
if (out.success == True and out.redchi < defPar.redchi):
coord_file.write('{:},'.format(lab))
coord_file.write('{:},'.format(s1))
coord_file.write('{:},'.format(s2))
coord_file.write('{:},'.format(s3))
coord_file.write('{:},'.format(s4))
coord_file.write('{:},'.format(s5))
coord_file.write('{:},'.format(s6))
else:
coord_file.write('{:},'.format(lab))
for i in range (0, 6):
coord_file.write('{:},'.format(defPar.outliar))
coord_file.write('{:},'.format(x1))
coord_file.write('{:}\n'.format(y1))
coord_file.close()
####################################################################
''' Definition of class map'''
####################################################################
class Map:
def __init__(self):
self.x = []
self.y = []
self.z = []
def readCoord(self, file):
self.num_lines = sum(1 for line in open(file))
data = genfromtxt(file)
self.x = data[:,0]
self.y = data[:,1]
self.z = data[:,2]
def draw(self, file, showplot):
self.readCoord(file)
####################################################################
''' Main initialization routine '''
####################################################################
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 | 4,940,404,559,458,342,000 | 36.121523 | 132 | 0.450974 | false |
lzamparo/SdA_reduce | plot_scripts/plot_finetuning_gg.py | 1 | 3249 | """ Process all the model finetuning output files in the given directory
and produce a lineplot of the top 10 models based on reconstruction error.
Fine-tuning file names look like this: finetune_sda_900_500_100_50.2013-06-16.02:25:26.981658
The average training error for each epoch and batch is reported:
e.g epoch 1, minibatch 1/2051, training error 487.399902
The validation error over all validation batches is reported at the end of each epoch:
e.g epoch 1, minibatch 2051/2051, validation error 266.505805
Each layer transition is marked by the line: Pickling the model..."""
import sys, re, os
import numpy as np
import pandas as pd
from collections import OrderedDict
from ggplot import *
# Extract the model name from each filename.
def extract_model_name(regex,filename):
match = regex.match(filename)
if match is not None:
return match.groups()[0]
# Extract the layer and cost from a line
def parse_line(line,data_regex):
match = data_regex.match(line)
if match is not None:
return match.groups()
else:
return (None, None, None, None, None)
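# Illustrative example: once data_regex (compiled below) is used, a line such as
# "epoch 1, minibatch 2051/2051, validation error 266.505805" is parsed into the
# string tuple ('1', '2051', '2051', 'validation', '266.505805').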
input_dir = '/data/sda_output_data/test_finetune_output_mb_and_valid'
# read a list of all files in the directory that match model output files
currdir = os.getcwd()
os.chdir(input_dir)
model_files = os.listdir(".")
# compile a regex to extract the model from a given filename
model_name = re.compile(".*?sda_([\d_]+)\.*")
data_regex = re.compile("epoch ([\d]+)\, minibatch ([\d])+\/([\d]+)\, ([a-z]+) error ([\d.]+)")
# Store the results of the model search in this dictionary
# keys are model name, values are pandas dataframe type objects
training_dfs = OrderedDict()
validation_dfs = OrderedDict()
print "...Processing files"
# for each file:
for f in model_files:
# if this file is a pkl or other file, ignore it
if not f.startswith("finetune_sda"):
continue
# read the file, populate this file's entry in the three dicts
f_model = extract_model_name(model_name, f)
if f_model is None:
continue
if not training_dfs.has_key(f_model):
training_dfs[f_model]= OrderedDict()
validation_dfs[f_model] = []
infile = open(f, 'r')
for line in infile:
if not line.startswith("epoch"):
continue
(epoch, mb_index, mb_total, phase, err) = parse_line(line,data_regex)
if epoch is not None:
if phase == 'validation':
validation_dfs[f_model].append(float(err))
continue
if not training_dfs[f_model].has_key(epoch):
training_dfs[f_model][epoch] = [float(err)]
else:
training_dfs[f_model][epoch].append(float(err))
infile.close()
print "...Done"
print "...Subsampling from the validation scores to evenly get equal sized arrays"
min_len = np.inf
for key in validation_dfs:
f_len = len(validation_dfs[key])
if f_len < min_len:
min_len = f_len
for key in validation_dfs:
validation_array = np.asarray(validation_dfs[key],dtype=np.float)
f_len = len(validation_array)
idx = np.arange(f_len)
np.random.shuffle(idx)
idx = idx[:min_len]
idx.sort()
validation_dfs[key] = validation_array[idx]
| bsd-3-clause | -877,238,559,156,923,100 | 31.168317 | 95 | 0.664512 | false |
louisswarren/eden | formula.py | 1 | 3029 | from collections import namedtuple
def _paren(f):
if not isinstance(f, (Atom, Predicate, Universal, Existential)):
return '({})'.format(f)
else:
return str(f)
class Formula:
def __rshift__(self, other):
return Implication(self, other)
def __or__(self, other):
return Disjunction(self, other)
def __xor__(self, other):
return Conjunction(self, other)
class Atom(Formula, namedtuple('Atom', 'name')):
def __str__(self):
return str(self.name)
def free_terms(self):
return frozenset()
def term_sub(self, old, new):
return self
class Predicate(Formula, namedtuple('Predicate', 'name term')):
def __str__(self):
return str(self.name) + str(self.term)
def free_terms(self):
return frozenset((self.term, ))
def term_sub(self, old, new):
if self.term == old:
return Predicate(self.name, new)
else:
return self
class Implication(Formula, namedtuple('Implication', 'prem conc')):
def __str__(self):
return '{} → {}'.format(*map(_paren, (self.prem, self.conc)))
def free_terms(self):
return self.prem.free_terms() | self.conc.free_terms()
def term_sub(self, old, new):
return Implication(self.prem.term_sub(old, new),
self.conc.term_sub(old, new))
class Conjunction(Formula, namedtuple('Conjunction', 'left right')):
def __str__(self):
return '{} ∧ {}'.format(*map(_paren, (self.left, self.right)))
def free_terms(self):
return self.left.free_terms() | self.right.free_terms()
def term_sub(self, old, new):
return Conjunction(self.left.term_sub(old, new),
self.right.term_sub(old, new))
class Disjunction(Formula, namedtuple('Disjunction', 'left right')):
def __str__(self):
return '{} ∨ {}'.format(*map(_paren, (self.left, self.right)))
def free_terms(self):
return self.left.free_terms() | self.right.free_terms()
def term_sub(self, old, new):
return Disjunction(self.left.term_sub(old, new),
self.right.term_sub(old, new))
class Universal(Formula, namedtuple('Universal', 'term formula')):
def __str__(self):
return '∀{} {}'.format(self.term, _paren(self.formula))
def free_terms(self):
return self.formula.free_terms() - {self.term}
def term_sub(self, old, new):
if self.term != old:
return Universal(self.term, self.formula.term_sub(old, new))
else:
return self
class Existential(Formula, namedtuple('Existential', 'term formula')):
def __str__(self):
return '∃{} {}'.format(self.term, _paren(self.formula))
def free_terms(self):
        return self.formula.free_terms() - {self.term}
def term_sub(self, old, new):
if self.term != old:
            return Existential(self.term, self.formula.term_sub(old, new))
else:
return self
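# A brief usage sketch (illustrative, not part of the original module): the
# operator overloads build formulas, while free_terms and term_sub handle
# free-term collection and substitution. The names P, Q, F, G, x, y, z below
# are arbitrary examples.
if __name__ == '__main__':
    P, Q = Atom('P'), Atom('Q')
    print(P >> (P | Q))                   # P → (P ∨ Q)
    phi = Universal('x', Predicate('F', 'x') >> Predicate('G', 'y'))
    print(phi)                            # ∀x (Fx → Gy)
    print(phi.free_terms())               # frozenset({'y'})
    print(phi.term_sub('y', 'z'))         # ∀x (Fx → Gz)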
| gpl-3.0 | 3,895,439,564,555,744,000 | 27.481132 | 72 | 0.584962 | false |
chuajiesheng/twitter-sentiment-analysis | analysis/word2vec.py | 1 | 4632 | import numpy as np
from sklearn.model_selection import *
from sklearn.ensemble import *
def get_dataset():
files = ['./analysis/input/negative_tweets.txt', './analysis/input/neutral_tweets.txt', './analysis/input/positive_tweets.txt']
x = []
for file in files:
s = []
with open(file, 'r') as f:
for line in f:
s.append(line.strip())
assert len(s) == 1367
x.extend(s)
y = np.array([-1] * 1367 + [0] * 1367 + [1] * 1367)
return x, y
# gensim modules
from gensim import utils
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
# random shuffle
from random import shuffle
# numpy
import numpy
# classifier
from sklearn.linear_model import LogisticRegression
import logging
import sys
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
class TaggedLineSentence(object):
def __init__(self, sources):
self.sources = sources
flipped = {}
# make sure that keys are unique
for key, value in sources.items():
if value not in flipped:
flipped[value] = [key]
else:
raise Exception('Non-unique prefix encountered')
def __iter__(self):
for source, prefix in self.sources.items():
with utils.smart_open(source) as fin:
for item_no, line in enumerate(fin):
yield TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no])
def to_array(self):
self.sentences = []
for source, prefix in self.sources.items():
with utils.smart_open(source) as fin:
for item_no, line in enumerate(fin):
self.sentences.append(TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no]))
return self.sentences
def sentences_perm(self):
shuffle(self.sentences)
return self.sentences
log.info('source load')
sources = {'./analysis/input/negative_tweets.txt': 'NEG', './analysis/input/neutral_tweets.txt': 'NEU', './analysis/input/positive_tweets.txt': 'POS'}
log.info('TaggedDocument')
sentences = TaggedLineSentence(sources)
log.info('D2V')
model = Doc2Vec(min_count=1, window=60, size=100, sample=1e-4, negative=5, workers=7)
model.build_vocab(sentences.to_array())
log.info('Epoch')
for epoch in range(10):
log.info('EPOCH: {}'.format(epoch))
model.train(sentences.sentences_perm())
import code; code.interact(local=dict(globals(), **locals()))
log.info('Model Save')
model.save('./imdb.d2v')
model = Doc2Vec.load('./imdb.d2v')
log.info('Sentiment')
X, Y = get_dataset()
ss = ShuffleSplit(n_splits=10, test_size=0.2, random_state=10)
for train, test in ss.split(X, Y):
size_train = len(train)
size_test = len(test)
train_arrays = numpy.zeros((size_train, 100))
train_labels = numpy.zeros(size_train)
X_train = np.array(X)[train]
y_train = Y[train]
X_test = np.array(X)[test]
y_test = Y[test]
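    # The doc2vec tags follow the order of the source files above: dataset
    # indices 0-1366 are negative tweets (NEG_*), 1367-2733 neutral (NEU_*)
    # and 2734-4100 positive (POS_*), so the offsets below recover each
    # tweet's tag from its position in X/Y.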
for index, i in enumerate(train):
if Y[i] == 1:
prefix = 'POS_' + str(i - 1367 - 1367)
elif Y[i] == 0:
prefix = 'NEU_' + str(i - 1367)
else:
prefix = 'NEG_' + str(i)
train_arrays[index] = model.docvecs[prefix]
train_labels[index] = Y[i]
test_arrays = numpy.zeros((size_test, 100))
test_labels = numpy.zeros(size_test)
for index, i in enumerate(test):
if Y[i] == 1:
prefix = 'POS_' + str(i - 1367 - 1367)
elif Y[i] == 0:
prefix = 'NEU_' + str(i - 1367)
else:
prefix = 'NEG_' + str(i)
test_arrays[index] = model.docvecs[prefix]
test_labels[index] = Y[i]
log.info('Fitting')
classifier = LogisticRegression(C=1.0, dual=False, fit_intercept=True, intercept_scaling=1, penalty='l2', random_state=None, tol=0.00001)
classifier.fit(train_arrays, train_labels)
print(classifier.score(test_arrays, test_labels))
clf = RandomForestClassifier(random_state=0, n_estimators=80, class_weight='auto').fit(train_arrays, train_labels)
print(clf.score(test_arrays, test_labels))
def parts(str, current, elements):
if len(str) < 1:
return elements + [current]
if current == '' or current.startswith(str[0]):
return parts(str[1:], current + str[0], elements)
return parts(str[1:], str[0], elements + [current]) | apache-2.0 | -9,047,471,860,034,433,000 | 28.698718 | 150 | 0.618092 | false |
juix/scripts | motherless-ai/makeDB.py | 1 | 10266 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: João Juíz
'''
import sys,os,re,parsedatetime
import lxml.etree
from lxml.cssselect import CSSSelector
from myDb import ExtendedDB
import config
htmldir = os.path.expanduser("~/.motherless-dl/html")
class HTMLMixin(object):
def __call__(self):
#self.handleHtml("C5B1863")
#self.handleAllHtml()
pass
def writeAllHtml(self):
""" read all files from ~/.motherless-dl/html/$ID and write features to DB
if $ID does not already exist. """
print("Html to database")
ids = self.getAllRetrievedIds()
ids_todo = ids.difference(self.getAllDbIds())
for i in ids_todo:
sys.stderr.write("\t%s\n"%i)
self.handleHtml(i)
self.db.commit()
def handleHtml(self, id_):
""" handle a single html file and write to DB """
with open(os.path.join(htmldir,id_)) as f:
#self.xmlTree = fromstring(f.read())
self.xmlTree = lxml.etree.HTML(f.read())
uploaderName, uploaderLink = self.extractA("div.thumb-member-username a")
uploader = (uploaderName, uploaderLink)
title = self.find("#view-upload-title")[0].text.strip()
views = self.extractHInfo("Views").replace(",","")
favourited = self.extractHInfo("Favorited").replace(",","")
time = self.parseDate(self.extractHInfo("Uploaded"))
tags = [(e.text,e.get("href")) for e in self.find("#media-tags-container a")]
groups = [(e.text,e.get("href")) for e in self.find("#media-groups-container a")]
comments = [self.parseComment(c) for c in self.find(".media-comment-contents")]
self.db.query("DELETE FROM users WHERE link=%s",(uploaderLink,))
self.db.save("users",link=uploaderLink,name=uploaderName)
self.db.query("DELETE FROM medium WHERE id=%s",(id_,))
self.db.save("medium",id=id_,uploaderlink=uploaderLink,
title=title,views=views,favourited=favourited,
time=time)
for tag in tags:
self.db.save("tags",id=id_,name=tag[0],link=tag[1])
for group in groups:
self.db.save("groups",id=id_,name=group[0],link=group[1])
for c in comments:
author,authorlink = c["author"]
self.db.query("DELETE FROM users WHERE link=%s",(authorlink,))
self.db.save("users",link=authorlink,name=author)
self.db.save("comments",id=id_,authorlink=authorlink,time=c["time"],content=c["content"])
# parse H file
hfile = os.path.join(htmldir,"H%s"%id_)
if not os.path.exists(hfile):
sys.stderr.write("WARNING H%s does not exist.\n"%id_)
return
with open(hfile) as f:
self.xmlTree = lxml.etree.HTML(f.read())
recommended = set([e.get("data-codename") for e in self.find("div[data-codename]")])
for r in recommended:
self.db.save("also_favourited",id=id_,id_also=r)
del self.xmlTree
def parseDate(self, s):
c = parsedatetime.Constants()
c.YearParseStyle = 0
c.DOWParseStyle = -1 # oder 0
c.CurrentDOWParseStyle = False
dt,flags = parsedatetime.Calendar(c).parseDT(s)
if flags == 0:
raise Exception("WARNING: Cannot parse date '%s'."%s)
return dt
def parseComment(self, c):
""" each comment calls this function """
a = c.xpath("h4/a")[0]
author = a.text.strip()
authorHref = a.get("href")
time = c.xpath("*[@class='media-comment-meta']")[0].text.strip()
content = c.xpath("div[@style]")[-1].xpath("string()").strip()
return dict(author=(author,authorHref),time=self.parseDate(time),content=content)
def extractA(self, selector):
""" return text and link of an a-tag """
l = self.find(selector)
if len(l) == 0: raise Exception("ElementNotFound: %s"%selector)
e = l[0]
if "href" not in e.keys(): raise Exception("AttributeNotFound: 'href' in %s."%selector)
href = e.get("href")
text = e.text.strip()
return text,href
def extractHInfo(self, text):
""" return text of parent element. e.g.
"<h1>News</h1> This is good news!" -> extractHInfo("News") = "News This is good news!"
"""
return self.findHeadline(text).xpath("text()")[-1].strip()
def findHeadline(self, text):
h = self.findByText(text)
if h is None: return None
else: return h.xpath("..")[0]
def findByText(self, text):
e = self.xmlTree.xpath("//*[contains(text(),'%s')]"%text)
if len(e) == 0: return None
else: return e[0]
def find(self, selector):
""" find by css """
sel = CSSSelector(selector)
return sel(self.xmlTree)
class IdsMixin(object):
""" Functions concerning file search """
def getAllDbIds(self):
""" get all ids stored in DB """
self.db.query("SELECT id FROM medium")
return [r[0] for r in self.db.cursor.fetchall()]
def writeIdsToDb(self):
print("Updating likes/dislikes in DB")
db = self.db
self.refreshStoredFiles()
all_ = self.getAllRetrievedIds()
#for x in all_: db.query("INSERT INTO ids_all VALUES (%s)",(x,))
#db.commit()
storedFiles = self.getStoredFiles()
storedIds = [i for i,f in storedFiles]
self.db.query("DELETE FROM ids_dislikes")
self.db.query("DELETE FROM ids_likes")
dislikes = all_.difference(storedIds)
for x in dislikes: db.query("INSERT INTO ids_dislikes VALUES (%s)",(x,))
likes = set([i for i,f in storedFiles if self.isFav(f)])
for x in likes: db.query("INSERT INTO ids_likes VALUES (%s)",(x,))
#testset = set(storedIds).difference(likes)
print("\t%d files stored."%len(set(storedIds)))
self.db.commit()
def getAllRetrievedIds(self):
""" get all ids ever downloaded """
return set([x for x in os.listdir(htmldir)
if not x.startswith("H")])
def isFav(self, path):
root = os.path.dirname(path)
return "favs" in root or "keep" in root
def pathToId(self, f):
r = re.findall(".*OTHERLESS\.COM\ \-\ ([^\.\-\ ]*)",f)
if len(r) == 0: return None # not a ml-file
if len(r[0]) > 7:
sys.stderr.write("%s, %s\n"%(r[0],f))
return r[0]
def walkAll(self):
""" return all file paths from motherless' config.downloadDirs """
for d in config.downloadDirs:
for f in self.walkDir(d):
yield f
def walkDir(self, path):
""" return all potentially useful file paths from @path """
for root, dirs, files in os.walk(path):
if "by-id" in root or os.path.basename(root) == "by-rating": continue
for f in files:
if f.startswith("_"): continue
yield os.path.join(root,f)
def _getPredictedLikes(self):
self.db.query("SELECT id FROM predictions WHERE cls = 1")
likes = [r[0] for r in self.db.cursor.fetchall()]
return likes
def linkPredictedLikes(self):
""" link all as like predicted files to config.predictedLikesDir """
self._forallPredictedLikes(os.symlink)
def _forallPredictedLikes(self, func):
""" call func(src, dest) for each file predicted as "like" """
destDir = config.predictedLikesDir
if not os.path.lexists(destDir): os.makedirs(destDir)
likes = self._getPredictedLikes()
stored = dict(self.getStoredFiles())
log = []
for i in likes:
f = stored[i]
dest = os.path.join(destDir,os.path.basename(f))
if os.path.lexists(dest):
sys.stderr.write("WARNING: %s exists more than once.\n"%i)
continue
func(f,dest)
#print(f,dest)
log.append((f,dest))
import json
with open("/tmp/motherless-ai.log","w") as f:
json.dump(log,f)
def mvPredictedLikes(self):
""" link all as like predicted files to config.predictedLikesDir """
self._forallPredictedLikes(os.rename)
def getStoredFiles(self):
""" get list of (id,path) of all files stored on hd according to DB """
self.db.query("SELECT id, path FROM file_system")
return [(r[0],r[1]) for r in self.db.cursor.fetchall()]
def refreshStoredFiles(self):
""" write all file paths in config.downloadDirs and their ids to DB """
self.db.query("DELETE FROM file_system")
for f in self.walkAll():
i = self.pathToId(f)
if i is None: continue
self.db.query("INSERT INTO file_system VALUES (%s,%s,%s)",(i,f,self.isFav(f)))
self.db.commit()
def _del_eraseFiles(self):
""" erase files removed in config.predictedLikesDir """
linked = self._getPredictedLikes()
existing = set([self.pathToId(f) for f in self.walkDir(config.predictedLikesDir)]).difference([None])
removed = set(linked).difference(existing)
self.db.query("SELECT path FROM file_system WHERE id IN %s",(tuple(removed),))
removedPaths = [r[0] for r in self.db.cursor.fetchall()]
sys.stderr.write("Links to these files have been removed:\n\n")
for f in removedPaths: print(f)
for i in removed:
for by_id_path in config.byIdPaths:
path = os.path.join(by_id_path,i)
if os.path.exists(path): print(path)
def checkEmptyLikesDir(self):
pld = config.predictedLikesDir
if os.path.exists(pld) and len(os.listdir(pld)) > 0:
raise Exception("ERROR: %s is not empty!"%pld)
return True
class Main(IdsMixin,HTMLMixin):
def __init__(self):
self.db = ExtendedDB(commitOnClose=False)
DbPen = Main
if __name__ == "__main__":
Main()()
| gpl-3.0 | -3,747,976,984,683,073,000 | 37.298507 | 109 | 0.571025 | false |
vasily-v-ryabov/pywinauto-64 | pywinauto/RemoteMemoryBlock.py | 1 | 11194 | from __future__ import absolute_import
import ctypes, win32api, win32gui, win32con, pywintypes, win32process, sys, inspect, traceback
from . import win32functions
from . import win32defines
from . import win32structures
from . import sysinfo
class AccessDenied(RuntimeError):
"Raised when we cannot allocate memory in the control's process"
pass
#====================================================================
class RemoteMemoryBlock(object):
"Class that enables reading and writing memory in a different process"
#----------------------------------------------------------------
def __init__(self, handle, size = 4096): #4096): #16384):
"Allocate the memory"
self.memAddress = 0
self.size = size
self.process = 0
if handle == 0xffffffff80000000:
raise Exception('Incorrect handle: ' + str(handle))
self._as_parameter_ = self.memAddress
if sysinfo.is_x64_Python():
process_id = ctypes.c_ulonglong()
else:
process_id = ctypes.c_ulong()
win32functions.GetWindowThreadProcessId(
handle, ctypes.byref(process_id))
if not process_id.value:
raise AccessDenied(
str(ctypes.WinError()) + " Cannot get process ID from handle.")
# XXX: it doesn't work in some cases
#py_handle = pywintypes.HANDLE(handle.handle)
#(tid, pid) = win32process.GetWindowThreadProcessId(py_handle)
#self.process = win32api.OpenProcess(win32con.PROCESS_VM_OPERATION | win32con.PROCESS_VM_READ | win32con.PROCESS_VM_WRITE, 0, pid)
#print 'self.process.handle = ', self.process.handle
self.process = win32functions.OpenProcess(
win32defines.PROCESS_VM_OPERATION |
win32defines.PROCESS_VM_READ |
win32defines.PROCESS_VM_WRITE,
0,
process_id)
if not self.process:
raise AccessDenied(
str(ctypes.WinError()) + "process: %d",
process_id.value)
self.memAddress = win32functions.VirtualAllocEx(
ctypes.c_void_p(self.process), # remote process
ctypes.c_void_p(0), # let Valloc decide where
win32structures.ULONG_PTR(self.size + 4),# how much to allocate
win32defines.MEM_RESERVE |
win32defines.MEM_COMMIT, # allocation type
win32defines.PAGE_READWRITE # protection
)
if hasattr(self.memAddress, 'value'):
self.memAddress = self.memAddress.value
if self.memAddress == 0:
raise ctypes.WinError()
if hex(self.memAddress) == '0xffffffff80000000' or hex(self.memAddress).upper() == '0xFFFFFFFF00000000':
raise Exception('Incorrect allocation: ' + hex(self.memAddress))
self._as_parameter_ = self.memAddress
# write guard signature at the end of memory block
signature = win32structures.LONG(0x66666666)
ret = win32functions.WriteProcessMemory(
ctypes.c_void_p(self.process),
ctypes.c_void_p(self.memAddress + self.size),
ctypes.pointer(signature),
win32structures.ULONG_PTR(4),
            win32structures.ULONG_PTR(0))
if ret == 0:
print('================== Error: Failed to write guard signature: address = ', self.memAddress, ', size = ', self.size)
last_error = win32api.GetLastError()
print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())
sys.stdout.flush()
#----------------------------------------------------------------
def _CloseHandle(self):
"Close the handle to the process."
ret = win32functions.CloseHandle(self.process)
#win32api.CloseHandle(self.process)
if ret == 0:
#raise ctypes.WinError()
print('Error: cannot close process handle!')
#----------------------------------------------------------------
def CleanUp(self):
"Free Memory and the process handle"
if self.process != 0 and self.memAddress != 0:
# free up the memory we allocated
#win32api.SetLastError(0)
self.CheckGuardSignature()
ret = win32functions.VirtualFreeEx(
ctypes.c_void_p(self.process), ctypes.c_void_p(self.memAddress), win32structures.ULONG_PTR(0), win32structures.DWORD(win32defines.MEM_RELEASE))
if ret == 0:
print('Error: CleanUp: VirtualFreeEx() returned zero for address ', hex(self.memAddress))
last_error = win32api.GetLastError()
print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())
sys.stdout.flush()
#win32gui.MessageBox(0, '2) VirtualFreeEx returned zero for address ' + str(hex(self.memAddress)), 'VirtualFreeEx failed!', win32con.MB_OK)
#self._CloseHandle()
raise ctypes.WinError()
self.memAddress = 0
#self._CloseHandle()
else:
print('\nWARNING: Cannot call VirtualFreeEx! process_id == 0.')
#----------------------------------------------------------------
def __del__(self):
"Ensure that the memory is Freed"
# Free the memory in the remote process's address space
self.CleanUp()
#----------------------------------------------------------------
def Address(self):
"Return the address of the memory block"
return self.memAddress
#----------------------------------------------------------------
def Write(self, data, address = None, size = None):
"Write data into the memory block"
# write the data from this process into the memory allocated
# from the other process
if not address:
address = self.memAddress
if hasattr(address, 'value'):
address = address.value
if size:
nSize = win32structures.ULONG_PTR(size)
else:
nSize = win32structures.ULONG_PTR(ctypes.sizeof(data))
if self.size < nSize.value:
raise Exception('Write: RemoteMemoryBlock is too small (' + str(self.size) + ' bytes), ' + str(nSize.value) + ' is required.')
if hex(address).lower().startswith('0xffffff'):
raise Exception('Write: RemoteMemoryBlock has incorrect address = ' + hex(address))
ret = win32functions.WriteProcessMemory(
ctypes.c_void_p(self.process),
ctypes.c_void_p(address),
ctypes.pointer(data),
nSize,
            win32structures.ULONG_PTR(0))
if ret == 0:
print('Error: Write failed: address = ', address)
last_error = win32api.GetLastError()
print('Error: LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())
sys.stdout.flush()
#raise ctypes.WinError()
self.CheckGuardSignature()
#----------------------------------------------------------------
def Read(self, data, address = None, size = None):
"Read data from the memory block"
if not address:
address = self.memAddress
if hasattr(address, 'value'):
address = address.value
if size:
nSize = win32structures.ULONG_PTR(size)
else:
nSize = win32structures.ULONG_PTR(ctypes.sizeof(data))
if self.size < nSize.value:
raise Exception('Read: RemoteMemoryBlock is too small (' + str(self.size) + ' bytes), ' + str(nSize.value) + ' is required.')
if hex(address).lower().startswith('0xffffff'):
raise Exception('Read: RemoteMemoryBlock has incorrect address =' + hex(address))
lpNumberOfBytesRead = ctypes.c_size_t(0)
ret = win32functions.ReadProcessMemory(
ctypes.c_void_p(self.process),
ctypes.c_void_p(address),
ctypes.byref(data),
nSize,
ctypes.byref(lpNumberOfBytesRead))
# disabled as it often returns an error - but
# seems to work fine anyway!!
if ret == 0:
# try again
ret = win32functions.ReadProcessMemory(
ctypes.c_void_p(self.process),
ctypes.c_void_p(address),
ctypes.byref(data),
nSize,
ctypes.byref(lpNumberOfBytesRead))
if ret == 0:
last_error = win32api.GetLastError()
if last_error != win32defines.ERROR_PARTIAL_COPY:
print('\nError: Read: WARNING! self.memAddress =', self.memAddress, ' data address =', ctypes.byref(data))
print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())
print('lpNumberOfBytesRead =', lpNumberOfBytesRead, ' nSize =', nSize)
print('Caller stack:')
for frame in inspect.stack():
print(frame[1:])
print()
sys.stdout.flush()
raise ctypes.WinError()
else:
print('Error: ERROR_PARTIAL_COPY')
print('\nRead: WARNING! self.memAddress =', self.memAddress, ' data address =', ctypes.byref(data))
print('lpNumberOfBytesRead =', lpNumberOfBytesRead, ' nSize =', nSize)
print('Caller stack:')
for frame in inspect.stack():
print('\t\t', frame[1:])
print()
sys.stdout.flush()
else:
print('Read OK: 2nd attempt!')
#else:
# print 'Read OK: lpNumberOfBytesRead =', lpNumberOfBytesRead, ' nSize =', nSize
self.CheckGuardSignature()
return data
#----------------------------------------------------------------
def CheckGuardSignature(self):
# read guard signature at the end of memory block
signature = win32structures.LONG(0)
lpNumberOfBytesRead = ctypes.c_size_t(0)
ret = win32functions.ReadProcessMemory(
ctypes.c_void_p(self.process),
ctypes.c_void_p(self.memAddress + self.size),
ctypes.pointer(signature), # 0x66666666
win32structures.ULONG_PTR(4),
ctypes.byref(lpNumberOfBytesRead));
if ret == 0:
print('Error: Failed to read guard signature: address = ', self.memAddress, ', size = ', self.size, ', lpNumberOfBytesRead = ', lpNumberOfBytesRead)
sys.stdout.flush()
#last_error = win32api.GetLastError()
#print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())
raise ctypes.WinError()
else:
if hex(signature.value) != '0x66666666':
raise Exception('---------------------------------------- Error: read incorrect guard signature = ' + hex(signature.value))
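# A minimal, platform-neutral sketch (illustrative only, not used by the
# class above) of how Write()/Read() size their transfers when no explicit
# 'size' argument is given: ctypes.sizeof of the ctypes object being copied
# is the byte count handed to WriteProcessMemory/ReadProcessMemory.
def _demo_default_transfer_size():
    "Return the byte count Write()/Read() would use for a sample structure"
    import ctypes
    class _DemoStruct(ctypes.Structure):
        # purely hypothetical layout, chosen only for the demonstration
        _fields_ = [('flag', ctypes.c_long), ('value', ctypes.c_double)]
    data = _DemoStruct(1, 2.0)
    return ctypes.sizeof(data)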
| lgpl-2.1 | 882,680,462,412,986,000 | 42.053846 | 160 | 0.541987 | false |
Tong-Chen/scikit-learn | sklearn/metrics/metrics.py | 1 | 75466 | # -*- coding: utf-8 -*-
"""Utilities to evaluate the predictive performance of models
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_arrays
from ..utils import deprecated
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.fixes import bincount
###############################################################################
# General utilities
###############################################################################
def _check_reg_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array-like of shape = [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples, n_outputs]
Estimated target values.
"""
y_true, y_pred = check_arrays(y_true, y_pred)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
y_type = 'continuous' if y_true.shape[1] == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred
def _check_clf_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d.
Parameters
----------
y_true : array-like,
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multilabel-sequences', \
'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix or sequence of sequences
y_pred : array or indicator matrix or sequence of sequences
"""
y_true, y_pred = check_arrays(y_true, y_pred, allow_lists=True)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator",
"multilabel-sequences"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
return y_type, y_true, y_pred
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
x, y = check_arrays(x, y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
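# A minimal numeric sketch of the trapezoidal rule that ``auc`` delegates to
# ``np.trapz``: each segment contributes the mean of its two y values times
# the x spacing.  The points below are illustrative only.
def _demo_auc_trapezoid():
    x = np.array([0.0, 0.5, 1.0])
    y = np.array([0.0, 0.5, 1.0])
    # manual trapezoidal sum: sum over segments of (y_i + y_{i+1}) / 2 * dx
    manual = np.sum((y[1:] + y[:-1]) / 2.0 * np.diff(x))
    assert np.allclose(manual, auc(x, y))  # both give 0.5 here
    return manual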
class UndefinedMetricWarning(UserWarning):
pass
###############################################################################
# Classification metrics
###############################################################################
def hinge_loss(y_true, pred_decision, pos_label=None, neg_label=None):
"""Average hinge loss (non-regularized)
Assuming labels in y_true are encoded with +1 and -1, when a prediction
mistake is made, ``margin = y_true * pred_decision`` is always negative
(since the signs disagree), implying ``1 - margin`` is always greater than
1. The cumulated hinge loss is therefore an upper bound of the number of
mistakes made by the classifier.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='l2', multi_class='ovr', penalty='l2',
random_state=0, tol=0.0001, verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
"""
if pos_label is not None:
warnings.warn("'pos_label' is deprecated and will be removed in "
"release 0.15.", DeprecationWarning)
if neg_label is not None:
warnings.warn("'neg_label' is unused and will be removed in "
"release 0.15.", DeprecationWarning)
# TODO: multi-class hinge-loss
# the rest of the code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
if pos_label is not None:
y_true = (np.asarray(y_true) == pos_label) * 2 - 1
else:
y_true = LabelBinarizer(neg_label=-1).fit_transform(y_true)[:, 0]
margin = y_true * np.asarray(pred_decision)
losses = 1 - margin
# The hinge doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.mean(losses)
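# Worked sketch of the hinge computation above for labels already encoded as
# +1/-1: each loss is max(0, 1 - y * decision) and the result is their mean.
# The decision values are illustrative only.
def _demo_hinge_loss():
    y = np.array([-1, 1, 1])
    decision = np.array([-2.18, 2.36, 0.09])
    manual = np.mean(np.maximum(0.0, 1.0 - y * decision))
    assert np.allclose(manual, hinge_loss(y, decision))  # ~0.303
    return manual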
def average_precision_score(y_true, y_score, average="macro"):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
    Parameters
    ----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score):
precision, recall, thresholds = precision_recall_curve(y_true, y_score)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average)
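# Sketch checking that the average precision above is literally the area
# under the precision-recall curve computed by the helpers in this module.
# The scores are illustrative only.
def _demo_average_precision_is_pr_auc():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    precision, recall, _ = precision_recall_curve(y_true, y_score)
    assert np.allclose(auc(recall, precision),
                       average_precision_score(y_true, y_score))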
@deprecated("Function 'auc_score' has been renamed to "
"'roc_auc_score' and will be removed in release 0.16.")
def auc_score(y_true, y_score):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> auc_score(y_true, y_scores)
0.75
"""
return roc_auc_score(y_true, y_score)
def _average_binary_score(binary_metric, y_true, y_score, average):
"""Average a binary metric for multilabel classification
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
    Returns
    -------
score : float or array of shape [n_classes]
If not ``None``, average the score, else return the score for each
classes.
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options:
raise ValueError('average has to be one of {0}'
''.format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score)
y_true, y_score = check_arrays(y_true, y_score)
if average == "micro":
y_true = y_true.ravel()
y_score = y_score.ravel()
if average == 'weighted':
weights = np.sum(y_true, axis=0)
if weights.sum() == 0:
return 0
else:
weights = None
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
not_average_axis = 0 if average == 'samples' else 1
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c)
# Average the results
if average is not None:
return np.average(score, weights=weights)
else:
return score
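# Small sketch of the label-wise averaging performed above, on a toy
# two-label indicator problem: 'macro' is the unweighted mean of the
# per-label scores returned by ``average=None``.  Data is illustrative only.
def _demo_macro_average():
    y_true = np.array([[1, 0], [0, 1], [1, 1]])
    y_score = np.array([[0.9, 0.2], [0.1, 0.8], [0.8, 0.7]])
    per_label = roc_auc_score(y_true, y_score, average=None)
    macro = roc_auc_score(y_true, y_score, average='macro')
    assert np.allclose(macro, np.mean(per_label))
    return per_label, macro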
def roc_auc_score(y_true, y_score, average="macro"):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score):
if len(np.unique(y_true)) != 2:
raise ValueError("ROC AUC score is not defined")
        fpr, tpr, thresholds = roc_curve(y_true, y_score)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(_binary_roc_auc_score, y_true, y_score,
average)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_clf_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
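# Sketch relating the correlation-based implementation above to the usual
# confusion-count form of the MCC:
#     (tp * tn - fp * fn) / sqrt((tp + fp)(tp + fn)(tn + fp)(tn + fn))
# The labels below are illustrative only.
def _demo_matthews_from_counts():
    y_true = np.array([1, 1, 1, 0])
    y_pred = np.array([1, 0, 1, 1])
    tp = np.sum((y_true == 1) & (y_pred == 1))
    tn = np.sum((y_true == 0) & (y_pred == 0))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    fn = np.sum((y_true == 1) & (y_pred == 0))
    manual = ((tp * tn - fp * fn) /
              np.sqrt(float((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))))
    assert np.allclose(manual, matthews_corrcoef(y_true, y_pred))  # -1/3
    return manual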
def _binary_clf_curve(y_true, y_score, pos_label=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=1)
The label of the positive class
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
y_true, y_score = check_arrays(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# Sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = y_true.cumsum()[threshold_idxs]
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
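# Tiny worked example of the cumulative statistics built above: scores are
# sorted in decreasing order and, at each distinct threshold, tps/fps count
# the positives/negatives scoring at or above it.  Data is illustrative only.
def _demo_binary_clf_curve():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
    assert np.allclose(thresholds, [0.8, 0.4, 0.35, 0.1])
    assert np.all(tps == np.array([1, 1, 2, 2]))
    assert np.all(fps == np.array([0, 1, 1, 2]))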
def precision_recall_curve(y_true, probas_pred, pos_label=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
    y axis.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
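# Sketch tying the outputs above back to the cumulative counts of
# _binary_clf_curve: precision = tps / (tps + fps) and recall = tps / tps[-1],
# truncated at full recall, reversed, and finished with a (recall=0,
# precision=1) point.  Data is illustrative only.
def _demo_precision_recall_from_counts():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fps, tps, _ = _binary_clf_curve(y_true, y_score)
    precision, recall, _ = precision_recall_curve(y_true, y_score)
    last_ind = tps.searchsorted(tps[-1])  # first index with full recall
    sl = slice(last_ind, None, -1)
    assert np.allclose(precision[:-1], (tps / (tps + fps))[sl])
    assert np.allclose(recall[:-1], (tps / tps[-1])[sl])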
def roc_curve(y_true, y_score, pos_label=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
pos_label : int
Label considered as positive and others are considered negative.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
        Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, y_score, pos_label)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] == 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] == 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
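# Sketch of the normalisation performed above when no extra origin point is
# needed: fpr and tpr are just the cumulative false/true positive counts
# divided by the totals.  Data is illustrative only.
def _demo_roc_curve_normalisation():
    y_true = np.array([1, 1, 2, 2])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fps, tps, _ = _binary_clf_curve(y_true, y_score, pos_label=2)
    fpr, tpr, _ = roc_curve(y_true, y_score, pos_label=2)
    assert np.allclose(fpr, fps / fps[-1])
    assert np.allclose(tpr, tps / tps[-1])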
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_clf_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = np.asarray(
coo_matrix(
(np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).todense()
)
return CM
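# Sketch of how to read the matrix built above: C[i, j] counts samples whose
# true label is labels[i] and whose predicted label is labels[j], so the
# diagonal holds the per-class correct predictions.  Data is illustrative only.
def _demo_read_confusion_matrix():
    y_true = np.array([2, 0, 2, 2, 0, 1])
    y_pred = np.array([0, 0, 2, 2, 0, 2])
    C = confusion_matrix(y_true, y_pred)
    # one sample of true class 2 was predicted as 0, two were predicted as 2
    assert C[2, 0] == 1 and C[2, 2] == 2
    # the diagonal sum is the total number of correct predictions
    assert C.trace() == np.sum(y_true == y_pred)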
def zero_one_loss(y_true, y_pred, normalize=True):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or list of labels or label indicator matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
    >>> import numpy as np
    >>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary indicator format:
>>> zero_one_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.ones((2, 2)))
0.5
and with a list of labels format:
>>> zero_one_loss([(1, ), (3, )], [(1, 2), tuple()])
1.0
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize)
if normalize:
return 1 - score
else:
n_samples = len(y_true)
return n_samples - score
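# Sketch of the identity used above: the zero-one loss is one minus the
# (subset) accuracy, or n_samples minus the number of exact matches when
# ``normalize=False``.  Data is illustrative only.
def _demo_zero_one_vs_accuracy():
    y_true = [1, 2, 3, 4]
    y_pred = [1, 2, 3, 5]
    assert zero_one_loss(y_true, y_pred) == 1 - accuracy_score(y_true, y_pred)
    assert (zero_one_loss(y_true, y_pred, normalize=False) ==
            len(y_true) - accuracy_score(y_true, y_pred, normalize=False))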
def log_loss(y_true, y_pred, eps=1e-15, normalize=True):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the total loss.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clip and renormalize
Y = np.clip(y_pred, eps, 1 - eps)
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum()
return loss / T.shape[0] if normalize else loss
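# Worked form of the expression above for a tiny binary problem: each sample
# contributes -log of the probability assigned to its true class (clipping
# and renormalisation change nothing here), and the mean is returned.
# Probabilities are illustrative only.
def _demo_log_loss_by_hand():
    y_true = [0, 1]
    y_prob = np.array([[0.9, 0.1], [0.2, 0.8]])
    manual = -(np.log(0.9) + np.log(0.8)) / 2.0
    assert np.allclose(manual, log_loss(y_true, y_prob))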
def jaccard_similarity_score(y_true, y_pred, normalize=True):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or list of labels or label indicator matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary indicator format:
>>> jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]),\
np.ones((2, 2)))
0.75
and with a list of labels format:
>>> jaccard_similarity_score([(1, ), (3, )], [(1, 2), tuple()])
0.25
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_clf_targets(y_true, y_pred)
if y_type == 'multilabel-indicator':
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide"
# error here
y_pred_pos_label = y_pred == 1
y_true_pos_label = y_true == 1
pred_inter_true = np.sum(np.logical_and(y_pred_pos_label,
y_true_pos_label),
axis=1)
pred_union_true = np.sum(np.logical_or(y_pred_pos_label,
y_true_pos_label),
axis=1)
score = pred_inter_true / pred_union_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_union_true == 0.0] = 1.0
elif y_type == 'multilabel-sequences':
score = np.empty(len(y_true), dtype=np.float)
        for i, (true, pred) in enumerate(zip(y_true, y_pred)):
true_set = set(true)
pred_set = set(pred)
size_true_union_pred = len(true_set | pred_set)
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
if size_true_union_pred == 0:
score[i] = 1.
else:
score[i] = (len(true_set & pred_set) /
size_true_union_pred)
else:
score = y_true == y_pred
if normalize:
return np.mean(score)
else:
return np.sum(score)
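# Per-sample sketch of the multilabel-indicator branch above: each sample
# scores |intersection| / |union| of its predicted and true label sets, and
# the scores are averaged.  Data is illustrative only.
def _demo_multilabel_jaccard():
    y_true = np.array([[0, 1, 1], [1, 0, 1]])
    y_pred = np.array([[1, 1, 1], [1, 0, 0]])
    # sample 0: intersection size 2, union size 3 -> 2/3
    # sample 1: intersection size 1, union size 2 -> 1/2
    expected = (2. / 3 + 1. / 2) / 2
    assert np.allclose(expected, jaccard_similarity_score(y_true, y_pred))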
def accuracy_score(y_true, y_pred, normalize=True):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or list of labels or label indicator matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
Returns
-------
score : float
        If ``normalize == True``, return the fraction of correctly classified
        samples (float), else return the number of correctly classified
        samples (int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary indicator format:
>>> accuracy_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.ones((2, 2)))
0.5
and with a list of labels format:
>>> accuracy_score([(1, ), (3, )], [(1, 2), tuple()])
0.0
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_clf_targets(y_true, y_pred)
if y_type == 'multilabel-indicator':
score = (y_pred != y_true).sum(axis=1) == 0
elif y_type == 'multilabel-sequences':
score = np.array([len(set(true) ^ set(pred)) == 0
for pred, true in zip(y_pred, y_true)])
else:
score = y_true == y_pred
if normalize:
return np.mean(score)
else:
return np.sum(score)
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='weighted'):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) target values.
y_pred : array-like or list of labels or label indicator matrix
Estimated targets as returned by a classifier.
labels : array
Integer array of labels.
pos_label : str or int, 1 by default
If ``average`` is not ``None`` and the classification target is binary,
only this class's scores will be returned.
average : string, [None, 'micro', 'macro', 'samples', 'weighted' (default)]
If ``None``, the scores for each class are returned. Otherwise,
unless ``pos_label`` is given in binary classification, this
determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='weighted'):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) target values.
y_pred : array-like or list of labels or label indicator matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : array
Integer array of labels.
pos_label : str or int, 1 by default
If ``average`` is not ``None`` and the classification target is binary,
only this class's scores will be returned.
average : string, [None, 'micro', 'macro', 'samples', 'weighted' (default)]
If ``None``, the scores for each class are returned. Otherwise,
unless ``pos_label`` is given in binary classification, this
determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',))
return f
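# Numeric check of the weighted harmonic mean described above,
#     F_beta = (1 + beta**2) * P * R / (beta**2 * P + R),
# recomputed from the per-class precision and recall of this module (0/0
# cases are defined as 0, as in the implementation).  Data is illustrative.
def _demo_fbeta_from_precision_recall():
    y_true = [0, 1, 2, 0, 1, 2]
    y_pred = [0, 2, 1, 0, 0, 1]
    beta = 0.5
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, beta=beta,
                                                 average=None)
    with np.errstate(divide='ignore', invalid='ignore'):
        manual = (1 + beta ** 2) * p * r / (beta ** 2 * p + r)
    manual[np.isnan(manual)] = 0.0
    assert np.allclose(manual, f)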
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score')):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) target values.
y_pred : array-like or list of labels or label indicator matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : array
Integer array of labels.
pos_label : str or int, 1 by default
If ``average`` is not ``None`` and the classification target is binary,
only this class's scores will be returned.
average : string, [None (default), 'micro', 'macro', 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
unless ``pos_label`` is given in binary classification, this
determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
Returns
-------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array([0, 1, 2, 0, 1, 2])
>>> y_pred = np.array([0, 2, 1, 0, 0, 1])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options:
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_clf_targets(y_true, y_pred)
label_order = labels # save this for later
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
if y_type == 'multilabel-sequences':
y_true = label_binarize(y_true, labels, multilabel=True)
y_pred = label_binarize(y_pred, labels, multilabel=True)
else:
# set negative labels to zero
y_true = y_true == 1
y_pred = y_pred == 1
sum_axis = 1 if average == 'samples' else 0
tp_sum = np.sum(np.logical_and(y_true, y_pred), axis=sum_axis)
pred_sum = np.sum(y_pred, axis=sum_axis, dtype=int)
true_sum = np.sum(y_true, axis=sum_axis, dtype=int)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel"
"classification. See the accuracy_score instead.")
else:
lb = LabelEncoder()
lb.fit(labels)
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
labels = lb.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp_bins = y_true[y_true == y_pred]
if len(tp_bins):
tp_sum = bincount(tp_bins, minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, minlength=len(labels))
### Select labels to keep ###
if y_type == 'binary' and average is not None and pos_label is not None:
if pos_label not in labels:
if len(labels) == 1:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, labels))
pos_label_idx = labels == pos_label
tp_sum = tp_sum[pos_label_idx]
pred_sum = pred_sum[pos_label_idx]
true_sum = true_sum[pos_label_idx]
elif average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
else:
weights = None
if average is not None:
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
elif label_order is not None:
indices = np.searchsorted(labels, label_order)
precision = precision[indices]
recall = recall[indices]
f_score = f_score[indices]
true_sum = true_sum[indices]
return precision, recall, f_score, true_sum
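# Cross-check of the per-class statistics above against the confusion
# matrix: precision_k is C[k, k] over the k-th column sum, recall_k is
# C[k, k] over the k-th row sum, and support is the row sum itself.  Data is
# illustrative only (no empty rows or columns, so no zero divisions occur).
def _demo_prfs_vs_confusion_matrix():
    y_true = np.array([0, 1, 2, 0, 1, 2])
    y_pred = np.array([0, 2, 1, 0, 0, 1])
    p, r, _, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    C = confusion_matrix(y_true, y_pred)
    assert np.allclose(p, np.diag(C) / C.sum(axis=0).astype(float))
    assert np.allclose(r, np.diag(C) / C.sum(axis=1).astype(float))
    assert np.all(s == C.sum(axis=1))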
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='weighted'):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) target values.
y_pred : array-like or list of labels or label indicator matrix
Estimated targets as returned by a classifier.
labels : array
Integer array of labels.
pos_label : str or int, 1 by default
If ``average`` is not ``None`` and the classification target is binary,
only this class's scores will be returned.
average : string, [None, 'micro', 'macro', 'samples', 'weighted' (default)]
If ``None``, the scores for each class are returned. Otherwise,
unless ``pos_label`` is given in binary classification, this
determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',))
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='weighted'):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) target values.
y_pred : array-like or list of labels or label indicator matrix
Estimated targets as returned by a classifier.
labels : array
Integer array of labels.
pos_label : str or int, 1 by default
If ``average`` is not ``None`` and the classification target is binary,
only this class's scores will be returned.
average : string, [None, 'micro', 'macro', 'samples', 'weighted' (default)]
If ``None``, the scores for each class are returned. Otherwise,
unless ``pos_label`` is given in binary classification, this
determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',))
return r
def classification_report(y_true, y_pred, labels=None, target_names=None):
"""Build a text report showing the main classification metrics
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) target values.
y_pred : array-like or list of labels or label indicator matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading))
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.2f}".format(v)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.2f}".format(v)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Parameters
----------
y_true : array-like or list of labels or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or list of labels or label indicator matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. Hamming loss is more forgiving in that it penalizes only the
    individual labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary indicator format:
>>> hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.zeros((2, 2)))
0.75
and with a list of labels format:
>>> hamming_loss([(1, 2), (3, )], [(1, 2), tuple()]) # doctest: +ELLIPSIS
0.166...
"""
y_type, y_true, y_pred = _check_clf_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type == 'multilabel-indicator':
return np.mean(y_true != y_pred)
elif y_type == 'multilabel-sequences':
loss = np.array([len(set(pred).symmetric_difference(true))
for pred, true in zip(y_pred, y_true)])
return np.mean(loss) / np.size(classes)
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
###############################################################################
# Regression metrics
###############################################################################
def mean_absolute_error(y_true, y_pred):
"""Mean absolute error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.mean(np.abs(y_pred - y_true))
def mean_squared_error(y_true, y_pred):
"""Mean squared error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.mean((y_pred - y_true) ** 2)
###############################################################################
# Regression score functions
###############################################################################
def explained_variance_score(y_true, y_pred):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like
Ground truth (correct) target values.
y_pred : array-like
Estimated target values.
Returns
-------
score : float
The explained variance.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if y_type != "continuous":
raise ValueError("{0} is not supported".format(y_type))
numerator = np.var(y_true - y_pred)
denominator = np.var(y_true)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
def r2_score(y_true, y_pred):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
Returns
-------
z : float
The R^2 score.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
numerator = ((y_true - y_pred) ** 2).sum(dtype=np.float64)
denominator = ((y_true - y_true.mean(axis=0)) ** 2).sum(dtype=np.float64)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
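# A quick illustration of the note above: because R^2 compares the model
# against the constant mean predictor, predictions worse than the mean give a
# negative score. Following the formula implemented above,
# r2_score([1, 2, 3], [3, 2, 1]) gives numerator 8 and denominator 2, hence
# 1 - 8/2 = -3.0.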
| bsd-3-clause | 3,345,353,880,831,719,400 | 33.680607 | 79 | 0.594328 | false |
syrgak/newrelic-plugin-varnish | plugin.py | 1 | 3312 | #!/usr/bin/env python
"""
Varnish Plugin for NewRelic
"""
from xml.etree import ElementTree
import helper
import logging
import subprocess
import json
import sys
import time
import requests
class NewRelicVarnishPlugin(helper.Controller):
"""
    The NewRelicVarnishPlugin class polls the varnishstat utility for stats and reports them to NewRelic
"""
def __init__(self, args, operating_system):
        super(NewRelicVarnishPlugin, self).__init__(args, operating_system)
def setup(self):
self.http_headers['X-License-Key'] = self.license_key
@property
def http_headers(self):
return {
'Accept': 'application/json',
'Content-Type': 'application/json'}
@property
def license_key(self):
return self.config.application.license_key
def process(self):
"""
        This method is called by the base class (helper.Controller) every sleep interval
"""
logging.info("Process")
data = self.fetch()
self.send(data)
def get_varnish_stats(self):
command = subprocess.Popen(['varnishstat', '-1', '-x'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
        out, err = command.communicate()
        if err:
            error_msg = 'Failed to fetch varnishstat output: %s' % err
            logging.error(error_msg)
            raise Exception(error_msg)
return out
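    # For reference, ``varnishstat -1 -x`` is assumed to emit XML roughly of
    # this shape (tag names can differ between Varnish versions); parse() below
    # simply turns every <stat> element into a dict of its child tags:
    #
    #   <varnishstat>
    #     <stat>
    #       <name>client_conn</name>
    #       <value>12345</value>
    #       <description>Client connections accepted</description>
    #     </stat>
    #     ...
    #   </varnishstat>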
def parse(self, output):
result = []
try:
stats = ElementTree.XML(output)
        except Exception:
raise
for stat in stats.iter(tag='stat'):
metrics = []
for prop in stat:
metrics.append((prop.tag, prop.text))
result.append(dict(metrics))
return result
def package_stats(self, stats):
components = {
'name': self.app_name,
'guid': self.guid,
'duration': self.duration,
'metrics': stats }
body = { 'agent': self.agent_data, 'components': components }
return body
def fetch(self):
try:
xml = self.get_varnish_stats()
stats = self.parse(xml)
except Exception as inst:
raise
return self.package_stats(stats)
def send(self, package):
try:
response = requests.post(self.endpoint,
headers=self.http_headers,
proxies=self.proxies,
data=json.dumps(package, ensure_ascii=False),
timeout=self.config.get('newrelic_api_timeout', 10),
                                     verify=self.config.get('verify_ssl_cert', True))
except requests.ConnectionError as error:
logging.error('Error contacting NewRelic server: %s', error)
except requests.Timeout as error:
logging.error('Timed out contacting NewRelic server: %s', error)
def main():
helper.parser.description('The Varnish Plugin for NewRelic polls varnishstat '
'for status and sends the data to the NewRelic '
'Platform')
helper.parser.name('newrelic_varnish_plugin')
argparse = helper.parser.get()
argparse.add_argument('-C',
action='store_true',
dest='configure',
help='Run interactive configuration')
args = helper.parser.parse()
if args.configure:
print('Configuration')
sys.exit(0)
helper.start(NewRelicVarnishPlugin)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
main()
| mit | 1,659,755,942,076,388,000 | 24.875 | 88 | 0.624698 | false |
open-machine-learning/mldata-utils | scripts/voc2005class.py | 1 | 2445 | """
Parser for voc 2005 images
reads the ImageSet directory and gets the classes of images,
then writes them to an arff file
"""
import re, os
reg_rect = "\"(?P<name>.*)\" \(.*\) : \((?P<xmin>[0-9]*), (?P<ymin>[0-9]*)\) - \((?P<xmax>[0-9]*), (?P<ymax>[0-9]*)\)"
reg_filename = "filename : \"(?P<filename>.*)\""
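# For reference, these two patterns target VOC 2005 style annotation lines of
# roughly the following (assumed) form, reconstructed from the regexes above:
#
#   Image filename : "some_set/some_image.png"
#   ... "PAScarSide" (original label) : (xmin, ymin) - (xmax, ymax)
#
# reg_filename captures the image path, reg_rect captures the object name and
# the two bounding-box corners.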
res = {}
FILES = list(range(0,10000))
T = {}
MAP = {
'PASposterClutter': 'dummy',
'PASbicycle': 'bicycle',
'PASskyRegion': 'dummy',
'PASbuildingPart': 'dummy',
'PASbicycleSide': 'bicycle',
'PAStreePart': 'dummy',
'PASpersonSitting': 'person',
'PASstreet': 'dummy',
'PASmotorbike': 'motorbike',
'PASwindow': 'dummy',
'PAScarPart': 'car',
'PAStreeWhole': 'dummy',
'PAStrafficlight': 'dummy',
'PASbuildingWhole': 'dummy',
'PAStrash': 'dummy',
'PAStreeRegion': 'dummy',
'PASstreetlight': 'dummy',
'PAScarRear': 'car',
'PASstreetSign': 'dummy',
'PASposter': 'dummy',
'PASdoor': 'dummy',
'PASmotorbikeSide': 'motorbike',
'PASstopSign': 'dummy',
'PASbuilding': 'dummy',
'PAScarSide': 'car',
'PAScarFrontal': 'car',
'PAScar': 'car',
'PAStree': 'dummy',
'PASpersonStanding': 'dummy',
'PASpersonWalking': 'dummy',
'PASperson': 'person',
'PASbuildingRegion': 'dummy'
}
def parse_labels(filename):
f = open(filename)
imgfile = ""
for line in f.readlines():
m = re.search(reg_filename, line)
if m:
imgfile = m.group('filename')
m = re.search(reg_rect, line)
if m:
name = m.group('name')
if imgfile not in res:
res[imgfile] = {}
# if not res[cls].has_key(imgfile):
res[imgfile][MAP[name]] = 1
T[MAP[name]] = 1
for dirname in os.listdir("."):
if (not os.path.isdir(dirname)) or (len(dirname) < 3):
continue
for filename in os.listdir(dirname):
if filename.split(".")[1] == "txt":
parse_labels(dirname + "/" + filename)
print("@relation classification\n")
print("@attribute image string")
CLSS = [key for key in list(T.keys()) if not key == "dummy"]
for key in CLSS:
print("@attribute %s numeric" % (key))
print("\n@data")
for key in list(res.keys()):
line = "'" + key + "',"
for cls in CLSS:
if cls in res[key]:
line += res[key][cls].__str__() + ","
else:
line += "-1,"
line = line[:-1]
print(line)
| gpl-3.0 | 7,142,772,437,603,581,000 | 26.166667 | 118 | 0.546012 | false |
ohel/pyorbital-gizmod-tweaks | gizmod_modules/104-USB-Remote-mpv.py | 1 | 3658 | from GizmoDaemon import *
from GizmoScriptDefault import *
import os
import ReadSymLink
ENABLED = True
VERSION_NEEDED = 3.2
INTERESTED_CLASSES = [GizmoEventClass.Standard]
MP_CONTROL_FILE = "/dev/shm/mpvfifo"
REMOTE_DEVICE = ReadSymLink.readlinkabs("/dev/input/remote")
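# The key handlers below write MPlayer-style slave commands ("pause",
# "seek +2", "vo_fullscreen", ...) into MP_CONTROL_FILE. This assumes the
# player was started reading commands from that FIFO, e.g. with something like
# `mpv --input-file=/dev/shm/mpvfifo ...`; the exact option depends on the
# player version.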
class USBRemoteMP(GizmoScriptDefault):
"""
USB Remote control
"""
def onEvent(self, Event, Gizmo = None):
"""
Called from Base Class' onEvent method.
See GizmodDispatcher.onEvent documention for an explanation of this function
"""
self.returnvalue = False
if Event.Class == GizmoEventClass.Standard and Event.Type == GizmoEventType.EV_KEY and Gizmo.FileName == REMOTE_DEVICE and os.popen("ps -e | grep mpv | head -n 1").read():
controlfile = open(MP_CONTROL_FILE, 'w')
if str(Event.Code) == "KEY_PLAYPAUSE" and Event.Value == 1:
controlfile.write("pause\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_STOPCD" and Event.Value == 1:
controlfile.write("frame_step\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_TAB":
if Event.Value == 0 and self.previous_value != 2:
controlfile.write("seek_chapter +1\n")
else:
controlfile.write("seek +2\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_BACKSPACE":
if Event.Value == 0 and self.previous_value != 2:
controlfile.write("seek_chapter -1\n")
else:
controlfile.write("seek -2\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_PAGEUP" and Event.Value == 1:
controlfile.write("audio_delay 0.1\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_PAGEDOWN" and Event.Value != 0:
controlfile.write("audio_delay -0.1\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_MAIL" and Event.Value == 1:
controlfile.write("sub_select\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_LEFTMETA" and Event.Value == 1:
controlfile.write("osd_show_progression\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_HOMEPAGE" and Event.Value == 1:
controlfile.write("vo_fullscreen\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_MUTE" and Event.Value == 1:
controlfile.write("switch_audio\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_VOLUMEUP" and Event.Value != 0:
controlfile.write("volume +2\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_VOLUMEDOWN" and Event.Value != 0:
controlfile.write("volume -2\n")
self.returnvalue = True
elif str(Event.Code) == "KEY_UP" and Event.Value == 1:
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_KP8)
self.returnvalue = True
elif str(Event.Code) == "KEY_DOWN" and Event.Value == 1:
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_KP2)
self.returnvalue = True
elif str(Event.Code) == "KEY_LEFT" and Event.Value == 1:
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_KP4)
self.returnvalue = True
elif str(Event.Code) == "KEY_RIGHT" and Event.Value == 1:
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_KP6)
self.returnvalue = True
elif str(Event.Code) == "KEY_ENTER" and Event.Value == 1:
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_ENTER)
self.returnvalue = True
controlfile.close()
self.previous_value = Event.Value
return self.returnvalue
else:
return False
def __init__(self):
"""
Default Constructor
"""
self.returnvalue = False
self.previous_value = 0
GizmoScriptDefault.__init__(self, ENABLED, VERSION_NEEDED, INTERESTED_CLASSES)
USBRemoteMP()
| unlicense | 4,115,400,300,158,171,600 | 36.326531 | 173 | 0.68152 | false |
misdoro/python-ase | tools/_ase_bash_complete.py | 1 | 2250 | #!/usr/bin/env python
"""Bash completion for ase-db, ase-run, ase-build, ase-info and ase-gui.
Put this in your .bashrc::
complete -o default -C _ase_bash_complete.py ase-db ase-run \
ase-build ase-info ase-gui
"""
import os
import sys
from glob import glob
command, word, previous = sys.argv[1:]
line = os.environ['COMP_LINE']
point = int(os.environ['COMP_POINT'])
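# Bash invokes a `complete -C` command with three positional arguments (the
# command being completed, the current word and the previous word) and exports
# the whole line and cursor offset via COMP_LINE / COMP_POINT. For example,
# completing `ase-db my.db --co<TAB>` should yield roughly:
#
#   sys.argv[1:] == ['ase-db', '--co', 'my.db']
#   COMP_LINE == 'ase-db my.db --co', COMP_POINT == '17'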
def options(short, long):
return ['-' + s for s in short] + ['--' + l for l in long.split()]
def match(word, *suffixes):
return [w for w in glob(word + '*')
if any(w.endswith(suffix) for suffix in suffixes)]
words = []
if command == 'ase-db':
if word[:1] == '-':
words = options(
'hvqnliakycspwLj',
'help verbose quiet count long insert-into add-from-file '
'add-key-value-pairs limit offset delete '
'delete-keys yes columns sort cut python csv '
'open-web-browser json unique')
elif previous == 'ase-db':
words = match(word, '.db', '.json')
elif command == 'ase-run':
if word[:1] == '-':
words = options(
'htpdSfsEic',
'help tag parameter database skip properties maximum-force '
'constrain-tags maximum-stress equation-of-state modify after')
elif previous == 'ase-run':
from ase.calculators.calculator import names as words
elif command == 'ase-build':
if previous in ['-x', '--crystal-structure']:
words = ['sc', 'fcc', 'bcc', 'hcp', 'diamond', 'zincblende',
'rocksalt', 'cesiumchloride', 'fluorite', 'wurtzite']
elif word[:1] == '-':
words = options(
'hMvxarg',
'help magnetic-moment modify vacuum unit-cell bond-length '
'crystal-structure lattice-constant orthorhombic cubic repeat gui')
elif command == 'ase-info':
if word[:1] == '-':
words = options('h', 'help')
else:
words = match(word, '.traj')
else: # ase-gui
if word[:1] == '-':
words = options(
'hnurRogtbs', 'help image-number show-unit-cell repeat '
'rotations output graph terminal aneb interpolate bonds scale')
for w in words:
if w.startswith(word):
print(w)
| gpl-2.0 | 4,504,443,122,918,439,400 | 31.142857 | 79 | 0.582222 | false |
MicheleZini/lastTable | betterlastTable.py | 1 | 2620 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
##
# Python function for reading linux utmp/wtmp file
# http://www.likexian.com/
#
# Copyright 2014, Kexian Li
# Released under the Apache License, Version 2.0
#
##
import struct
XTMP_STRUCT = 'hi32s4s32s256shhiii4i20x'
XTMP_STRUCT_SIZE = struct.calcsize(XTMP_STRUCT)
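# Assumed field layout behind XTMP_STRUCT, matching the glibc utmp/wtmp record
# (these are the unpacked indices used further down):
#   0: ut_type (h)      5: ut_host (256s)       9: tv_sec (i)
#   1: ut_pid (i)       6: e_termination (h)   10: tv_usec (i)
#   2: ut_line (32s)    7: e_exit (h)          11-14: ut_addr_v6 (4i)
#   3: ut_id (4s)       8: ut_session (i)      20x: unused padding
#   4: ut_user (32s)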
def read_xtmp(fname):
result = []
fp = open(fname, 'rb')
while True:
bytes = fp.read(XTMP_STRUCT_SIZE)
if not bytes:
break
data = struct.unpack(XTMP_STRUCT, bytes)
data = [(lambda s: str(s).split("\0", 1)[0])(i) for i in data]
if data[0] != '0':
result.append(data)
fp.close()
result.reverse()
return result
def printtimeline():
from datetime import datetime as dt
info = {'1':[4,5],'2':[4,5],'5':[1,2],'6':[4,5],'7':[1,4,7],'8':[1,4,6,7]}
name = {'1':'runlv','2':' boot','5':' init','6':'login','7':' proc','8':' term'}
for ev in events:
addinfo = ''
for i in info[ev[0]]:
addinfo+=' - '+ev[i]
print "%s (%6s) [%s]%s" % (dt.fromtimestamp(int(ev[9])).strftime('%Y/%m/%d %H:%M:%S'),ev[10],name[ev[0]],addinfo)
import sys
wtmp = "/var/log/wtmp"
timeline = True
try:
print '* accessing logs in: %s' % wtmp
events = read_xtmp(wtmp)
except IOError as e:
print "- failed: %s" % e
sys.exit(1)
print "+ %i events found" % len(events)
runlv = {} #define RUN_LVL 1 /* Change in system run-level (see init(8)) */
boot = {} #define BOOT_TIME 2 /* Time of system boot (in ut_tv) */
init = {} #define INIT_PROCESS 5 /* Process spawned by init(8) */
login = {} #define LOGIN_PROCESS 6 /* Session leader process for user login */
proc = {} #define USER_PROCESS 7 /* Normal process */
term = {} #define DEAD_PROCESS 8 /* Terminated process */
others = 0
for event in events:
pid = event[9]
if event[0] == '1':
runlv[pid] = event
continue
if event[0] == '2':
boot[pid] = event
continue
if event[0] == '5':
init[pid] = event
continue
if event[0] == '6':
login[pid] = event
continue
if event[0] == '7':
proc[pid] = event
continue
if event[0] == '8':
term[pid] = event
continue
others += 1
tot = len(init)+len(login)+len(proc)+len(runlv)+len(boot)+len(term)
print " init: %4i | login: %4i | proc: %4i" % (len(init), len(login), len(proc))
print " runlv: %4i | boot: %4i | term: %4i" % (len(runlv), len(boot), len(term))
print " Tot = %5i + not significant: %4i " % (tot, others)
if timeline:
printtimeline()
| mit | -4,841,995,086,821,038,000 | 24.940594 | 118 | 0.558397 | false |
clic-lab/blocks | BlockWorldRoboticAgent/model/q_network.py | 1 | 5707 | import embed_token_seq
import image_preprocessing
import embed_image
import mix_and_gen_q_values
import embed_previous_action as epa
import tensorflow as tf
import numpy as np
class ActionValueFunctionNetwork:
""" Creates policy Q(s,a) that approximates Q values
over actions for a given observed state s. """
# def __init__(self, image_dim, scope_name="Q_network"):
def __init__(self, n_text, image_dim, n_image,
n_direction_dim, n_block_dim, scope_name="Q_network"):
# Neural network for embedding text
self.n_text = n_text
self.text_embedder = embed_token_seq.EmbedTokenSeq(self.n_text, scope_name=scope_name)
text_embedding = self.text_embedder.get_output()
####################
# Create bucket network
self.buckets = [15, 30, 45]
self.embed_token_seq_buckets = []
for bucket in self.buckets:
embed_token_seq_bucket = \
embed_token_seq.EmbedTokenSeq(self.n_text, num_steps=bucket, create_copy=self.text_embedder,
scope_name=scope_name)
self.embed_token_seq_buckets.append(embed_token_seq_bucket)
####################
# Image Preprocessing
self.image_preprocessor = image_preprocessing.ImagePreprocessing()
# Neural network for embedding image
self.n_image = n_image
self.image_embedder = embed_image.EmbedImage(self.n_image, image_dim, scope_name=scope_name)
image_embedding = self.image_embedder.get_output()
# Network for embedding past action
# 6 actions, one for no-action
self.n_direction_dim = n_direction_dim
self.n_blocks_dim = n_block_dim
self.n_previous_action_embedding = self.n_direction_dim + self.n_blocks_dim
self.null_previous_action = (5, 20)
self.previous_action_embedder = epa.EmbedPreviousAction(
6, self.n_direction_dim, 21, self.n_blocks_dim, scope_name=scope_name)
previous_action_embedding = self.previous_action_embedder.get_output()
# Neural network for mixing the embeddings of text, image and previous action and generate q values
self.mix_and_gen_q_val = mix_and_gen_q_values.MixAndGenerateQValues(
self.n_text, self.n_image, self.n_previous_action_embedding,
text_embedding, image_embedding, previous_action_embedding, 81, scope_name=scope_name)
####################
# TODO BUG
self.mix_and_gen_q_val_buckets = []
for i in range(0, len(self.buckets)):
mix_and_gen_q_val_bucket = mix_and_gen_q_values.MixAndGenerateQValues(
self.n_text, self.n_image, self.n_previous_action_embedding,
self.embed_token_seq_buckets[i].get_output(), image_embedding,
previous_action_embedding, 81, create_copy=self.mix_and_gen_q_val, scope_name=scope_name)
self.mix_and_gen_q_val_buckets.append(mix_and_gen_q_val_bucket)
####################
# Define input and output
self.target = tf.placeholder(dtype=tf.float32, shape=None)
self.model_output = self.mix_and_gen_q_val.get_q_val()
self.model_output_indices = tf.placeholder(dtype=tf.int32, shape=None)
summary_qval_min = tf.scalar_summary("Q Val Min", tf.reduce_min(self.model_output))
summary_qval_max = tf.scalar_summary("Q Val Max", tf.reduce_max(self.model_output))
summary_qval_mean = tf.scalar_summary("Q Val Mean", tf.reduce_mean(self.model_output))
self.feed_forward_summary = tf.merge_summary([summary_qval_min, summary_qval_max, summary_qval_mean])
self.feed_iter = 0
def get_bucket_network(self, num_tokens):
for i in range(0, len(self.buckets)):
if num_tokens <= self.buckets[i]:
return self.mix_and_gen_q_val_buckets[i], self.embed_token_seq_buckets[i]
return self.mix_and_gen_q_val, self.text_embedder
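    # Example: with buckets [15, 30, 45], a 22-token instruction is routed to
    # the 30-step bucket network, while anything longer than 45 tokens falls
    # back to the full-length network and embedder returned above.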
def evaluate_qfunction(self, image_data, text_input_word_indices, text_mask, previous_action, sess):
mix_and_gen_q_val_bucket, text_embedder_bucket = self.get_bucket_network(sum(text_mask[0]))
image_data = np.concatenate(list(image_data), 2)
q_val = mix_and_gen_q_val_bucket.get_q_val()
raw_image_input = self.image_preprocessor.get_raw_image_input()
final_image_output = self.image_preprocessor.get_final_image()
image_datas = [final_image_output.eval(session=sess, feed_dict={raw_image_input: image_data})]
image_placeholder = self.image_embedder.get_images_data()
text_input = text_embedder_bucket.get_input()
mask = text_embedder_bucket.get_zero_mask()
batch_size = text_embedder_bucket.get_batch_size()
direction_input, block_input = self.previous_action_embedder.get_input()
result = sess.run(q_val, feed_dict={text_input: text_input_word_indices,
mask: text_mask, batch_size: 1,
image_placeholder: [image_datas],
direction_input: [previous_action[0]],
block_input: [previous_action[1]]})
# self.train_writer.add_summary(result[1], self.feed_iter)
# self.feed_iter += 1
return result[0]
def get_action_values(self, image_data, text_input_word_indices, text_mask, previous_action, sess):
""" Return Q value for the given agent state. """
q_values = self.evaluate_qfunction(
image_data, text_input_word_indices, text_mask, previous_action, sess)
return q_values
| gpl-3.0 | -5,442,815,506,399,176,000 | 46.558333 | 109 | 0.622569 | false |
BlackHole/enigma2-obh10 | lib/python/PowerTimer.py | 2 | 23520 | import os
from boxbranding import getMachineBrand, getMachineName
import xml.etree.cElementTree
from time import ctime, time
from bisect import insort
from enigma import eActionMap, quitMainloop
from Components.config import config
from Components.Harddisk import internalHDDNotSleeping
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
from Tools import Directories, Notifications
from Tools.XMLTools import stringToXML
from timer import Timer, TimerEntry
import NavigationInstance
# parses an event and gives out a (begin, end) tuple
# computed from the event's begin time and duration.
def parseEvent(ev):
begin = ev.getBeginTime()
end = begin + ev.getDuration()
return begin, end
class AFTEREVENT:
def __init__(self):
pass
NONE = 0
WAKEUPTOSTANDBY = 1
STANDBY = 2
DEEPSTANDBY = 3
class TIMERTYPE:
def __init__(self):
pass
NONE = 0
WAKEUP = 1
WAKEUPTOSTANDBY = 2
AUTOSTANDBY = 3
AUTODEEPSTANDBY = 4
STANDBY = 5
DEEPSTANDBY = 6
REBOOT = 7
RESTART = 8
# please do not translate log messages
class PowerTimerEntry(TimerEntry, object):
def __init__(self, begin, end, disabled=False, afterEvent=AFTEREVENT.NONE, timerType=TIMERTYPE.WAKEUP, checkOldTimers=False):
TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
self.dontSave = False
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.timerType = timerType
self.afterEvent = afterEvent
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.autosleepinstandbyonly = 'no'
self.autosleepdelay = 60
self.autosleeprepeat = 'once'
self.resetState()
def __repr__(self):
timertype = {
TIMERTYPE.WAKEUP: "wakeup",
TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
TIMERTYPE.AUTOSTANDBY: "autostandby",
TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
TIMERTYPE.STANDBY: "standby",
TIMERTYPE.DEEPSTANDBY: "deepstandby",
TIMERTYPE.REBOOT: "reboot",
TIMERTYPE.RESTART: "restart"
}[self.timerType]
if not self.disabled:
return "PowerTimerEntry(type=%s, begin=%s)" % (timertype, ctime(self.begin))
else:
return "PowerTimerEntry(type=%s, begin=%s Disabled)" % (timertype, ctime(self.begin))
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
def do_backoff(self):
#
# back-off an auto-repeat timer by its autosleepdelay, not 5, 10, 20, 30 mins
#
if self.autosleeprepeat == "repeated" and self.timerType in (TIMERTYPE.AUTOSTANDBY, TIMERTYPE.AUTODEEPSTANDBY):
self.backoff = int(self.autosleepdelay) * 60
elif self.backoff == 0:
self.backoff = 5 * 60
else:
self.backoff *= 2
if self.backoff > 1800:
self.backoff = 1800
self.log(10, "backoff: retry in %d minutes" % (int(self.backoff) / 60))
#
# If this is the first backoff of a repeat timer remember the original
# begin/end times, so that we can use *these* when setting up the repeat.
#
if self.repeated != 0 and not hasattr(self, "real_begin"):
self.real_begin = self.begin
self.real_end = self.end
# Delay the timer by the back-off time
#
self.begin = time() + self.backoff
if self.end <= self.begin:
self.end = self.begin
def activate(self):
next_state = self.state + 1
self.log(5, "activating state %d" % next_state)
if next_state == self.StatePrepared and (self.timerType == TIMERTYPE.AUTOSTANDBY or self.timerType == TIMERTYPE.AUTODEEPSTANDBY):
# This is the first action for an auto* timer.
# It binds any key press to keyPressed(), which resets the timer delay,
# and sets the initial delay.
#
eActionMap.getInstance().bindAction('', -0x7FFFFFFF, self.keyPressed)
self.begin = time() + int(self.autosleepdelay) * 60
if self.end <= self.begin:
self.end = self.begin
if next_state == self.StatePrepared:
self.log(6, "prepare ok, waiting for begin")
self.next_activation = self.begin
self.backoff = 0
return True
elif next_state == self.StateRunning:
self.wasPowerTimerWakeup = False
if os.path.exists("/tmp/was_powertimer_wakeup"):
self.wasPowerTimerWakeup = int(open("/tmp/was_powertimer_wakeup", "r").read()) and True or False
os.remove("/tmp/was_powertimer_wakeup")
# if this timer has been cancelled, just go to "end" state.
if self.cancelled:
return True
if self.failed:
return True
if self.timerType == TIMERTYPE.WAKEUP:
if Screens.Standby.inStandby:
Screens.Standby.inStandby.Power()
return True
elif self.timerType == TIMERTYPE.WAKEUPTOSTANDBY:
return True
elif self.timerType == TIMERTYPE.STANDBY:
if not Screens.Standby.inStandby: # not already in standby
Notifications.AddNotificationWithUniqueIDCallback(self.sendStandbyNotification, "PT_StateChange", MessageBox, _("A finished powertimer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName()), timeout=180)
return True
elif self.timerType == TIMERTYPE.AUTOSTANDBY:
if NavigationInstance.instance.getCurrentlyPlayingServiceReference() and ('0:0:0:0:0:0:0:0:0' in NavigationInstance.instance.getCurrentlyPlayingServiceReference().toString() or '4097:' in NavigationInstance.instance.getCurrentlyPlayingServiceReference().toString()):
self.do_backoff()
# retry
return False
if not Screens.Standby.inStandby: # not already in standby
Notifications.AddNotificationWithUniqueIDCallback(self.sendStandbyNotification, "PT_StateChange", MessageBox, _("A finished powertimer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName()), timeout=180)
if self.autosleeprepeat == "once":
eActionMap.getInstance().unbindAction('', self.keyPressed)
return True
else:
self.begin = time() + int(self.autosleepdelay) * 60
if self.end <= self.begin:
self.end = self.begin
else:
self.begin = time() + int(self.autosleepdelay) * 60
if self.end <= self.begin:
self.end = self.begin
elif self.timerType == TIMERTYPE.AUTODEEPSTANDBY:
# Check for there being any active Movie playback or IPTV channel
# or any streaming clients before going to Deep Standby.
# However, it is possible to put the box into Standby with the
# MoviePlayer still active (it will play if the box is taken out
# of Standby) - similarly for the IPTV player. This should not
# prevent a DeepStandby
# And check for existing or imminent recordings, etc..
# Also added () around the test and split them across lines
# to make it clearer what each test is.
#
from Components.Converter.ClientsStreaming import ClientsStreaming
if ((not Screens.Standby.inStandby and NavigationInstance.instance.getCurrentlyPlayingServiceReference() and
('0:0:0:0:0:0:0:0:0' in NavigationInstance.instance.getCurrentlyPlayingServiceReference().toString() or
'4097:' in NavigationInstance.instance.getCurrentlyPlayingServiceReference().toString()
) or
(int(ClientsStreaming("NUMBER").getText()) > 0)
) or
(NavigationInstance.instance.RecordTimer.isRecording() or
abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - time()) <= 900 or
abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - time()) <= 900) or
(self.autosleepinstandbyonly == 'yes' and not Screens.Standby.inStandby) or
(self.autosleepinstandbyonly == 'yes' and Screens.Standby.inStandby and internalHDDNotSleeping()
)
):
self.do_backoff()
# retry
return False
if not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
if Screens.Standby.inStandby: # in standby
quitMainloop(1)
return True
else:
Notifications.AddNotificationWithUniqueIDCallback(self.sendTryQuitMainloopNotification, "PT_StateChange", MessageBox, _("A finished powertimer wants to shutdown your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName()), timeout=180)
if self.autosleeprepeat == "once":
eActionMap.getInstance().unbindAction('', self.keyPressed)
return True
else:
self.begin = time() + int(self.autosleepdelay) * 60
if self.end <= self.begin:
self.end = self.begin
elif self.timerType == TIMERTYPE.DEEPSTANDBY and self.wasPowerTimerWakeup:
return True
elif self.timerType == TIMERTYPE.DEEPSTANDBY and not self.wasPowerTimerWakeup:
if NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - time()) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - time()) <= 900:
self.do_backoff()
# retry
return False
if not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
if Screens.Standby.inStandby: # in standby
quitMainloop(1)
else:
Notifications.AddNotificationWithUniqueIDCallback(self.sendTryQuitMainloopNotification, "PT_StateChange", MessageBox, _("A finished powertimer wants to shutdown your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName()), timeout=180)
return True
elif self.timerType == TIMERTYPE.REBOOT:
if NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - time()) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - time()) <= 900:
self.do_backoff()
# retry
return False
if not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
if Screens.Standby.inStandby: # in standby
quitMainloop(2)
else:
Notifications.AddNotificationWithUniqueIDCallback(self.sendTryToRebootNotification, "PT_StateChange", MessageBox, _("A finished powertimer wants to reboot your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName()), timeout=180)
return True
elif self.timerType == TIMERTYPE.RESTART:
if NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - time()) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - time()) <= 900:
self.do_backoff()
# retry
return False
if not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
if Screens.Standby.inStandby: # in standby
quitMainloop(3)
else:
Notifications.AddNotificationWithUniqueIDCallback(self.sendTryToRestartNotification, "PT_StateChange", MessageBox, _("A finished powertimer wants to restart the user interface.\nDo that now?"), timeout=180)
return True
elif next_state == self.StateEnded:
old_end = self.end
NavigationInstance.instance.PowerTimer.saveTimer()
if self.afterEvent == AFTEREVENT.STANDBY:
if not Screens.Standby.inStandby: # not already in standby
Notifications.AddNotificationWithUniqueIDCallback(self.sendStandbyNotification, "PT_StateChange", MessageBox, _("A finished powertimer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName()), timeout=180)
elif self.afterEvent == AFTEREVENT.DEEPSTANDBY:
if NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - time()) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - time()) <= 900:
self.do_backoff()
# retry
return False
if not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
if Screens.Standby.inStandby: # in standby
quitMainloop(1)
else:
Notifications.AddNotificationWithUniqueIDCallback(self.sendTryQuitMainloopNotification, "PT_StateChange", MessageBox, _("A finished powertimer wants to shutdown your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName()), timeout=180)
return True
def setAutoincreaseEnd(self, entry=None):
if not self.autoincrease:
return False
if entry is None:
new_end = int(time()) + self.autoincreasetime
else:
new_end = entry.begin - 30
dummyentry = PowerTimerEntry(self.begin, new_end, disabled=True, afterEvent=self.afterEvent, timerType=self.timerType)
dummyentry.disabled = self.disabled
timersanitycheck = TimerSanityCheck(NavigationInstance.instance.PowerManager.timer_list, dummyentry)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None and len(simulTimerList) > 1:
new_end = simulTimerList[1].begin
				new_end -= 30  # leave 30 seconds of prepare time
if new_end <= time():
return False
self.end = new_end
return True
def sendStandbyNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def sendTryToRebootNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 2)
def sendTryToRestartNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 3)
def keyPressed(self, key, tag):
self.begin = time() + int(self.autosleepdelay) * 60
if self.end <= self.begin:
self.end = self.begin
def getNextActivation(self):
if self.state == self.StateEnded or self.state == self.StateFailed:
return self.end
next_state = self.state + 1
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end}[next_state]
def getNextWakeup(self):
if self.state == self.StateEnded or self.state == self.StateFailed:
return self.end
if self.timerType != TIMERTYPE.WAKEUP and self.timerType != TIMERTYPE.WAKEUPTOSTANDBY and not self.afterEvent:
return -1
elif self.timerType != TIMERTYPE.WAKEUP and self.timerType != TIMERTYPE.WAKEUPTOSTANDBY and self.afterEvent:
return self.end
next_state = self.state + 1
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end}[next_state]
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) > 60 and int(old_prepare) != int(self.start_prepare):
self.log(15, "time changed, start prepare is now: %s" % ctime(self.start_prepare))
def createTimer(xml):
timertype = str(xml.get("timertype") or "wakeup")
timertype = {
"wakeup": TIMERTYPE.WAKEUP,
"wakeuptostandby": TIMERTYPE.WAKEUPTOSTANDBY,
"autostandby": TIMERTYPE.AUTOSTANDBY,
"autodeepstandby": TIMERTYPE.AUTODEEPSTANDBY,
"standby": TIMERTYPE.STANDBY,
"deepstandby": TIMERTYPE.DEEPSTANDBY,
"reboot": TIMERTYPE.REBOOT,
"restart": TIMERTYPE.RESTART
}[timertype]
begin = int(xml.get("begin"))
end = int(xml.get("end"))
repeated = xml.get("repeated").encode("utf-8")
disabled = long(xml.get("disabled") or "0")
afterevent = str(xml.get("afterevent") or "nothing")
afterevent = {
"nothing": AFTEREVENT.NONE,
"wakeuptostandby": AFTEREVENT.WAKEUPTOSTANDBY,
"standby": AFTEREVENT.STANDBY,
"deepstandby": AFTEREVENT.DEEPSTANDBY
}[afterevent]
autosleepinstandbyonly = str(xml.get("autosleepinstandbyonly") or "no")
autosleepdelay = str(xml.get("autosleepdelay") or "0")
autosleeprepeat = str(xml.get("autosleeprepeat") or "once")
#
# If this is a repeating auto* timer then start it in 30 secs,
# which means it will start its repeating countdown from when enigma2
# starts each time rather then waiting until anything left over from the
# last enigma2 running.
#
if autosleeprepeat == "repeated":
begin = end = time() + 30
entry = PowerTimerEntry(begin, end, disabled, afterevent, timertype)
entry.autosleepinstandbyonly = autosleepinstandbyonly
entry.autosleepdelay = int(autosleepdelay)
entry.autosleeprepeat = autosleeprepeat
# Ensure that the timer repeated is cleared if we have an autosleeprepeat
if entry.autosleeprepeat == "repeated":
entry.repeated = 0
else:
entry.repeated = int(repeated)
for l in xml.findall("log"):
ltime = int(l.get("time"))
lcode = int(l.get("code"))
msg = l.text.strip().encode("utf-8")
entry.log_entries.append((ltime, lcode, msg))
return entry
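# For reference, the <timer> elements parsed above are the ones produced by
# PowerTimer.saveTimer() below, i.e. roughly of this form (values illustrative):
#
#   <timer timertype="wakeup" begin="1577836800" end="1577840400" repeated="0"
#          afterevent="nothing" disabled="0" autosleepinstandbyonly="no"
#          autosleepdelay="60" autosleeprepeat="once" />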
class PowerTimer(Timer):
def __init__(self):
Timer.__init__(self)
self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "pm_timers.xml")
try:
self.loadTimer()
except IOError:
print "unable to load timers from file!"
def doActivate(self, w, dosave=True):
# when activating a timer which has already passed,
		# simply abort the timer. don't run through all the stages.
if w.shouldSkip():
w.state = PowerTimerEntry.StateEnded
else:
# when active returns true, this means "accepted".
# otherwise, the current state is kept.
# the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
try:
self.timer_list.remove(w)
except:
print '[PowerManager]: Remove list failed'
			# did this timer reach the last state?
if w.state < PowerTimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
# If we have saved original begin/end times for a backed off timer
# restore those values now
if hasattr(w, "real_begin"):
w.begin = w.real_begin
w.end = w.real_end
# Now remove the temporary holding attributes...
del w.real_begin
del w.real_end
w.processRepeated()
w.state = PowerTimerEntry.StateWaiting
self.addTimerEntry(w)
else:
# Remove old timers as set in config
self.cleanupDaily(config.recording.keep_timers.value, config.recording.keep_finished_timer_logs.value)
insort(self.processed_timers, w)
self.stateChanged(w)
if dosave:
self.saveTimer()
def loadTimer(self):
# TODO: PATH!
if not Directories.fileExists(self.Filename):
return
try:
file = open(self.Filename, 'r')
doc = xml.etree.cElementTree.parse(file)
file.close()
except SyntaxError:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("The timer file (pm_timers.xml) is corrupt and could not be loaded."), type=MessageBox.TYPE_ERROR, timeout=0, id="TimerLoadFailed")
print "pm_timers.xml failed to load!"
try:
import os
os.rename(self.Filename, self.Filename + "_old")
except (IOError, OSError):
print "renaming broken timer failed"
return
except IOError:
print "pm_timers.xml not found!"
return
root = doc.getroot()
# put out a message when at least one timer overlaps
checkit = True
for timer in root.findall("timer"):
newTimer = createTimer(timer)
if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("Timer overlap in pm_timers.xml detected!\nPlease recheck it!"), type=MessageBox.TYPE_ERROR, timeout=0, id="TimerLoadFailed")
checkit = False # at moment it is enough when the message is displayed one time
def saveTimer(self):
timerTypes = {
TIMERTYPE.WAKEUP: "wakeup",
TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
TIMERTYPE.AUTOSTANDBY: "autostandby",
TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
TIMERTYPE.STANDBY: "standby",
TIMERTYPE.DEEPSTANDBY: "deepstandby",
TIMERTYPE.REBOOT: "reboot",
TIMERTYPE.RESTART: "restart"
}
afterEvents = {
AFTEREVENT.NONE: "nothing",
AFTEREVENT.WAKEUPTOSTANDBY: "wakeuptostandby",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby"
}
list = ['<?xml version="1.0" ?>\n<timers>\n']
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer'
' timertype="%s"'
' begin="%d"'
' end="%d"'
' repeated="%d"'
' afterevent="%s"'
' disabled="%d"'
' autosleepinstandbyonly="%s"'
' autosleepdelay="%s"'
' autosleeprepeat="%s"' % (
timerTypes[timer.timerType],
int(timer.begin),
int(timer.end),
int(timer.repeated),
afterEvents[timer.afterEvent],
int(timer.disabled),
timer.autosleepinstandbyonly,
timer.autosleepdelay,
timer.autosleeprepeat))
if len(timer.log_entries) == 0:
list.append('/>\n')
else:
for log_time, code, msg in timer.log_entries:
list.append('>\n<log code="%d" time="%d">%s</log' % (code, log_time, stringToXML(msg)))
list.append('>\n</timer>\n')
list.append('</timers>\n')
file = open(self.Filename + ".writing", "w")
file.writelines(list)
file.flush()
os.fsync(file.fileno())
file.close()
os.rename(self.Filename + ".writing", self.Filename)
def getNextZapTime(self):
now = time()
for timer in self.timer_list:
if timer.begin < now:
continue
return timer.begin
return -1
def getNextPowerManagerTimeOld(self):
now = time()
for timer in self.timer_list:
if timer.timerType != TIMERTYPE.AUTOSTANDBY and timer.timerType != TIMERTYPE.AUTODEEPSTANDBY:
next_act = timer.getNextWakeup()
if next_act < now:
continue
return next_act
return -1
def getNextPowerManagerTime(self):
nextrectime = self.getNextPowerManagerTimeOld()
faketime = time() + 300
if config.timeshift.isRecording.value:
if 0 < nextrectime < faketime:
return nextrectime
else:
return faketime
else:
return nextrectime
def isNextPowerManagerAfterEventActionAuto(self):
now = time()
t = None
for timer in self.timer_list:
if timer.timerType == TIMERTYPE.WAKEUPTOSTANDBY or timer.afterEvent == AFTEREVENT.WAKEUPTOSTANDBY:
return True
return False
	def record(self, entry, ignoreTSC=False, dosave=True):  # called from loadTimer with dosave=False
entry.timeChanged()
print "[PowerTimer]", str(entry)
entry.Timer = self
self.addTimerEntry(entry)
if dosave:
self.saveTimer()
return None
def removeEntry(self, entry):
print "[PowerTimer] Remove", str(entry)
# avoid re-enqueuing
entry.repeated = False
# abort timer.
# this sets the end time to current time, so timer will be stopped.
entry.autoincrease = False
entry.abort()
if entry.state != entry.StateEnded:
self.timeChanged(entry)
# print "state: ", entry.state
# print "in processed: ", entry in self.processed_timers
# print "in running: ", entry in self.timer_list
# disable timer first
if entry.state != 3:
entry.disable()
# autoincrease instanttimer if possible
if not entry.dontSave:
for x in self.timer_list:
if x.setAutoincreaseEnd():
self.timeChanged(x)
# now the timer should be in the processed_timers list. remove it from there.
if entry in self.processed_timers:
self.processed_timers.remove(entry)
self.saveTimer()
def shutdown(self):
self.saveTimer()
def cleanupDaily(self, days, finishedLogDays=None):
Timer.cleanupDaily(self, days, finishedLogDays)
if days > 0:
now = time()
keepThreshold = now - days * 86400
for entry in self.timer_list:
if str(entry.autosleeprepeat) == "repeated":
# Handle repeat entries, which never end
# Repeating timers get autosleeprepeat="repeated" as well as the cases handled by TimerEntry
entry.log_entries = [log_entry for log_entry in entry.log_entries if log_entry[0] > keepThreshold]
| gpl-2.0 | 710,529,559,156,328,200 | 34.853659 | 270 | 0.717092 | false |
jacobdshimer/Bro-Log-Utility-Script | bro_utility.py | 1 | 2766 | #!/usr/bin/env python
#Imports the different modules that the script uses
import argparse
import subprocess
import textwrap
import portslicer
import fieldslicer
import combiner
import bro
#Create the command-line capability
parser = argparse.ArgumentParser(prog="Bro Log Utility Script",
description=textwrap.dedent('''\
This program will slice conn.log's based off of a given port or field. It will also
combine conn.log's together in order to make slicing and analysis easier.'''),
formatter_class=argparse.RawTextHelpFormatter)
mandatory = parser.add_argument_group("Mandatory",description="These are mandatory.")
optional = parser.add_argument_group("Optional", description="These are optional switches.")
mandatory.add_argument("-i", "--input", help="The path to the conn.log", metavar="")
mandatory.add_argument("-p", "--ports", help="List of ports seperated by a comma", metavar="")
mandatory.add_argument("-f", "--fields", help="List of fields seperated by a comma", metavar="")
optional.add_argument("-b", "--bro", help="Takes in the file path of PCAPs and runs thems against Bro IDS", metavar="")
optional.add_argument("-v", "--verbose", help="Outputs status to screen", action="store_true")
optional.add_argument("-c", "--combine", help=textwrap.dedent('''\
Combine all files of a specified type into one. Specify the path to where the
files are located followed by the type enclosed in quotes. This will find all
files with the specified type in them. You just have to specify the base directory.
Example: If you wanted your conn.log's combined and they are in your home
directory in a folder titled bro, you would type:
- c "/home/user/bro/ conn.log"
This will find all conn.log's within /home/user/bro/ no matter how nested.'''),
nargs=2, metavar="")
optional.add_argument("-o", "--output", help="Specify the output name when combining conn.log's", metavar="")
args = parser.parse_args()
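# Typical invocations, given the options defined above (paths and field names
# are illustrative):
#   ./bro_utility.py -i /path/to/conn.log -p 80,443
#   ./bro_utility.py -i /path/to/conn.log -f id.orig_h,id.resp_p -v
#   ./bro_utility.py -c /home/user/bro/ conn.log -o combined_conn.log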
def main():
if args.ports > 0:
portslicer.portslicer(args.input, args.verbose, args.ports)
elif args.fields > 0:
fieldslicer.fieldslicer(args.input, args.verbose, args.fields)
elif args.combine > 0:
#runs the linux find command to find the files the user wants to combine
temp_files = subprocess.check_output(["find",args.combine[0],"-name",args.combine[-1]])
combiner.combiner(args.verbose, args.output, args.combine[-1].upper(),temp_files)
elif args.bro > 0:
#uses the linux find command to find the pcaps to run.
temp_files = subprocess.check_output(["find",args.bro,"-name snort.log"])
bro.bro(args.verbose, args.bro)
if __name__ == "__main__":
main()
| mit | 6,099,293,530,235,152,000 | 46.689655 | 119 | 0.690528 | false |
jocelynj/weboob | weboob/backends/cragr/backend.py | 1 | 4021 | # -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon, Christophe Benz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from weboob.capabilities.bank import ICapBank, AccountNotFound
from weboob.tools.backend import BaseBackend
from weboob.tools.ordereddict import OrderedDict
from weboob.tools.value import ValuesDict, Value
from .browser import Cragr
__all__ = ['CragrBackend']
class CragrBackend(BaseBackend, ICapBank):
NAME = 'cragr'
MAINTAINER = 'Xavier Guerrin'
EMAIL = '[email protected]'
VERSION = '0.4'
DESCRIPTION = 'Credit Agricole french bank\'s website'
LICENSE = 'GPLv3'
website_choices = OrderedDict([(k, u'%s (%s)' % (v, k)) for k, v in sorted({
'm.ca-alpesprovence.fr': u'Alpes Provence',
'm.ca-anjou-maine.fr': u'Anjou Maine',
'm.ca-atlantique-vendee.fr': u'Atlantique Vendée',
'm.ca-aquitaine.fr': u'Aquitaine',
'm.ca-briepicardie.fr': u'Brie Picardie',
'm.ca-centrefrance.fr': u'Centre France',
'm.ca-centreloire.fr': u'Centre Loire',
'm.ca-centreouest.fr': u'Centre Ouest',
'm.ca-cb.fr': u'Champagne Bourgogne',
'm.ca-charente-perigord.fr': u'Charente Périgord',
'm.ca-cmds.fr': u'Charente-Maritime Deux-Sèvres',
'm.ca-corse.fr': u'Corse',
'm.ca-cotesdarmor.fr': u'Côtes d\'Armor',
'm.ca-des-savoie.fr': u'Des Savoie',
'm.ca-finistere.fr': u'Finistere',
'm.ca-paris.fr': u'Ile-de-France',
'm.ca-illeetvilaine.fr': u'Ille-et-Vilaine',
'm.ca-languedoc.fr': u'Languedoc',
'm.ca-loirehauteloire.fr': u'Loire Haute Loire',
'm.ca-lorraine.fr': u'Lorraine',
'm.ca-martinique.fr': u'Martinique Guyane',
'm.ca-morbihan.fr': u'Morbihan',
'm.ca-norddefrance.fr': u'Nord de France',
'm.ca-nord-est.fr': u'Nord Est',
'm.ca-nmp.fr': u'Nord Midi-Pyrénées',
'm.ca-normandie.fr': u'Normandie',
'm.ca-normandie-seine.fr': u'Normandie Seine',
'm.ca-pca.fr': u'Provence Côte d\'Azur',
'm.lefil.com': u'Pyrénées Gascogne',
'm.ca-reunion.fr': u'Réunion',
'm.ca-sudrhonealpes.fr': u'Sud Rhône Alpes',
'm.ca-sudmed.fr': u'Sud Méditerranée',
'm.ca-toulouse31.fr': u'Toulouse 31', # m.ca-toulousain.fr redirects here
'm.ca-tourainepoitou.fr': u'Tourraine Poitou',
}.iteritems())])
CONFIG = ValuesDict(Value('website', label='Website to use', choices=website_choices),
Value('login', label='Account ID'),
Value('password', label='Password', masked=True))
BROWSER = Cragr
def create_default_browser(self):
return self.create_browser(self.config['website'], self.config['login'], self.config['password'])
def iter_accounts(self):
for account in self.browser.get_accounts_list():
yield account
def get_account(self, _id):
if not _id.isdigit():
raise AccountNotFound()
account = self.browser.get_account(_id)
if account:
return account
else:
raise AccountNotFound()
def iter_history(self, account):
for history in self.browser.get_history(account):
yield history
def transfer(self, account, to, amount, reason=None):
return self.browser.do_transfer(account, to, amount, reason)
| gpl-3.0 | 7,785,403,536,850,628,000 | 39.08 | 105 | 0.635729 | false |
gmist/gae-de-init | main/apps/user/views.py | 1 | 7664 | # coding: utf-8
import copy
from google.appengine.ext import ndb
import flask
from apps import auth
from apps.auth import helpers
from core import task
from core import util
import config
import forms
import models
bp = flask.Blueprint(
'user',
__name__,
url_prefix='/user',
template_folder='templates',
)
###############################################################################
# User List
###############################################################################
@bp.route('/', endpoint='list')
@auth.admin_required
def user_list():
user_dbs, user_cursor, prev_cursor = models.User.get_dbs(
email=util.param('email')
)
permissions = list(forms.UserUpdateForm._permission_choices)
permissions += util.param('permissions', list) or []
return flask.render_template(
'user/admin/list.html',
html_class='user-list',
title='User List',
user_dbs=user_dbs,
next_url=util.generate_next_url(user_cursor),
prev_url=util.generate_next_url(prev_cursor),
permissions=sorted(set(permissions)),
api_url=flask.url_for('api.users')
)
@bp.route('/<int:user_id>/update/', methods=['GET', 'POST'], endpoint='update')
@auth.admin_required
def user_update(user_id):
user_db = models.User.get_by_id(user_id)
if not user_db:
flask.abort(404)
form = forms.UserUpdateForm(obj=user_db)
for permission in user_db.permissions:
form.permissions.choices.append((permission, permission))
form.permissions.choices = sorted(set(form.permissions.choices))
if form.validate_on_submit():
if not util.is_valid_username(form.username.data):
form.username.errors.append('This username is invalid.')
elif not models.User.is_username_available(form.username.data, user_db.key):
form.username.errors.append('This username is already taken.')
else:
form.populate_obj(user_db)
if auth.current_user_id() == user_db.key.id():
user_db.admin = True
user_db.active = True
user_db.put()
return flask.redirect(flask.url_for(
'user.list', order='-modified', active=user_db.active,
))
return flask.render_template(
'user/admin/update.html',
title=user_db.name,
html_class='user-update',
form=form,
user_db=user_db,
api_url=flask.url_for('api.user', key=user_db.key.urlsafe())
)
@bp.route('/verify_email/<token>/')
@auth.login_required
def verify_email(token):
user_db = auth.current_user_db()
if user_db.token != token:
flask.flash('That link is either invalid or expired.', category='danger')
return flask.redirect(flask.url_for('user.profile_update'))
user_db.verified = True
user_db.token = util.uuid()
user_db.put()
flask.flash('Hooray! Your email is now verified.', category='success')
return flask.redirect(flask.url_for('user.profile_update'))
@bp.route('/merge/', methods=['GET', 'POST'])
@auth.admin_required
def merge():
user_keys = util.param('user_keys', list)
if not user_keys:
flask.abort(400)
user_db_keys = [ndb.Key(urlsafe=k) for k in user_keys]
user_dbs = ndb.get_multi(user_db_keys)
if len(user_dbs) < 2:
flask.abort(400)
user_dbs.sort(key=lambda user_db: user_db.created)
merged_user_db = user_dbs[0]
auth_ids = []
permissions = []
is_admin = False
is_active = False
for user_db in user_dbs:
auth_ids.extend(user_db.auth_ids)
permissions.extend(user_db.permissions)
is_admin = is_admin or user_db.admin
is_active = is_active or user_db.active
if user_db.key.urlsafe() == util.param('user_key'):
merged_user_db = user_db
auth_ids = sorted(list(set(auth_ids)))
permissions = sorted(list(set(permissions)))
merged_user_db.permissions = permissions
merged_user_db.admin = is_admin
merged_user_db.active = is_active
merged_user_db.verified = False
form_obj = copy.deepcopy(merged_user_db)
form_obj.user_key = merged_user_db.key.urlsafe()
form_obj.user_keys = ','.join(user_keys)
form = forms.UserMergeForm(obj=form_obj)
if form.validate_on_submit():
form.populate_obj(merged_user_db)
merged_user_db.auth_ids = auth_ids
merged_user_db.put()
deprecated_keys = [k for k in user_db_keys if k != merged_user_db.key]
merge_user_dbs(merged_user_db, deprecated_keys)
return flask.redirect(
flask.url_for('user.update', user_id=merged_user_db.key.id()),
)
return flask.render_template(
'user/admin/merge.html',
title='Merge Users',
html_class='user-merge',
user_dbs=user_dbs,
merged_user_db=merged_user_db,
form=form,
auth_ids=auth_ids,
api_url=flask.url_for('api.users', user_keys=','.join(user_keys))
)
@ndb.transactional(xg=True)
def merge_user_dbs(user_db, deprecated_keys):
# TODO: Merge possible user data before handling deprecated users
deprecated_dbs = ndb.get_multi(deprecated_keys)
for deprecated_db in deprecated_dbs:
deprecated_db.auth_ids = []
deprecated_db.active = False
deprecated_db.verified = False
if not deprecated_db.username.startswith('_'):
deprecated_db.username = '_%s' % deprecated_db.username
ndb.put_multi(deprecated_dbs)
@bp.route('/profile/')
@auth.login_required
def profile():
user_db = auth.current_user_db()
return flask.render_template(
'user/profile/index.html',
title=user_db.name,
html_class='profile-view',
user_db=user_db,
has_json=True,
api_url=flask.url_for('api.user', key=user_db.key.urlsafe()),
)
@bp.route('/profile/update/', methods=['GET', 'POST'])
@auth.login_required
def profile_update():
user_db = auth.current_user_db()
form = forms.ProfileUpdateForm(obj=user_db)
if form.validate_on_submit():
email = form.email.data
if email and not user_db.is_email_available(email, user_db.key):
form.email.errors.append('This email is already taken.')
if not form.errors:
send_verification = not user_db.token or user_db.email != email
form.populate_obj(user_db)
if send_verification:
user_db.verified = False
task.verify_email_notification(user_db)
user_db.put()
return flask.redirect(flask.url_for('pages.welcome'))
return flask.render_template(
'user/profile/update.html',
title=user_db.name,
html_class='profile',
form=form,
user_db=user_db,
)
@bp.route('/profile/password/', methods=['GET', 'POST'])
@auth.login_required
def profile_password():
if not config.CONFIG_DB.has_email_authentication:
flask.abort(418)
user_db = auth.current_user_db()
form = forms.ProfilePasswordForm(obj=user_db)
if form.validate_on_submit():
errors = False
old_password = form.old_password.data
new_password = form.new_password.data
if new_password or old_password:
if user_db.password_hash:
if helpers.password_hash(user_db, old_password) != user_db.password_hash:
form.old_password.errors.append('Invalid current password')
errors = True
if not errors and old_password and not new_password:
form.new_password.errors.append('This field is required.')
errors = True
if not (form.errors or errors):
user_db.password_hash = helpers.password_hash(user_db, new_password)
flask.flash('Your password has been changed.', category='success')
if not (form.errors or errors):
user_db.put()
return flask.redirect(flask.url_for('user.profile'))
return flask.render_template(
'user/profile/password.html',
title=user_db.name,
html_class='profile-password',
form=form,
user_db=user_db,
) | mit | 7,499,373,629,555,537,000 | 29.907258 | 81 | 0.652009 | false |
MostlyOpen/odoo_addons_jcafb | jcafb_2017_lab_tests/__openerp__.py | 1 | 1954 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'JCAF 2017 Lab Tests',
'summary': 'This module will install all the JCAF 2017 Lab Tests.',
'version': '2.0.0',
'author': 'Carlos Eduardo Vercelino - CLVsol',
'category': 'Generic Modules/Others',
'license': 'AGPL-3',
'website': 'http://clvsol.com',
'depends': [
'myo_survey',
'myo_lab_test_cst',
],
'data': [
# 'data/survey_jcafb_EAN17.xml',
# 'data/survey_jcafb_EDH17.xml',
# 'data/survey_jcafb_EPC17.xml',
# 'data/survey_jcafb_EPI17.xml',
# 'data/survey_jcafb_EUR17.xml',
'data/lab_test_EAN17_data.xml',
'data/lab_test_EDH17_data.xml',
'data/lab_test_EPC17_data.xml',
'data/lab_test_EPI17_data.xml',
'data/lab_test_ECP17_data.xml',
'data/lab_test_EEV17_data.xml',
'data/lab_test_EUR17_data.xml',
],
'demo': [],
'test': [],
'init_xml': [],
'update_xml': [],
'installable': True,
'application': False,
'active': False,
'css': [],
}
| agpl-3.0 | 5,378,893,779,605,012,000 | 33.892857 | 79 | 0.571648 | false |
manassolanki/erpnext | erpnext/stock/report/stock_ledger/stock_ledger.py | 1 | 5637 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns = get_columns()
items = get_items(filters)
sl_entries = get_stock_ledger_entries(filters, items)
item_details = get_item_details(items, sl_entries)
opening_row = get_opening_balance(filters, columns)
data = []
if opening_row:
data.append(opening_row)
for sle in sl_entries:
item_detail = item_details[sle.item_code]
data.append([sle.date, sle.item_code, item_detail.item_name, item_detail.item_group,
item_detail.brand, item_detail.description, sle.warehouse,
item_detail.stock_uom, sle.actual_qty, sle.qty_after_transaction,
(sle.incoming_rate if sle.actual_qty > 0 else 0.0),
sle.valuation_rate, sle.stock_value, sle.voucher_type, sle.voucher_no,
sle.batch_no, sle.serial_no, sle.project, sle.company])
return columns, data
def get_columns():
columns = [
_("Date") + ":Datetime:95", _("Item") + ":Link/Item:130",
_("Item Name") + "::100", _("Item Group") + ":Link/Item Group:100",
_("Brand") + ":Link/Brand:100", _("Description") + "::200",
_("Warehouse") + ":Link/Warehouse:100", _("Stock UOM") + ":Link/UOM:100",
_("Qty") + ":Float:50", _("Balance Qty") + ":Float:100",
{"label": _("Incoming Rate"), "fieldtype": "Currency", "width": 110,
"options": "Company:company:default_currency"},
{"label": _("Valuation Rate"), "fieldtype": "Currency", "width": 110,
"options": "Company:company:default_currency"},
{"label": _("Balance Value"), "fieldtype": "Currency", "width": 110,
"options": "Company:company:default_currency"},
_("Voucher Type") + "::110",
_("Voucher #") + ":Dynamic Link/" + _("Voucher Type") + ":100",
_("Batch") + ":Link/Batch:100",
_("Serial #") + ":Link/Serial No:100",
_("Project") + ":Link/Project:100",
{"label": _("Company"), "fieldtype": "Link", "width": 110,
"options": "company", "fieldname": "company"}
]
return columns
def get_stock_ledger_entries(filters, items):
item_conditions_sql = ''
if items:
item_conditions_sql = 'and sle.item_code in ({})'\
.format(', '.join(['"' + frappe.db.escape(i) + '"' for i in items]))
return frappe.db.sql("""select concat_ws(" ", posting_date, posting_time) as date,
item_code, warehouse, actual_qty, qty_after_transaction, incoming_rate, valuation_rate,
stock_value, voucher_type, voucher_no, batch_no, serial_no, company, project
from `tabStock Ledger Entry` sle
where company = %(company)s and
posting_date between %(from_date)s and %(to_date)s
{sle_conditions}
{item_conditions_sql}
order by posting_date asc, posting_time asc, name asc"""\
.format(
sle_conditions=get_sle_conditions(filters),
item_conditions_sql = item_conditions_sql
), filters, as_dict=1)
def get_items(filters):
conditions = []
if filters.get("item_code"):
conditions.append("item.name=%(item_code)s")
else:
if filters.get("brand"):
conditions.append("item.brand=%(brand)s")
if filters.get("item_group"):
conditions.append(get_item_group_condition(filters.get("item_group")))
items = []
if conditions:
items = frappe.db.sql_list("""select name from `tabItem` item where {}"""
.format(" and ".join(conditions)), filters)
return items
def get_item_details(items, sl_entries):
item_details = {}
if not items:
items = list(set([d.item_code for d in sl_entries]))
if not items:
return item_details
for item in frappe.db.sql("""
select name, item_name, description, item_group, brand, stock_uom
from `tabItem`
where name in ({0})
""".format(', '.join(['"' + frappe.db.escape(i,percent=False) + '"' for i in items])), as_dict=1):
item_details.setdefault(item.name, item)
return item_details
def get_sle_conditions(filters):
conditions = []
if filters.get("warehouse"):
warehouse_condition = get_warehouse_condition(filters.get("warehouse"))
if warehouse_condition:
conditions.append(warehouse_condition)
if filters.get("voucher_no"):
conditions.append("voucher_no=%(voucher_no)s")
if filters.get("batch_no"):
conditions.append("batch_no=%(batch_no)s")
if filters.get("project"):
conditions.append("project=%(project)s")
return "and {}".format(" and ".join(conditions)) if conditions else ""
def get_opening_balance(filters, columns):
if not (filters.item_code and filters.warehouse and filters.from_date):
return
from erpnext.stock.stock_ledger import get_previous_sle
last_entry = get_previous_sle({
"item_code": filters.item_code,
"warehouse_condition": get_warehouse_condition(filters.warehouse),
"posting_date": filters.from_date,
"posting_time": "00:00:00"
})
row = [""]*len(columns)
row[1] = _("'Opening'")
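	# indices 9, 11 and 12 correspond to the "Balance Qty", "Valuation Rate"
	# and "Balance Value" columns defined in get_columns() above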
for i, v in ((9, 'qty_after_transaction'), (11, 'valuation_rate'), (12, 'stock_value')):
row[i] = last_entry.get(v, 0)
return row
def get_warehouse_condition(warehouse):
warehouse_details = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt"], as_dict=1)
if warehouse_details:
return " exists (select name from `tabWarehouse` wh \
where wh.lft >= %s and wh.rgt <= %s and warehouse = wh.name)"%(warehouse_details.lft,
warehouse_details.rgt)
return ''
def get_item_group_condition(item_group):
item_group_details = frappe.db.get_value("Item Group", item_group, ["lft", "rgt"], as_dict=1)
if item_group_details:
return "item.item_group in (select ig.name from `tabItem Group` ig \
where ig.lft >= %s and ig.rgt <= %s and item.item_group = ig.name)"%(item_group_details.lft,
item_group_details.rgt)
return ''
| gpl-3.0 | -2,148,252,826,371,442,200 | 34.904459 | 100 | 0.672166 | false |
cosmoharrigan/pyrolog | prolog/interpreter/test/test_iso.py | 1 | 4934 | import py, os
from prolog.interpreter.parsing import TermBuilder
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.continuation import Heap, Engine
from prolog.interpreter import error
from prolog.interpreter.test.tool import collect_all, assert_false, assert_true, prolog_raises
from prolog.interpreter.error import UncaughtError
TESTDIR = str(py.path.local(__file__).dirpath().join('inriasuite'))
FAILURE = 'failure'
SUCCESS = 'success'
SKIP = "_SKIP_"
XFAIL = "_XFAIL_"
def deconstruct_line(line):
HALT = 'halt,'
FAIL = 'fail,'
TRUE = 'true,'
FLOAT = '0.33'
ASSIGN = 'X ='
CUT = '!,'
line_0_5 = line[0:5]
H_F_T = (HALT, FAIL, TRUE)
if line_0_5 in H_F_T:
if line_0_5 == FAIL:
left = 'fail'
elif line_0_5 == HALT:
left = 'halt'
elif line_0_5 == TRUE:
left = 'true'
right = line[6:]
elif line.startswith(CUT):
left = '!'
right = line[3:]
elif line.startswith(ASSIGN):
left = 'X = "fred"'
right = line[12:]
elif line.startswith(FLOAT):
left = '0.333 =:= 1/3'
right = line[15:]
else:
first_open_par = line.find('(')
brace_counter = 1
i = first_open_par
while brace_counter:
i += 1
if line[i] == '(':
brace_counter += 1
elif line[i] == ')':
brace_counter -= 1
left = line[0:i + 1]
right = line[i + 2:].strip()
return left, right
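# For reference, the inriasuite entries handled below have these general
# shapes (illustrative sketches only, not verbatim suite content):
#   [Goal, success]                  -- the goal must succeed
#   [Goal, failure]                  -- the goal must fail
#   [Goal, some_error(...)]          -- the goal must raise that error
#   [Goal, [[X <-- value], ...]]     -- the goal succeeds with these bindings
# deconstruct_line() receives the text between the outer brackets and splits
# it into the goal (left) and the expected outcome (right).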
def get_lines(file_):
testfile = open(TESTDIR + '/' + file_)
for test in testfile.readlines():
if test.endswith('%%SKIP%%\n'):
yield SKIP, ""
elif test.find('0\'') != -1 or test.find("current_prolog_flag") != -1:
yield XFAIL, ""
elif test.startswith('['):
last_bracket = test.rfind(']')
first_bracket = test.find('[') + 1
assert first_bracket <= last_bracket
relevant = test[first_bracket:last_bracket]
left, right = deconstruct_line(relevant)
yield left, right
def deconstruct_list(l):
pieces = [piece for piece in l.split('], [')]
pieces[0] = pieces[0][1:]
return pieces
def get_files():
_, _, content = os.walk(TESTDIR).next()
for file in content:
yield file
def pytest_generate_tests(metafunc):
for f in get_files():
for left, right in get_lines(f):
# these tests can't pass because not implemented functionality is
# required, or they don't make sense at all...
if left in [SKIP, XFAIL]:
metafunc.addcall(funcargs = dict(cmd = "skip", test = "", param = left))
# simple test: failure or success
if right in (FAILURE, SUCCESS):
metafunc.addcall(funcargs=dict(cmd="simple", test=(left + '.'), param=right))
# test whether an error occurs
if right.find('error') != -1:
metafunc.addcall(funcargs=dict(cmd="error", test=left, param=right))
# test unification with a list of unifications
if right.find('[') != -1 and right.find('error') == -1:
lists = deconstruct_list(right[1:-2])
metafunc.addcall(funcargs=dict(cmd="list", test=left, param=lists))
def test_all_tests(cmd, test, param):
if cmd == "skip":
if param == SKIP:
py.test.skip("")
elif param == XFAIL:
py.test.xfail("")
elif cmd == "simple":
try:
if param == FAILURE:
assert_false(test)
elif param == SUCCESS:
assert_true(test)
except (error.UncaughtError, error.CatchableError), e:
msg = repr(e.term)
if 'existence_error' in msg:
py.test.skip(msg)
else:
py.test.xfail("")
except:
py.test.xfail("")
elif cmd == "error":
try:
prolog_raises(param, test)
except UncaughtError, e:
msg = repr(e.term)
if 'existence_error' in msg or 'type_error' in msg:
py.test.skip(msg)
else:
py.test.xfail("fix me")
except:
py.test.xfail("")
elif cmd == "list":
try:
for goal in param:
check = test + ', ' + goal.replace('<--', '=') + '.'
try:
assert_true(check)
except:
py.test.xfail("fix me")
except (error.UncaughtError, error.CatchableError), e:
msg = repr(e.term)
if 'existence_error' in msg:
py.test.skip(msg)
else:
py.test.xfail("fix me")
except:
py.test.xfail("")
| mit | 7,872,124,824,838,538,000 | 32.114094 | 94 | 0.520268 | false |
bootleg/ret-sync | ext_ida/retsync/dispatcher.py | 1 | 16187 | #
# Copyright (C) 2016-2021, Alexandre Gazet.
#
# Copyright (C) 2012-2015, Quarkslab.
#
# This file is part of ret-sync.
#
# ret-sync is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path as altpath
import sys
import time
import socket
import select
import re
import json
import traceback
from contextlib import contextmanager
import rsconfig
from rsconfig import rs_encode, rs_decode, load_configuration
# Logging
rs_log = rsconfig.init_logging(__file__)
class Client():
def __init__(self, s_client, s_srv, name):
self.client_sock = s_client
self.srv_sock = s_srv
self.name = name
self.enabled = False
self.buffer = ''
def close(self):
self.enabled = False
if self.client_sock:
self.client_sock.close()
if self.srv_sock:
self.srv_sock.close()
def feed(self, data):
batch = []
self.buffer = ''.join([self.buffer, data])
if self.buffer.endswith("\n"):
batch = [req.strip() for req in self.buffer.split('\n') if req != '']
self.buffer = ''
return batch
class DispatcherSrv():
def __init__(self):
self.idb_clients = []
self.dbg_client = None
self.srv_socks = []
self.opened_socks = []
self.current_dbg = None
self.current_dialect = 'unknown'
self.current_idb = None
self.current_module = None
self.sync_mode_auto = True
self.disconn_pat = re.compile('dbg disconnected')
self.req_handlers = {
'new_client': self.req_new_client,
'new_dbg': self.req_new_dbg,
'dbg_quit': self.req_dbg_quit,
'idb_n': self.req_idb_n,
'idb_list': self.req_idb_list,
'module': self.req_module,
'dbg_err': self.req_dbg_err,
'sync_mode': self.req_sync_mode,
'cmd': self.req_cmd,
'bc': self.req_bc,
'kill': self.req_kill
}
def is_port_available(self, host, port):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if sys.platform == 'win32':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, port))
finally:
sock.close()
def bind_sock(self, host, port):
self.is_port_available(host, port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
self.srv_socks.append(sock)
return sock
def bind(self, host, port):
self.dbg_srv_sock = self.bind_sock(host, port)
if not (socket.gethostbyname(host) == '127.0.0.1'):
self.localhost_sock = self.bind_sock('127.0.0.1', port)
def accept(self, s):
new_socket, addr = s.accept()
self.opened_socks.append(new_socket)
def listen(self):
for s in self.srv_socks:
s.listen(5)
def close(self, s):
s.close()
self.opened_socks.remove(s)
def loop(self):
self.listen()
self.announcement('dispatcher listening')
while True:
rlist, wlist, xlist = select.select(self.srv_socks + self.opened_socks, [], [])
if not rlist:
self.announcement('socket error: select')
raise Exception('rabbit eating the cable')
for s in rlist:
if s in self.srv_socks:
self.accept(s)
else:
self.handle(s)
def handle(self, s):
client = self.sock_to_client(s)
for req in self.recvall(client):
self.parse_exec(s, req)
# find client object for its srv socket
def sock_to_client(self, s):
if self.current_dbg and (s == self.current_dbg.srv_sock):
client = self.current_dbg
else:
clist = [client for client in self.idb_clients if (client.srv_sock == s)]
if not clist:
client = Client(None, s, None)
self.idb_clients.append(client)
else:
client = clist[0]
return client
    # buffered readline-like function
def recvall(self, client):
try:
data = rs_decode(client.srv_sock.recv(4096))
if data == '':
raise Exception('recv failed')
except socket.error:
if client == self.current_dbg:
self.broadcast('debugger closed the connection')
self.dbg_quit()
else:
self.client_quit(client.srv_sock)
self.broadcast("a client quit, %d client(s) left" % len(self.idb_clients))
return []
return client.feed(data)
# parse and execute requests from clients (idbs or dbg)
def parse_exec(self, s, req):
if not (req.startswith('[notice]')):
# this is a normal [sync] request from debugger, forward it
self.forward(req)
# receive 'dbg disconnected', socket can be closed
if re.search(self.disconn_pat, req):
self.close(s)
return
req = self.normalize(req, 8)
try:
hash = json.loads(req)
except ValueError:
self.broadcast("dispatcher failed to parse json\n %s\n" % req)
return
ntype = hash['type']
if ntype not in self.req_handlers:
self.broadcast("dispatcher unknown request: %s" % ntype)
return
req_handler = self.req_handlers[ntype]
req_handler(s, hash)
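    # Illustrative request shapes only -- the exact fields are set by the
    # broker and debugger plugins (see the handlers below for what they read):
    #   [notice]{"type":"new_client","port":9100,"idb":"target.exe"}
    #   [notice]{"type":"new_dbg","msg":"WinDbg connected","dialect":"windbg"}
    # Anything not prefixed with [notice] is treated as a [sync] message and
    # forwarded as-is to the currently enabled IDB client.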
def normalize(self, req, taglen):
req = req[taglen:]
req = req.replace("\\", "\\\\")
req = req.replace("\n", "")
return req.strip()
# dispatcher announcements are forwarded to the idb
def announcement(self, msg, s=None):
if not s:
if not self.current_idb:
return
s = self.current_idb.client_sock
try:
announce = "[notice]{\"type\":\"dispatcher\",\"subtype\":\"msg\",\"msg\":\"%s\"}\n" % msg
s.sendall(rs_encode(announce))
except socket.error:
return
# send message to all connected idb clients
def broadcast(self, msg):
for idbc in self.idb_clients:
self.announcement(msg, idbc.client_sock)
# send dbg message to currently active idb client
def forward(self, msg, s=None):
if not s:
if not self.current_idb:
return
s = self.current_idb.client_sock
if s and self.current_idb.enabled:
fwmsg = "%s\n" % msg
s.sendall(rs_encode(fwmsg))
# send dbg message to all idb clients
def forward_all(self, msg, s=None):
for idbc in self.idb_clients:
self.forward(msg, idbc.client_sock)
# send a beacon to the broker
def send_beacon(self, s):
s.sendall(rs_encode("[notice]{\"type\":\"dispatcher\",\"subtype\":\"beacon\"}\n"))
# disable current idb and enable new idb matched from current module name
def switch_idb(self, new_idb):
msg = "[sync]{\"type\":\"broker\",\"subtype\":\"%s\"}\n"
if (not self.current_idb == new_idb) and (self.current_idb and self.current_idb.enabled):
switchmsg = msg % 'disable_idb'
self.current_idb.client_sock.sendall(rs_encode(switchmsg))
self.current_idb.enabled = False
if new_idb:
switchmsg = msg % 'enable_idb'
new_idb.client_sock.sendall(rs_encode(switchmsg))
self.current_idb = new_idb
self.current_idb.enabled = True
# a new idb client connects to the dispatcher via its broker
def req_new_client(self, srv_sock, hash):
port, name = hash['port'], hash['idb']
try:
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_sock.settimeout(2)
client_sock.connect(('localhost', port))
self.opened_socks.append(client_sock)
except socket.error:
self.opened_socks.remove(srv_sock)
srv_sock.close()
return
# send beacon to acknowledge dispatcher presence
self.send_beacon(client_sock)
# check if an idb client is already registered with the same name
conflicting = [client for client in self.idb_clients if (client.name == name)]
# promote to idb client
new_client = self.sock_to_client(srv_sock)
new_client.client_sock = client_sock
new_client.name = name
self.broadcast("add new client (listening on port %d), nb client(s): %d" % (port, len(self.idb_clients)))
if conflicting:
self.broadcast("conflicting name: %s !" % new_client.name)
if not self.current_idb:
self.current_idb = new_client
        # if new client matches current module name, then enable it
if self.current_module == name:
self.switch_idb(new_client)
# inform new client about debugger's dialect
self.dbg_dialect(new_client)
    # clean state when a client is quitting
def client_quit(self, s):
self.opened_socks.remove(s)
# remove exiting client from the list of active clients
for idbc in [idbc for idbc in self.idb_clients if (idbc.srv_sock == s)]:
self.idb_clients.remove(idbc)
self.opened_socks.remove(idbc.client_sock)
idbc.close()
# no more clients, let's kill ourself
if not self.idb_clients:
for s in self.srv_socks:
s.close()
sys.exit()
# determine if debugger is Windows specific
def is_windows_dbg(self, dialect):
return (dialect in ['windbg', 'x64_dbg', 'ollydbg2'])
# a new debugger client connects to the dispatcher
def req_new_dbg(self, s, hash):
msg = hash['msg']
if self.current_dbg:
self.dbg_quit()
# promote to debugger client
self.current_dbg = self.sock_to_client(s)
self.current_dbg.client_sock = s
self.idb_clients.remove(self.current_dbg)
self.broadcast("new debugger client: %s" % msg)
# store debugger's dialect
if 'dialect' in hash:
self.current_dialect = hash['dialect']
# case when IDA is on a linux/bsd host and connected to remote windows
# use ntpath instead of posixpath
if sys.platform.startswith('linux') or sys.platform == 'darwin':
if self.is_windows_dbg(self.current_dialect):
global altpath
import ntpath as altpath
self.dbg_dialect()
# inform client about debugger's dialect
def dbg_dialect(self, client=None):
msg = "[sync]{\"type\":\"dialect\",\"dialect\":\"%s\"}\n" % self.current_dialect
if client:
client.client_sock.sendall(rs_encode(msg))
else:
for idbc in self.idb_clients:
idbc.client_sock.sendall(rs_encode(msg))
    # debugger client disconnects from the dispatcher
def req_dbg_quit(self, s, hash):
msg = hash['msg']
self.broadcast("debugger quit: %s" % msg)
self.dbg_quit()
    # clean state when debugger is quitting
def dbg_quit(self):
self.opened_socks.remove(self.current_dbg.srv_sock)
self.current_dbg.close()
self.current_dbg = None
self.current_module = None
self.switch_idb(None)
self.current_dialect = 'unknown'
    # handle kill notice from a client, exit properly if no more clients
def req_kill(self, s, hash):
self.client_quit(s)
self.broadcast("received a kill notice from client, %d client(s) left" % len(self.idb_clients))
# send list of currently connected idb clients
def req_idb_list(self, s, hash):
clist = "> currently connected idb(s):\n"
if not self.idb_clients:
clist += " no idb client yet\n"
else:
for i in range(len(self.idb_clients)):
clist += (" [%d] %s\n" % (i, self.idb_clients[i].name))
s.sendall(rs_encode(clist))
# manually set current active idb to idb n from idb list
def req_idb_n(self, s, hash):
idb = hash['idb']
try:
idbn = int(idb)
except (TypeError, ValueError) as e:
s.sendall(rs_encode('> idb_n error: n should be a decimal value'))
return
try:
idbc = self.idb_clients[idbn]
except IndexError:
msg = "> idb_n error: index %d is invalid (see idblist)" % idbn
s.sendall(rs_encode(msg))
return
self.switch_idb(idbc)
msg = "> active idb is now \"%s\" (%d)" % (idbc.name, idbn)
s.sendall(rs_encode(msg))
# dbg notice that its current module has changed
def req_module(self, s, hash):
modpath = hash['path']
self.current_module = modname = altpath.basename(modpath)
matching = [idbc for idbc in self.idb_clients if (idbc.name.lower() == modname.lower())]
if not self.sync_mode_auto:
self.broadcast('sync_mode_auto off')
return
if len(matching) == 1:
# matched is set as active
self.switch_idb(matching[0])
else:
if not len(matching):
msg = "mod request has no match for %s"
else:
msg = "ambiguous mod request, too many matches for %s"
self.broadcast(msg % modname)
# no match, current idb (if existing) is disabled
if self.current_idb and self.current_idb.enabled:
self.switch_idb(None)
# dbg notice of error, e.g. current module resolution failed
def req_dbg_err(self, s, hash):
if self.sync_mode_auto:
self.switch_idb(None)
# sync mode tells if idb switch is automatic or manual
def req_sync_mode(self, s, hash):
mode = hash['auto']
if mode in ['on', 'off']:
self.broadcast("sync mode auto set to %s" % mode)
self.sync_mode_auto = (mode == 'on')
else:
self.broadcast("sync mode auto invalid param %s" % mode)
# bc request should be forwarded to all idbs
def req_bc(self, s, hash):
msg = "[sync]%s" % json.dumps(hash)
self.forward_all(msg)
def req_cmd(self, s, hash):
cmd = "%s\n" % hash['cmd']
self.current_dbg.client_sock.sendall(rs_encode(cmd))
# use logging facility to record the exception and exit
def err_log(self, msg):
rs_log.exception(msg, exc_info=True)
try:
self.broadcast('dispatcher stopped')
time.sleep(0.2)
[sckt.close() for sckt in self.srv_socks]
except Exception:
pass
finally:
sys.exit()
@contextmanager
def error_reporting(stage, info=None):
try:
yield
except Exception as e:
server.err_log(' error - '.join(filter(None, (stage, info))))
if __name__ == "__main__":
server = DispatcherSrv()
with error_reporting('server.config'):
rs_cfg = load_configuration()
with error_reporting('server.bind', '(%s:%s)' % (rs_cfg.host, rs_cfg.port)):
server.bind(rs_cfg.host, rs_cfg.port)
with error_reporting('server.loop'):
server.loop()
| gpl-3.0 | -4,726,392,119,650,906,000 | 31.767206 | 113 | 0.572744 | false |
adregan/pnger | decoder.py | 1 | 2027 | from chunks import split_into_chunks, parse_header_and_data
from pixels import PIXELS
from scanlines import split_scanlines
from filters import Filters
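# Assumption: InvalidPNG is raised below but is neither imported nor defined
# in this module as given; the upstream project may define it elsewhere.
# A minimal stand-in keeps the module self-contained.
class InvalidPNG(Exception):
    pass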
def reconstructer(scanlines, bpp):
reconstructed = [list(scanline.get('bytes')) for scanline in scanlines]
def reconstruct_byte(filter_type, current_byte, y, x):
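        # PNG reconstruction uses three neighbouring bytes: a is the byte one
        # pixel to the left, b is the byte directly above (previous scanline),
        # and c is the byte above-and-to-the-left; anything falling outside
        # the image is treated as zero.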
above = y - 1
left = x - bpp
a = 0 if left < 0 else reconstructed[y][left]
b = 0 if above < 0 else reconstructed[above][x]
c = 0 if (above < 0 or left < 0) else reconstructed[above][left]
return Filters('reconstruct')[filter_type](current_byte, a, b, c)
for y, scanline in enumerate(scanlines):
for x, current_byte in enumerate(scanline.get('bytes')):
reconstructed[y][x] = reconstruct_byte(
scanline.get('type'), current_byte, y, x)
return reconstructed
class Decoder(object):
def __init__(self, file_path):
with open('{}'.format(file_path), 'rb') as file:
self.image_bytes = file.read()
valid_png_header = b'\x89PNG\r\n\x1a\n'
if self.image_bytes[0:8] != valid_png_header:
raise InvalidPNG('not a valid header')
self.chunks = split_into_chunks(self.image_bytes[8:])
self.header_chunk, self.data_chunk = parse_header_and_data(self.chunks)
try:
self.pixel = PIXELS[self.header_chunk.color_type]
except KeyError as err:
raise KeyError('I haven\'t done that yet.')
else:
self.bpp = int(
len(self.pixel._fields) * (self.header_chunk.bit_depth / 8))
self.scanlines = split_scanlines(
self.header_chunk.width,
self.header_chunk.height,
self.bytes_per_pixel,
self.data_chunk
)
@property
def Pixel(self):
return self.pixel
@property
def bytes_per_pixel(self):
return self.bpp
def decode(self):
return reconstructer(self.scanlines, self.bytes_per_pixel)
| mit | -5,403,940,523,025,428,000 | 31.693548 | 79 | 0.607301 | false |
mhrivnak/pulp | server/pulp/plugins/conduits/mixins.py | 1 | 28436 | from gettext import gettext as _
import logging
import sys
from pymongo.errors import DuplicateKeyError
from pulp.plugins.model import Unit, PublishReport
from pulp.plugins.types import database as types_db
from pulp.server.async.tasks import get_current_task_id
from pulp.server.db.model.dispatch import TaskStatus
from pulp.server.exceptions import MissingResource
import pulp.plugins.conduits._common as common_utils
import pulp.server.managers.factory as manager_factory
_logger = logging.getLogger(__name__)
class ImporterConduitException(Exception):
"""
General exception that wraps any exception coming out of the Pulp server.
"""
pass
class DistributorConduitException(Exception):
"""
General exception that wraps any exception coming out of the Pulp server.
"""
pass
class ProfilerConduitException(Exception):
"""
General exception that wraps any exception coming out of the Pulp server.
"""
pass
class ContentSourcesConduitException(Exception):
"""
General exception that wraps any exception coming out of the Pulp server.
"""
pass
class RepoScratchPadMixin(object):
def __init__(self, repo_id, exception_class):
self.repo_id = repo_id
self.exception_class = exception_class
def get_repo_scratchpad(self):
"""
Returns the repository-level scratchpad for this repository. The
repository-level scratchpad can be seen and edited by all importers
and distributors on the repository. Care should be taken to not destroy
any data set by another plugin. This may be used to communicate between
importers, distributors and profilers relevant data for the repository.
"""
try:
repo_manager = manager_factory.repo_manager()
value = repo_manager.get_repo_scratchpad(self.repo_id)
return value
except Exception, e:
_logger.exception(
_('Error getting repository scratchpad for repo [%(r)s]') % {'r': self.repo_id})
raise self.exception_class(e), None, sys.exc_info()[2]
def set_repo_scratchpad(self, value):
"""
Saves the given value to the repository-level scratchpad for this
repository. It can be retrieved in subsequent importer, distributor
and profiler operations through get_repo_scratchpad.
@param value: will overwrite the existing scratchpad
@type value: dict
@raise ImporterConduitException: wraps any exception that may occur
in the Pulp server
"""
try:
repo_manager = manager_factory.repo_manager()
repo_manager.set_repo_scratchpad(self.repo_id, value)
except Exception, e:
_logger.exception(
_('Error setting repository scratchpad for repo [%(r)s]') % {'r': self.repo_id})
raise self.exception_class(e), None, sys.exc_info()[2]
def update_repo_scratchpad(self, scratchpad):
"""
Update the repository scratchpad with the specified key-value pairs.
New keys are added; existing keys are updated.
:param scratchpad: a dict used to update the scratchpad.
"""
try:
manager = manager_factory.repo_manager()
manager.update_repo_scratchpad(self.repo_id, scratchpad)
except Exception, e:
msg = _('Error updating repository scratchpad for repo [%(r)s]') % {'r': self.repo_id}
_logger.exception(msg)
raise self.exception_class(e), None, sys.exc_info()[2]
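# Illustrative only (the key and URL are hypothetical): a plugin holding a
# conduit built on RepoScratchPadMixin can persist shared state like this:
#     scratchpad = conduit.get_repo_scratchpad()
#     scratchpad['last_feed_url'] = 'http://example.com/repo/'
#     conduit.set_repo_scratchpad(scratchpad)
# or merge keys without clobbering the rest of the scratchpad:
#     conduit.update_repo_scratchpad({'last_feed_url': 'http://example.com/repo/'})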
class RepoScratchpadReadMixin(object):
"""
Used for read only access to a repository's scratchpad. The intention is for
this to be used by repository group plugins to access but not change
the scratchpads for the repositories in the group.
"""
def __init__(self, exception_class):
self.exception_class = exception_class
def get_repo_scratchpad(self, repo_id):
"""
Returns the repository-level scratchpad for the indicated repository.
@raise ImporterConduitException: wraps any exception that may occur
in the Pulp server
"""
try:
repo_manager = manager_factory.repo_manager()
value = repo_manager.get_repo_scratchpad(repo_id)
return value
except Exception, e:
_logger.exception(
_('Error getting repository scratchpad for repo [%(r)s]') % {'r': repo_id})
raise self.exception_class(e), None, sys.exc_info()[2]
class SingleRepoUnitsMixin(object):
def __init__(self, repo_id, exception_class):
self.repo_id = repo_id
self.exception_class = exception_class
def get_units(self, criteria=None, as_generator=False):
"""
Returns the collection of content units associated with the repository
being operated on.
Units returned from this call will have the id field populated and are
usable in any calls in this conduit that require the id field.
:param criteria: used to scope the returned results or the data within;
the Criteria class can be imported from this module
:type criteria: UnitAssociationCriteria
:return: list of unit instances
:rtype: list or generator of AssociatedUnit
"""
return do_get_repo_units(self.repo_id, criteria, self.exception_class, as_generator)
class MultipleRepoUnitsMixin(object):
def __init__(self, exception_class):
self.exception_class = exception_class
def get_units(self, repo_id, criteria=None, as_generator=False):
"""
Returns the collection of content units associated with the given
repository.
Units returned from this call will have the id field populated and are
usable in any calls in this conduit that require the id field.
:param criteria: used to scope the returned results or the data within;
the Criteria class can be imported from this module
:type criteria: UnitAssociationCriteria
:return: list of unit instances
:rtype: list or generator of AssociatedUnit
"""
return do_get_repo_units(repo_id, criteria, self.exception_class, as_generator)
class SearchUnitsMixin(object):
def __init__(self, exception_class):
self.exception_class = exception_class
def search_all_units(self, type_id, criteria):
"""
Searches for units of a given type in the server, regardless of their
associations to any repositories.
@param type_id: indicates the type of units being retrieved
@type type_id: str
@param criteria: used to query which units are returned
@type criteria: pulp.server.db.model.criteria.Criteria
@return: list of unit instances
@rtype: list of L{Unit}
"""
try:
query_manager = manager_factory.content_query_manager()
units = query_manager.find_by_criteria(type_id, criteria)
type_def = types_db.type_definition(type_id)
transfer_units = []
for pulp_unit in units:
u = common_utils.to_plugin_unit(pulp_unit, type_def)
transfer_units.append(u)
return transfer_units
except Exception, e:
_logger.exception('Exception from server requesting all units of type [%s]' % type_id)
raise self.exception_class(e), None, sys.exc_info()[2]
def find_unit_by_unit_key(self, type_id, unit_key):
"""
Finds a unit based on its unit key. If more than one unit comes back,
an exception will be raised.
@param type_id: indicates the type of units being retrieved
@type type_id: str
@param unit_key: the unit key for the unit
@type unit_key: dict
@return: a single unit
@rtype: L{Unit}
"""
content_query_manager = manager_factory.content_query_manager()
try:
# this call returns a unit or raises MissingResource
existing_unit = content_query_manager.get_content_unit_by_keys_dict(type_id, unit_key)
type_def = types_db.type_definition(type_id)
plugin_unit = common_utils.to_plugin_unit(existing_unit, type_def)
return plugin_unit
except MissingResource:
return None
class ImporterScratchPadMixin(object):
def __init__(self, repo_id, importer_id):
self.repo_id = repo_id
self.importer_id = importer_id
def get_scratchpad(self):
"""
Returns the value set for the importer's private scratchpad for this
repository. If no value has been set, None is returned.
@return: value saved for the repository and this importer
@rtype: <serializable>
@raise ImporterConduitException: wraps any exception that may occur
in the Pulp server
"""
try:
importer_manager = manager_factory.repo_importer_manager()
value = importer_manager.get_importer_scratchpad(self.repo_id)
return value
except Exception, e:
_logger.exception(_('Error getting scratchpad for repo [%(r)s]') % {'r': self.repo_id})
raise ImporterConduitException(e), None, sys.exc_info()[2]
def set_scratchpad(self, value):
"""
Saves the given value to the importer's private scratchpad for this
repository. It can later be retrieved in subsequent importer operations
through get_scratchpad. The type for the given value is anything that
can be stored in the database (string, list, dict, etc.).
@param value: will overwrite the existing scratchpad
@type value: <serializable>
@raise ImporterConduitException: wraps any exception that may occur
in the Pulp server
"""
try:
importer_manager = manager_factory.repo_importer_manager()
importer_manager.set_importer_scratchpad(self.repo_id, value)
except Exception, e:
_logger.exception(_('Error setting scratchpad for repo [%(r)s]') % {'r': self.repo_id})
raise ImporterConduitException(e), None, sys.exc_info()[2]
class DistributorScratchPadMixin(object):
def __init__(self, repo_id, distributor_id):
self.repo_id = repo_id
self.distributor_id = distributor_id
def get_scratchpad(self):
"""
Returns the value set in the scratchpad for this repository. If no
value has been set, None is returned.
@return: value saved for the repository and this distributor
@rtype: <serializable>
@raises DistributorConduitException: wraps any exception that may occur
in the Pulp server
"""
try:
distributor_manager = manager_factory.repo_distributor_manager()
value = distributor_manager.get_distributor_scratchpad(self.repo_id,
self.distributor_id)
return value
except Exception, e:
_logger.exception('Error getting scratchpad for repository [%s]' % self.repo_id)
raise DistributorConduitException(e), None, sys.exc_info()[2]
def set_scratchpad(self, value):
"""
Saves the given value to the scratchpad for this repository. It can later
be retrieved in subsequent syncs through get_scratchpad. The type for
the given value is anything that can be stored in the database (string,
list, dict, etc.).
@param value: will overwrite the existing scratchpad
@type value: <serializable>
@raises DistributorConduitException: wraps any exception that may occur
in the Pulp server
"""
try:
distributor_manager = manager_factory.repo_distributor_manager()
distributor_manager.set_distributor_scratchpad(self.repo_id, self.distributor_id, value)
except Exception, e:
_logger.exception('Error setting scratchpad for repository [%s]' % self.repo_id)
raise DistributorConduitException(e), None, sys.exc_info()[2]
class RepoGroupDistributorScratchPadMixin(object):
def __init__(self, group_id, distributor_id):
self.group_id = group_id
self.distributor_id = distributor_id
def get_scratchpad(self):
"""
Returns the value set in the scratchpad for this repository group. If no
value has been set, None is returned.
@return: value saved for the repository group and this distributor
@rtype: object
@raises DistributorConduitException: wraps any exception that may occur
in the Pulp server
"""
try:
distributor_manager = manager_factory.repo_group_distributor_manager()
value = distributor_manager.get_distributor_scratchpad(self.group_id,
self.distributor_id)
return value
except Exception, e:
_logger.exception('Error getting scratchpad for repository [%s]' % self.group_id)
raise DistributorConduitException(e), None, sys.exc_info()[2]
def set_scratchpad(self, value):
"""
Saves the given value to the scratchpad for this repository group. It
can later be retrieved in subsequent syncs through get_scratchpad. The
type for the given value is anything that can be stored in the database
(string, list, dict, etc.).
@param value: will overwrite the existing scratchpad
@type value: object
@raises DistributorConduitException: wraps any exception that may occur
in the Pulp server
"""
try:
distributor_manager = manager_factory.repo_group_distributor_manager()
distributor_manager.set_distributor_scratchpad(self.group_id, self.distributor_id,
value)
except Exception, e:
_logger.exception('Error setting scratchpad for repository [%s]' % self.group_id)
raise DistributorConduitException(e), None, sys.exc_info()[2]
class AddUnitMixin(object):
"""
Used to communicate back into the Pulp server while an importer performs
commands related to adding and linking units.
Instances of this class should *not* be cached between calls into the importer.
Each call will be issued its own conduit instance that is scoped
to that run of the operation alone.
Instances of this class are thread-safe. The importer implementation is
allowed to do whatever threading makes sense to optimize its process.
Calls into this instance do not have to be coordinated for thread safety,
the instance will take care of it itself.
"""
def __init__(self, repo_id, importer_id, association_owner_type, association_owner_id):
"""
@param repo_id: identifies the repo being synchronized
@type repo_id: str
@param importer_id: identifies the importer performing the sync
@type importer_id: str
@param association_owner_type: type used when creating associations;
set to either importer or user depending on what call is being
made into the importer
@type association_owner_type: str
@param association_owner_id: ID of the association owner
@type association_owner_id: str
"""
self.repo_id = repo_id
self.importer_id = importer_id
self.association_owner_type = association_owner_type
self.association_owner_id = association_owner_id
self._added_count = 0
self._updated_count = 0
self._association_owner_id = association_owner_id
def init_unit(self, type_id, unit_key, metadata, relative_path):
"""
Initializes the Pulp representation of a content unit. The conduit will
use the provided information to generate any unit metadata that it needs
to. A populated transfer object representation of the unit will be
returned from this call. The returned unit should be used in subsequent
calls to this conduit.
This call makes no changes to the Pulp server. At the end of this call,
the unit's id field will *not* be populated.
The unit_key and metadata will be merged as they are saved in Pulp to
form the full representation of the unit. If values are specified in
both dictionaries, the unit_key value takes precedence.
If the importer wants to save the bits for the unit, the relative_path
value should be used to indicate a unique -- with respect to the type
of unit -- relative path where it will be saved. Pulp will convert this
into an absolute path on disk where the unit should actually be saved.
The absolute path is stored in the returned unit object.
@param type_id: must correspond to a type definition in Pulp
@type type_id: str
@param unit_key: dictionary of whatever fields are necessary to uniquely
identify this unit from others of the same type
@type unit_key: dict
@param metadata: dictionary of key-value pairs to describe the unit
@type metadata: dict
@param relative_path: see above; may be None
@type relative_path: str, None
@return: object representation of the unit, populated by Pulp with both
provided and derived values
@rtype: pulp.plugins.model.Unit
"""
try:
# Generate the storage location
if relative_path is not None:
content_query_manager = manager_factory.content_query_manager()
path = content_query_manager.request_content_unit_file_path(type_id, relative_path)
else:
path = None
u = Unit(type_id, unit_key, metadata, path)
return u
except Exception, e:
msg = _('Exception from server requesting unit filename for relative path [%s]')
msg = msg % relative_path
_logger.exception(msg)
raise ImporterConduitException(e), None, sys.exc_info()[2]
def save_unit(self, unit):
"""
Performs two distinct steps on the Pulp server:
- Creates or updates Pulp's knowledge of the content unit.
- Associates the unit to the repository being synchronized.
If a unit with the provided unit key already exists, it is updated with
the attributes on the passed-in unit.
A reference to the provided unit is returned from this call. This call
will populate the unit's id field with the UUID for the unit.
:param unit: unit object returned from the init_unit call
:type unit: Unit
:return: object reference to the provided unit, its state updated from the call
:rtype: Unit
"""
try:
association_manager = manager_factory.repo_unit_association_manager()
# Save or update the unit
pulp_unit = common_utils.to_pulp_unit(unit)
unit.id = self._update_unit(unit, pulp_unit)
# Associate it with the repo
association_manager.associate_unit_by_id(
self.repo_id, unit.type_id, unit.id, self.association_owner_type,
self.association_owner_id)
return unit
except Exception, e:
_logger.exception(_('Content unit association failed [%s]' % str(unit)))
raise ImporterConduitException(e), None, sys.exc_info()[2]
def _update_unit(self, unit, pulp_unit):
"""
Update a unit. If it is not found, add it.
:param unit: the unit to be updated
:type unit: pulp.plugins.model.Unit
:param pulp_unit: the unit to be updated, as a dict
:type pulp_unit: dict
:return: id of the updated unit
:rtype: basestring
"""
content_query_manager = manager_factory.content_query_manager()
content_manager = manager_factory.content_manager()
try:
existing_unit = content_query_manager.get_content_unit_by_keys_dict(unit.type_id,
unit.unit_key)
unit_id = existing_unit['_id']
content_manager.update_content_unit(unit.type_id, unit_id, pulp_unit)
self._updated_count += 1
return unit_id
except MissingResource:
_logger.debug(_('cannot update unit; does not exist. adding instead.'))
return self._add_unit(unit, pulp_unit)
def _add_unit(self, unit, pulp_unit):
"""
Add a unit. If it already exists, update it.
This deals with a race condition where a unit might try to be updated,
but does not exist. Before this method can complete, another workflow
might add that same unit, causing the DuplicateKeyError below. This can
happen if two syncs are running concurrently of repositories that have
overlapping content.
:param unit: the unit to be updated
:type unit: pulp.plugins.model.Unit
:param pulp_unit: the unit to be updated, as a dict
:type pulp_unit: dict
:return: id of the updated unit
:rtype: basestring
"""
content_manager = manager_factory.content_manager()
try:
unit_id = content_manager.add_content_unit(unit.type_id, None, pulp_unit)
self._added_count += 1
return unit_id
except DuplicateKeyError:
_logger.debug(_('cannot add unit; already exists. updating instead.'))
return self._update_unit(unit, pulp_unit)
def link_unit(self, from_unit, to_unit, bidirectional=False):
"""
Creates a reference between two content units. The semantics of what
this relationship means depends on the types of content units being
used; this call simply ensures that Pulp will save and make available
the indication that a reference exists from one unit to another.
By default, the reference will only exist on the from_unit side. If
the bidirectional flag is set to true, a second reference will be created
on the to_unit to refer back to the from_unit.
Units passed to this call must have their id fields set by the Pulp server.
@param from_unit: owner of the reference
@type from_unit: L{Unit}
@param to_unit: will be referenced by the from_unit
@type to_unit: L{Unit}
"""
content_manager = manager_factory.content_manager()
try:
content_manager.link_referenced_content_units(from_unit.type_id, from_unit.id,
to_unit.type_id, [to_unit.id])
if bidirectional:
content_manager.link_referenced_content_units(to_unit.type_id, to_unit.id,
from_unit.type_id, [from_unit.id])
except Exception, e:
_logger.exception(_('Child link from parent [%(parent)s] to child [%(child)s] failed' %
{'parent': str(from_unit), 'child': str(to_unit)}))
raise ImporterConduitException(e), None, sys.exc_info()[2]
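# A minimal sketch of the init_unit -> save_unit -> link_unit flow described
# in the docstrings above; the type id, unit key, metadata and relative path
# are hypothetical and only illustrate how an importer drives AddUnitMixin:
#     unit = conduit.init_unit('rpm', {'name': 'foo', 'version': '1.0'},
#                              {'description': 'example unit'}, 'foo-1.0.rpm')
#     # write the unit's bits to unit.storage_path here, then:
#     unit = conduit.save_unit(unit)
#     # optionally record a relationship to another saved unit:
#     conduit.link_unit(unit, other_unit, bidirectional=True)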
class StatusMixin(object):
def __init__(self, report_id, exception_class):
self.report_id = report_id
self.exception_class = exception_class
self.progress_report = {}
self.task_id = get_current_task_id()
def set_progress(self, status):
"""
Informs the server of the current state of the publish operation. The
contents of the status is dependent on how the distributor
implementation chooses to divide up the publish process.
@param status: contains arbitrary data to describe the state of the
publish; the contents may contain whatever information is relevant
to the distributor implementation so long as it is serializable
"""
if self.task_id is None:
# not running within a task
return
try:
self.progress_report[self.report_id] = status
TaskStatus.objects(task_id=self.task_id).update_one(
set__progress_report=self.progress_report)
except Exception, e:
_logger.exception(
'Exception from server setting progress for report [%s]' % self.report_id)
try:
_logger.error('Progress value: %s' % str(status))
except Exception:
# Best effort to print this, but if its that grossly unserializable
# the log will tank and we don't want that exception to bubble up
pass
raise self.exception_class(e), None, sys.exc_info()[2]
class PublishReportMixin(object):
def build_success_report(self, summary, details):
"""
Creates the PublishReport instance that needs to be returned to the Pulp
server at the end of the publish_repo call.
@param summary: short log of the publish; may be None but probably shouldn't be
@type summary: any serializable
@param details: potentially longer log of the publish; may be None
@type details: any serializable
"""
r = PublishReport(True, summary, details)
return r
def build_failure_report(self, summary, details):
"""
Creates the PublishReport instance that needs to be returned to the Pulp
server at the end of the publish_repo call. The report built in this
fashion will indicate the publish operation has gracefully failed
(as compared to an unexpected exception bubbling up).
@param summary: short log of the publish; may be None but probably shouldn't be
@type summary: any serializable
@param details: potentially longer log of the publish; may be None
@type details: any serializable
"""
r = PublishReport(False, summary, details)
return r
def build_cancel_report(self, summary, details):
"""
Creates the PublishReport instance that needs to be returned to the Pulp
server at the end of the publish_repo call. The report built in this
fashion will indicate the publish operation has been cancelled.
@param summary: short log of the publish; may be None but probably shouldn't be
@type summary: any serializable
@param details: potentially longer log of the publish; may be None
@type details: any serializable
"""
r = PublishReport(False, summary, details)
r.canceled_flag = True
return r
def do_get_repo_units(repo_id, criteria, exception_class, as_generator=False):
"""
Performs a repo unit association query. This is split apart so we can have
custom mixins with different signatures.
"""
try:
association_query_manager = manager_factory.repo_unit_association_query_manager()
# Use a get_units as_generator here and cast to a list later, if necessary.
units = association_query_manager.get_units(repo_id, criteria=criteria, as_generator=True)
# Load all type definitions so we don't hammer the database.
type_defs = dict((t['id'], t) for t in types_db.all_type_definitions())
# Transfer object generator.
def _transfer_object_generator():
for u in units:
yield common_utils.to_plugin_associated_unit(u, type_defs[u['unit_type_id']])
if as_generator:
return _transfer_object_generator()
# Maintain legacy behavior by default.
return list(_transfer_object_generator())
except Exception, e:
_logger.exception(
'Exception from server requesting all content units for repository [%s]' % repo_id)
raise exception_class(e), None, sys.exc_info()[2]
| gpl-2.0 | -6,392,641,655,943,696,000 | 38.994374 | 100 | 0.632578 | false |
MadeiraCloud/salt | sources/salt/log/handlers/sentry_mod.py | 1 | 6272 | # -*- coding: utf-8 -*-
'''
Sentry Logging Handler
======================
.. versionadded:: 0.17.0
This module provides a `Sentry`_ logging handler.
.. admonition:: Note
The `Raven`_ library needs to be installed on the system for this
logging handler to be available.
Configuring the python `Sentry`_ client, `Raven`_, should be done under the
``sentry_handler`` configuration key.
At the bare minimum, you need to define the `DSN`_. As an example:
.. code-block:: yaml
sentry_handler:
dsn: https://pub-key:[email protected]/app-id
More complex configurations can be achieved, for example:
.. code-block:: yaml
sentry_handler:
servers:
- https://sentry.example.com
- http://192.168.1.1
project: app-id
public_key: deadbeefdeadbeefdeadbeefdeadbeef
secret_key: beefdeadbeefdeadbeefdeadbeefdead
All the client configuration keys are supported, please see the
`Raven client documentation`_.
The default logging level for the sentry handler is ``ERROR``. If you wish
to define a different one, define ``log_level`` under the
``sentry_handler`` configuration key:
.. code-block:: yaml
sentry_handler:
dsn: https://pub-key:[email protected]/app-id
log_level: warning
The available log levels are those also available for the salt ``cli``
tools and configuration; ``salt --help`` should give you the required
information.
Threaded Transports
-------------------
Raven's documents rightly suggest using its threaded transport for
critical applications. However, don't forget that if you start having
troubles with Salt after enabling the threaded transport, please try
switching to a non-threaded transport to see if that fixes your problem.
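The transport is chosen through the DSN scheme. As a sketch only -- the exact
scheme names accepted depend on the installed Raven version -- a threaded
HTTPS DSN could look like:
.. code-block:: yaml
    sentry_handler:
      dsn: threaded+https://pub-key:[email protected]/app-id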
.. _`DSN`: http://raven.readthedocs.org/en/latest/config/index.html#the-sentry-dsn
.. _`Sentry`: http://getsentry.com
.. _`Raven`: http://raven.readthedocs.org
.. _`Raven client documentation`: http://raven.readthedocs.org/en/latest/config/index.html#client-arguments
'''
# Import python libs
import logging
# Import salt libs
from salt.log import LOG_LEVELS
# Import 3rd party libs
try:
import raven
from raven.handlers.logging import SentryHandler
HAS_RAVEN = True
except ImportError:
HAS_RAVEN = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'sentry'
def __virtual__():
if HAS_RAVEN is True:
return __virtualname__
return False
def setup_handlers():
if 'sentry_handler' not in __opts__:
log.debug('No \'sentry_handler\' key was found in the configuration')
return False
options = {}
dsn = get_config_value('dsn')
if dsn is not None:
try:
dsn_config = raven.load(dsn)
options.update({
'project': dsn_config['SENTRY_PROJECT'],
'servers': dsn_config['SENTRY_SERVERS'],
'public_key': dsn_config['SENTRY_PUBLIC_KEY'],
'private_key': dsn_config['SENTRY_SECRET_KEY']
})
except ValueError as exc:
log.info(
'Raven failed to parse the configuration provided '
'DSN: {0}'.format(exc)
)
# Allow options to be overridden if previously parsed, or define them
for key in ('project', 'servers', 'public_key', 'private_key'):
config_value = get_config_value(key)
if config_value is None and key not in options:
log.debug(
'The required \'sentry_handler\' configuration key, '
'{0!r}, is not properly configured. Not configuring '
'the sentry logging handler.'.format(key)
)
return
elif config_value is None:
continue
options[key] = config_value
# site: An optional, arbitrary string to identify this client installation.
options.update({
# site: An optional, arbitrary string to identify this client
# installation
'site': get_config_value('site'),
# name: This will override the server_name value for this installation.
# Defaults to socket.gethostname()
'name': get_config_value('name'),
        # exclude_paths: Extending this allows you to ignore module prefixes
# when sentry attempts to discover which function an error comes from
'exclude_paths': get_config_value('exclude_paths', ()),
# include_paths: For example, in Django this defaults to your list of
# INSTALLED_APPS, and is used for drilling down where an exception is
# located
'include_paths': get_config_value('include_paths', ()),
# list_max_length: The maximum number of items a list-like container
# should store.
'list_max_length': get_config_value('list_max_length'),
# string_max_length: The maximum characters of a string that should be
# stored.
'string_max_length': get_config_value('string_max_length'),
# auto_log_stacks: Should Raven automatically log frame stacks
        # (including locals) for all calls as it would for exceptions.
'auto_log_stacks': get_config_value('auto_log_stacks'),
# timeout: If supported, the timeout value for sending messages to
# remote.
'timeout': get_config_value('timeout', 1),
# processors: A list of processors to apply to events before sending
# them to the Sentry server. Useful for sending additional global state
# data or sanitizing data that you want to keep off of the server.
'processors': get_config_value('processors')
})
client = raven.Client(**options)
try:
handler = SentryHandler(client)
handler.setLevel(LOG_LEVELS[get_config_value('log_level', 'error')])
return handler
except ValueError as exc:
log.debug(
'Failed to setup the sentry logging handler: {0}'.format(exc),
exc_info=exc
)
def get_config_value(name, default=None):
return __opts__['sentry_handler'].get(name, default)
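# Illustrative usage sketch (not part of the Salt source): with a minimal
# ``sentry_handler`` section in __opts__, Salt's logging setup would call
# setup_handlers() and attach the returned handler, roughly:
#
#     __opts__ = {'sentry_handler': {
#         'dsn': 'https://pub-key:[email protected]/app-id',
#         'log_level': 'warning'}}
#     handler = setup_handlers()
#     if handler:
#         logging.getLogger(__name__).addHandler(handler)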
| apache-2.0 | -1,743,631,411,951,108,900 | 32.010526 | 111 | 0.629145 | false |
town-hall-pinball/project-omega | tests/system/test_gi.py | 1 | 1950 | # Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from pin.lib import p
import unittest
from tests import fixtures
from mock import Mock
class TestGI(unittest.TestCase):
def setUp(self):
fixtures.reset()
p.modes["gi"].enable()
p.gi["gi01"].enable()
def test_power_save(self):
fixtures.loop()
self.assertEquals("enable", p.gi["gi01"].state["schedule"])
p.now = 10 * 60
fixtures.loop()
self.assertEquals("disable", p.gi["gi01"].state["schedule"])
def test_wake(self):
fixtures.loop()
self.assertEquals("enable", p.gi["gi01"].state["schedule"])
p.now = 10 * 60
fixtures.loop()
self.assertEquals("disable", p.gi["gi01"].state["schedule"])
p.switches["start_button"].activate()
fixtures.loop()
self.assertEquals("enable", p.gi["gi01"].state["schedule"])
| mit | -2,559,888,099,575,586,000 | 38.795918 | 77 | 0.699487 | false |
jmescuderojustel/codeyourblogin-python-django-1.7 | src/blog/controllers/author_controller.py | 1 | 1194 | from django.shortcuts import render_to_response, render, redirect
from ..models.author import Author
def signupInSite(request):
if request.method == 'GET':
return render(request, 'users/signup.html', {})
elif request.method == 'POST':
author = Author()
author.name = request.POST['name']
author.email = request.POST['email']
author.password = request.POST['password']
author.save()
request.session['currentUser'] = { 'name': author.name, 'id': author.pk }
return redirect('/management/posts/1')
def loginToSite(request):
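    # GET renders the login form; POST checks the submitted credentials
    # against the stored (plain-text) password and, on success, stores the
    # author in the session before redirecting to the management page.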
if request.method == 'GET':
return render(request, 'users/login.html', {})
elif request.method == 'POST':
        author = Author.objects.filter(email=request.POST['email']).first()
if author is None or author.password != request.POST['password']:
return render_to_response('users/login.html', {'error': 1})
else:
request.session['currentUser'] = { 'name': author.name, 'id': author.pk }
return redirect('/management/posts/1')
def logoutFromSite(request):
request.session['currentUser'] = None
return redirect('/')
| mit | 3,862,361,985,306,047,500 | 24.404255 | 85 | 0.620603 | false |
lizardsystem/lizard-rijnmond | lizard_rijnmond/migrations/0007_auto__add_year.py | 1 | 3097 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Year'
db.create_table('lizard_rijnmond_year', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
))
db.send_create_signal('lizard_rijnmond', ['Year'])
def backwards(self, orm):
# Deleting model 'Year'
db.delete_table('lizard_rijnmond_year')
models = {
'lizard_rijnmond.measure': {
'Meta': {'object_name': 'Measure'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'lizard_rijnmond.result': {
'Meta': {'object_name': 'Result'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_rijnmond.Measure']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_rijnmond.scenario': {
'Meta': {'object_name': 'Scenario'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'lizard_rijnmond.segment': {
'Meta': {'object_name': 'Segment'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maintainer': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'the_geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {})
},
'lizard_rijnmond.strategy': {
'Meta': {'object_name': 'Strategy'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'lizard_rijnmond.year': {
'Meta': {'object_name': 'Year'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_rijnmond']
| gpl-3.0 | 8,076,725,355,685,609,000 | 48.15873 | 149 | 0.534065 | false |
gems-uff/noworkflow | capture/noworkflow/now/cmd/__init__.py | 1 | 2147 | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Commands and argument parsers for 'now'"""
from __future__ import (absolute_import, print_function,
division)
import argparse
import sys
import sqlalchemy
from .command import Command, SmartFormatter
from .cmd_run import Run
from .cmd_debug import Debug
from .cmd_list import List
from .cmd_show import Show
from .cmd_diff import Diff
from .cmd_dataflow import Dataflow
from .cmd_export import Export
from .cmd_restore import Restore
from .cmd_vis import Vis
from .cmd_demo import Demo
from .cmd_helper import Helper
from .cmd_history import History
from .cmd_schema import Schema
from .cmd_gc import GC
from ..utils.io import print_msg
def main():
"""Main function"""
from ..utils.functions import version
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=SmartFormatter)
parser.add_argument("-v", "--version", action="version",
version="noWorkflow {}".format(version()))
subparsers = parser.add_subparsers()
commands = [
Run(),
Debug(),
List(),
Show(),
Diff(),
Dataflow(),
Export(),
Restore(),
Vis(),
Demo(),
Helper(),
History(),
Schema(),
GC()
]
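    # Each Command instance above registers one subcommand: create_parser()
    # adds its arguments to `subparsers` and binds args.func to the command's
    # handler, so e.g. ``now run <script>`` would presumably dispatch to Run().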
for cmd in commands:
cmd.create_parser(subparsers)
if len(sys.argv) == 1:
sys.argv.append("-h")
try:
args, _ = parser.parse_known_args()
args.func(args)
except RuntimeError as exc:
print_msg(exc, True)
except sqlalchemy.exc.OperationalError as exc:
print_msg("invalid noWorkflow database", True)
print_msg("it is probably outdated", True)
__all__ = [
"Command",
"Run",
"Debug",
"List",
"Show",
"Diff",
"Dataflow",
"Export",
"Restore",
"Vis",
"Demo",
"Helper",
"History",
"GC",
"main",
]
| mit | 8,874,603,974,914,559,000 | 23.397727 | 68 | 0.60503 | false |
coin-or/oBB | obb/NAG/setup.py | 1 | 2075 | #!/usr/bin/env python
# Setup script for oBB.
from setuptools import setup
#from numpy.distutils.core import setup, Extension
from sys import version_info, exit
# Make sure the correct version of Python is installed.
if (version_info[:2] < (2,6))or(version_info[0] == 3):
print "oBB requires Python 2.6 to 2.7. Python %d.%d detected" % \
version_info[:2]
exit(-1)
# NAG Library QP Solver Fortran extension paths
#src = ['obb/qpsolver.pyf','obb/qpsolver.f90']
#extr_link_arg = []
#libs = ['$nag_dir/lib/libnag_mkl.so']
#lib_dirs = []
#inc_dirs=['$nag_dir/nag_interface_blocks']
#extr_obj=['$nag_dir/lib/libnag_mkl.so']
# Get package version
exec(open('obb/version.py').read())
# Setup package
setup(
name='oBB',
version=__version__ ,
description='Parallel global optimization of Hessian Lipschitz continuous functions.',
author='J. Fowkes',
author_email='[email protected]',
packages=['obb', 'obb.test'],
scripts=['bin/sins.sh','bin/sins.py'],
url='http://pypi.python.org/pypi/oBB/',
license='LGPLv2',
long_description=open('README.txt').read(),
install_requires=[
"numpy >= 1.3.0",
"mpi4py >= 1.3",
"cvxopt >= 1.1.3",
#"sympy >= 0.7.1",
#"matplotlib >= 1.1.0",
],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.6",
"Topic :: Scientific/Engineering :: Mathematics",
],
    # NAG library QP solver Fortran extension
#ext_modules=[Extension('qpsolver',
# sources=src,
# libraries=libs,
# #library_dirs=[],
# include_dirs=inc_dirs,
# extra_objects=extr_obj,
# ),
#],
zip_safe=False)
| lgpl-3.0 | -6,352,223,212,597,501,000 | 33.016393 | 90 | 0.568193 | false |
smajda/django-nopassword | tests/test_models.py | 1 | 2088 | # -*- coding: utf8 -*-
import time
from django.contrib.auth import authenticate
from django.test.utils import override_settings
from django.utils import unittest
from nopassword.models import LoginCode
from nopassword.utils import get_user_model
class TestLoginCodes(unittest.TestCase):
def setUp(self):
self.user = get_user_model().objects.create(username='test_user')
self.inactive_user = get_user_model().objects.create(username='inactive', is_active=False)
def tearDown(self):
self.user.delete()
self.inactive_user.delete()
def test_login_backend(self):
self.code = LoginCode.create_code_for_user(self.user)
self.assertEqual(len(self.code.code), 20)
self.assertIsNotNone(authenticate(username=self.user.username, code=self.code.code))
self.assertEqual(LoginCode.objects.filter(user=self.user, code=self.code.code).count(), 0)
authenticate(username=self.user.username)
self.assertEqual(LoginCode.objects.filter(user=self.user).count(), 1)
self.assertIsNone(LoginCode.create_code_for_user(self.inactive_user))
self.assertIsNone(authenticate(username=self.inactive_user.username))
@override_settings(NOPASSWORD_CODE_LENGTH=8)
def test_shorter_code(self):
self.code = LoginCode.create_code_for_user(self.user)
self.assertEqual(len(self.code.code), 8)
@override_settings(NOPASSWORD_NUMERIC_CODES=True)
def test_numeric_code(self):
self.code = LoginCode.create_code_for_user(self.user)
self.assertEqual(len(self.code.code), 20)
self.assertTrue(self.code.code.isdigit())
def test_next_value(self):
self.code = LoginCode.create_code_for_user(self.user, next='/secrets/')
self.assertEqual(self.code.next, '/secrets/')
@override_settings(NOPASSWORD_LOGIN_CODE_TIMEOUT=1)
def test_code_timeout(self):
self.timeout_code = LoginCode.create_code_for_user(self.user)
time.sleep(3)
self.assertIsNone(authenticate(username=self.user.username, code=self.timeout_code.code))
| mit | 4,837,027,171,841,363,000 | 39.153846 | 98 | 0.706418 | false |
doctoromer/haya-data | client/main.py | 1 | 2118 | """
This module is the main module of the client, and contains
almost all the necessery classes needed to run it.
"""
import Queue
import logging
import logging.config
import optparse
import json
import os
import sys
sys.dont_write_bytecode = True
import network
import logic
def main():
"""The main function of the client."""
# parsing command line argumants
parser = optparse.OptionParser()
parser.add_option('-c', '--config',
dest='config_file', default='logging.json',
help='set the logging configuration file.')
parser.add_option('-d', '--datapath',
dest='data_path', default='data',
help='set the path of the data directory. ' +
'default is \'data\'.')
parser.add_option('-s', '--server',
dest='server_ip', default='127.0.0.1',
help='set the server IP address. default is localhost.')
parser.add_option('-p', '--port',
dest='port', default='2048', type='int',
help='set custom connection port.')
options, args = parser.parse_args()
# configurate the loggers of the threads
with open(options.config_file, 'rb') as f:
config = json.loads(f.read())
logging.config.dictConfig(config)
if not os.path.exists(options.data_path):
os.mkdir(options.data_path)
logic_queue = Queue.Queue()
network_queue = Queue.Queue()
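    # Message flow (as wired below): the receiver thread pushes incoming
    # server messages onto logic_queue, the logic thread replies via
    # network_queue, and the sender thread drains network_queue back to the
    # server.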
network_receiver_thread = network.NetworkReceiverThread(
server_ip=options.server_ip,
port=options.port,
network_queue=network_queue,
logic_queue=logic_queue)
network_sender_thread = network.NetworkSenderThread(
network_queue=network_queue)
logic_thread = logic.LogicThread(
data_path=options.data_path,
logic_queue=logic_queue,
network_queue=network_queue)
network_receiver_thread.start()
network_sender_thread.start()
logic_thread.start()
if __name__ == '__main__':
main()
| mit | 6,298,264,401,967,966,000 | 28.257143 | 78 | 0.584514 | false |
Azure/azure-sdk-for-python | sdk/netapp/azure-mgmt-netapp/tests/test_snapshot.py | 1 | 4921 | import time
from azure.mgmt.resource import ResourceManagementClient
from devtools_testutils import AzureMgmtTestCase
from azure.mgmt.netapp.models import Volume, Snapshot
from test_volume import create_volume, wait_for_volume, delete_volume
from test_pool import delete_pool
from test_account import delete_account
from setup import *
import azure.mgmt.netapp.models
TEST_SNAPSHOT_1 = 'sdk-py-tests-snapshot-1'
TEST_SNAPSHOT_2 = 'sdk-py-tests-snapshot-2'
def create_snapshot(client, rg=TEST_RG, account_name=TEST_ACC_1, pool_name=TEST_POOL_1, volume_name=TEST_VOL_1,
snapshot_name=TEST_SNAPSHOT_1, location=LOCATION, snapshot_only=False):
if not snapshot_only:
volume = create_volume(client, rg, account_name, pool_name, volume_name)
# be sure the volume is really available
wait_for_volume(client, rg, account_name, pool_name, volume_name)
else:
# we need to get the volume id if we didn't just create it
volume = client.volumes.get(rg, account_name, pool_name, volume_name)
body = Snapshot(location=location, file_system_id=volume.file_system_id)
return client.snapshots.begin_create(rg, account_name, pool_name, volume_name, snapshot_name, body).result()
def delete_snapshot(client, rg, account_name, pool_name, volume_name, snapshot_name, live=False):
client.snapshots.begin_delete(rg, account_name, pool_name, volume_name, snapshot_name).wait()
# wait to be sure it has gone - a workaround for the async nature of certain ARM processes
co = 0
while co < 10:
co += 1
if live:
time.sleep(20)
try:
client.snapshots.get(rg, account_name, pool_name, volume_name, snapshot_name)
except:
# not found is an exception case (status code 200 expected)
# but is what we are waiting for
break
class NetAppAccountTestCase(AzureMgmtTestCase):
def setUp(self):
super(NetAppAccountTestCase, self).setUp()
self.client = self.create_mgmt_client(azure.mgmt.netapp.NetAppManagementClient)
# Before tests are run live a resource group needs to be created along with vnet and subnet
# Note that when tests are run in live mode it is best to run one test at a time.
def test_create_delete_snapshot(self):
create_snapshot(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, TEST_SNAPSHOT_1, LOCATION)
snapshot_list = self.client.snapshots.list(TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1)
self.assertEqual(len(list(snapshot_list)), 1)
delete_snapshot(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, TEST_SNAPSHOT_1, self.is_live)
snapshot_list = self.client.snapshots.list(TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1)
self.assertEqual(len(list(snapshot_list)), 0)
delete_volume(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, self.is_live)
delete_pool(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, self.is_live)
delete_account(self.client, TEST_RG, TEST_ACC_1, self.is_live)
def test_list_snapshots(self):
create_snapshot(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, TEST_SNAPSHOT_1, LOCATION)
create_snapshot(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, TEST_SNAPSHOT_2, LOCATION, snapshot_only=True)
snapshots = [TEST_SNAPSHOT_1, TEST_SNAPSHOT_2]
snapshot_list = self.client.snapshots.list(TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1)
self.assertEqual(len(list(snapshot_list)), 2)
idx = 0
for snapshot in snapshot_list:
self.assertEqual(snapshot.name, snapshots[idx])
idx += 1
delete_snapshot(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, TEST_SNAPSHOT_1, self.is_live)
delete_snapshot(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, TEST_SNAPSHOT_2, self.is_live)
delete_volume(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, self.is_live)
delete_pool(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, self.is_live)
delete_account(self.client, TEST_RG, TEST_ACC_1, self.is_live)
def test_get_snapshot_by_name(self):
create_snapshot(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, TEST_SNAPSHOT_1, LOCATION)
snapshot = self.client.snapshots.get(TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, TEST_SNAPSHOT_1)
self.assertEqual(snapshot.name, TEST_ACC_1 + '/' + TEST_POOL_1 + '/' + TEST_VOL_1+ '/' + TEST_SNAPSHOT_1)
delete_snapshot(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, TEST_SNAPSHOT_1, self.is_live)
delete_volume(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, self.is_live)
delete_pool(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, self.is_live)
delete_account(self.client, TEST_RG, TEST_ACC_1, self.is_live)
| mit | 1,906,641,278,180,577,800 | 51.351064 | 129 | 0.681366 | false |
aoakeson/home-assistant | homeassistant/components/vera.py | 1 | 4180 | """
Support for Vera devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/vera/
"""
import logging
from collections import defaultdict
from requests.exceptions import RequestException
from homeassistant import bootstrap
from homeassistant.const import (
ATTR_SERVICE, ATTR_DISCOVERED,
EVENT_HOMEASSISTANT_STOP, EVENT_PLATFORM_DISCOVERED)
from homeassistant.helpers.entity import Entity
from homeassistant.loader import get_component
REQUIREMENTS = ['pyvera==0.2.8']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'vera'
VERA_CONTROLLER = None
CONF_EXCLUDE = 'exclude'
CONF_LIGHTS = 'lights'
BINARY_SENSOR = 'binary_sensor'
SENSOR = 'sensor'
LIGHT = 'light'
SWITCH = 'switch'
DEVICE_CATEGORIES = {
'Sensor': BINARY_SENSOR,
'Temperature Sensor': SENSOR,
'Light Sensor': SENSOR,
'Humidity Sensor': SENSOR,
'Dimmable Switch': LIGHT,
'Switch': SWITCH,
'Armable Sensor': SWITCH,
'On/Off Switch': SWITCH,
# 'Window Covering': NOT SUPPORTED YET
}
DISCOVER_BINARY_SENSORS = 'vera.binary_sensors'
DISCOVER_SENSORS = 'vera.sensors'
DISCOVER_LIGHTS = 'vera.lights'
DISCOVER_SWITCHES = 'vera.switchs'
VERA_DEVICES = defaultdict(list)
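# Illustrative configuration sketch (assumed, not quoted from the official
# docs) matching the keys read in setup() below -- 'vera_controller_url' is
# required, while 'exclude' and 'lights' are optional lists of device ids:
#
#     vera:
#       vera_controller_url: http://192.168.1.161:3480/
#       exclude: [13, 14]
#       lights: [15, 16]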
# pylint: disable=unused-argument, too-many-function-args
def setup(hass, base_config):
"""Common setup for Vera devices."""
global VERA_CONTROLLER
import pyvera as veraApi
config = base_config.get(DOMAIN)
base_url = config.get('vera_controller_url')
if not base_url:
_LOGGER.error(
"The required parameter 'vera_controller_url'"
" was not found in config"
)
return False
VERA_CONTROLLER, _ = veraApi.init_controller(base_url)
def stop_subscription(event):
"""Shutdown Vera subscriptions and subscription thread on exit."""
_LOGGER.info("Shutting down subscriptions.")
VERA_CONTROLLER.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_subscription)
try:
all_devices = VERA_CONTROLLER.get_devices(
list(DEVICE_CATEGORIES.keys()))
except RequestException:
# There was a network related error connecting to the vera controller.
_LOGGER.exception("Error communicating with Vera API")
return False
exclude = config.get(CONF_EXCLUDE, [])
if not isinstance(exclude, list):
_LOGGER.error("'exclude' must be a list of device_ids")
return False
lights_ids = config.get(CONF_LIGHTS, [])
if not isinstance(lights_ids, list):
_LOGGER.error("'lights' must be a list of device_ids")
return False
for device in all_devices:
if device.device_id in exclude:
continue
dev_type = DEVICE_CATEGORIES.get(device.category)
if dev_type is None:
continue
if dev_type == SWITCH and device.device_id in lights_ids:
dev_type = LIGHT
VERA_DEVICES[dev_type].append(device)
for comp_name, discovery in (((BINARY_SENSOR, DISCOVER_BINARY_SENSORS),
(SENSOR, DISCOVER_SENSORS),
(LIGHT, DISCOVER_LIGHTS),
(SWITCH, DISCOVER_SWITCHES))):
component = get_component(comp_name)
bootstrap.setup_component(hass, component.DOMAIN, config)
hass.bus.fire(EVENT_PLATFORM_DISCOVERED,
{ATTR_SERVICE: discovery,
ATTR_DISCOVERED: {}})
return True
class VeraDevice(Entity):
"""Representation of a Vera devicetity."""
def __init__(self, vera_device, controller):
"""Initialize the device."""
self.vera_device = vera_device
self.controller = controller
self._name = self.vera_device.name
self.controller.register(vera_device, self._update_callback)
self.update()
def _update_callback(self, _device):
self.update_ha_state(True)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
| mit | 4,887,484,294,927,713,000 | 28.43662 | 78 | 0.645215 | false |
google/active-qa | px/nmt/context_encoder.py | 1 | 9795 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convert the context string into a vector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from third_party.nmt.utils import misc_utils as utils
from px.nmt import model_helper
utils.check_tensorflow_version()
def feed(context_vector, encoder_outputs, encoder_state, hparams):
"""Feed the context vector into to model.
Args:
context_vector: A context vector of [batch, vector_size]
encoder_outputs: The source encoder outputs.
Will be passed into the attention.
encoder_state: The source encoder final hidden state.
Will be passed into decoder initial state.
hparams: Hyperparameters configurations.
Returns:
    encoder outputs and encoder state that have been fed with context.
Raises:
ValueError: if context_feed value is not defined.
"""
# string append. Do nothing
if hparams.context_feed == "append":
return encoder_outputs, encoder_state
# feed the context into the decoder initial hidden state
elif hparams.context_feed == "decoder_hidden_state":
if hparams.context_vector == "last_state":
encoder_state = context_vector
else:
encoder_state = ((tf.contrib.rnn.LSTMStateTuple(
context_vector, context_vector),) * len(encoder_state))
# feed the context into the encoder output
elif hparams.context_feed == "encoder_output":
if hparams.context_vector != "bilstm_full":
context_vector = tf.expand_dims(context_vector, 0)
encoder_outputs = tf.concat([context_vector, encoder_outputs], 0)
else:
raise ValueError("Unknown context_feed mode: {}"
.format(hparams.context_feed))
return encoder_outputs, encoder_state
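# Typical call order (sketch based on this module's own signatures): build the
# context with get_context_vector() and then pass it through feed(), e.g.
#   context = get_context_vector(mode, iterator, hparams)
#   encoder_outputs, encoder_state = feed(context, encoder_outputs,
#                                         encoder_state, hparams)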
def get_context_vector(mode, iterator, hparams, vector_size=None):
"""Convert the context string into a vector.
Args:
mode: Must be tf.contrib.learn.ModeKeys.TRAIN,
tf.contrib.learn.ModeKeys.EVAL, or tf.contrib.learn.ModeKeys.INFER.
iterator: A BatchedInput iterator.
hparams: Hyperparameters configurations.
vector_size: context vector size. Will be hparams.num_units if undefined.
Returns:
A context vector tensor of size [batch_size, vector_size].
Raises:
ValueError: if context_vector value is not defined.
"""
if hparams.context_vector == "append":
return None
if vector_size is None:
vector_size = hparams.num_units
# maxpooling over all encoder's outputs (https://arxiv.org/abs/1709.04348).
if hparams.context_vector == "bilstm_pool":
encoder_outputs, _ = _build_lstm_encoder(mode, iterator, hparams)
# maxpool over time axis
context_vector = tf.reduce_max(encoder_outputs, 0)
# get all encoder outputs
elif hparams.context_vector == "bilstm_full":
encoder_outputs, _ = _build_lstm_encoder(mode, iterator, hparams)
return encoder_outputs
# get the last encoder output
elif hparams.context_vector == "bilstm_last":
encoder_outputs, _ = _build_lstm_encoder(mode, iterator, hparams)
# get the last encoder output
context_vector = get_last_encoder_output(encoder_outputs,
iterator.context_sequence_length)
# 4-layers CNN, then pool over all layers (https://arxiv.org/abs/1709.04348).
elif hparams.context_vector == "cnn":
context_vector = get_cnn_vector(mode, iterator, hparams)
# get the last LSTM hidden state.
elif hparams.context_vector == "last_state":
_, encoder_state = _build_lstm_encoder(mode, iterator, hparams)
return encoder_state
else:
raise ValueError("Unknown context_vector mode: {}"
.format(hparams.context_vector))
# resize the context vector to the desired length
resizer = tf.get_variable(
"context_resizer",
shape=(context_vector.get_shape()[1], vector_size),
dtype=tf.float32)
context_vector = tf.tanh(tf.matmul(context_vector, resizer))
return context_vector
def get_embeddings(hparams, iterator):
"""Look up embedding, encoder_emb_inp: [max_time, batch_size, num_units]."""
source = iterator.context
# Make shape [max_time, batch_size].
source = tf.transpose(source)
embedding_context = tf.get_variable(
"embedding_context", [hparams.src_vocab_size, hparams.num_units],
tf.float32)
encoder_emb_inp = tf.nn.embedding_lookup(embedding_context, source)
return encoder_emb_inp
def get_cnn_vector(mode, iterator, hparams, kernels=(3, 3, 3, 3)):
with tf.variable_scope("context_cnn_encoder"):
conv = get_embeddings(hparams, iterator)
# Set axis into [batch_size, max_time, num_units] to simplify CNN operations
conv = tf.transpose(conv, [1, 0, 2])
maxpools = []
for layer, kernel_size in enumerate(kernels):
conv = conv1d(conv, [kernel_size, conv.shape[2].value, hparams.num_units],
layer)
# maxpool on time axis
maxpools.append(tf.reduce_max(conv, 1))
# flatten the unit axis
maxpools = tf.concat(maxpools, -1)
return maxpools
def conv1d(tensor, filter_shape, layer):
weight = tf.get_variable("W_{}".format(layer), filter_shape)
bias = tf.get_variable("b_{}".format(layer), [filter_shape[2]])
conv = tf.nn.conv1d(tensor, weight, stride=1, padding="SAME", name="conv")
return tf.nn.relu(tf.nn.bias_add(conv, bias), name="relu")
def get_last_encoder_output(encoder_outputs, sequence_length):
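  # Assumes encoder_outputs is time-major ([max_time, batch_size, num_units]);
  # selects, for each batch element, the output at its last valid time step.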
# Make shape [batch_size, max_time, num_units].
encoder_outputs = tf.transpose(encoder_outputs, [1, 0, 2])
batch_range = tf.range(tf.shape(encoder_outputs)[0])
indices = tf.stack([batch_range, sequence_length - 1], axis=1)
return tf.gather_nd(encoder_outputs, indices)
def _build_lstm_encoder(mode, iterator, hparams):
"""Build an encoder."""
num_layers = hparams.num_encoder_layers
num_residual_layers = hparams.num_residual_layers
with tf.variable_scope("context_rnn_encoder") as scope:
dtype = scope.dtype
encoder_emb_inp = get_embeddings(hparams, iterator)
num_bi_layers = int(num_layers / 2)
num_bi_residual_layers = int(num_residual_layers / 2)
    # Shape of encoder_outputs if time major is True:
# [max_time, batch_size, num_units]
# Shape of encoder_outputs if time major is False:
# [batch_size, max_time, num_units]
encoder_outputs, bi_encoder_state = (
_build_bidirectional_rnn(
inputs=encoder_emb_inp,
sequence_length=iterator.context_sequence_length,
dtype=dtype,
hparams=hparams,
mode=mode,
num_bi_layers=num_bi_layers,
num_bi_residual_layers=num_bi_residual_layers))
if num_bi_layers == 1:
encoder_state = bi_encoder_state
else:
# alternatively concat forward and backward states
encoder_state = []
for layer_id in range(num_bi_layers):
encoder_state.append(bi_encoder_state[0][layer_id]) # forward
encoder_state.append(bi_encoder_state[1][layer_id]) # backward
encoder_state = tuple(encoder_state)
return encoder_outputs, encoder_state
def _build_bidirectional_rnn(inputs,
sequence_length,
dtype,
hparams,
mode,
num_bi_layers,
num_bi_residual_layers,
base_gpu=0):
"""Create and call biddirectional RNN cells.
Args:
    num_bi_residual_layers: Number of residual layers from top to bottom. For
      example, if `num_bi_layers=4` and `num_bi_residual_layers=2`, the last 2
      RNN layers in each RNN cell will be wrapped with `ResidualWrapper`.
base_gpu: The gpu device id to use for the first forward RNN layer. The
i-th forward RNN layer will use `(base_gpu + i) % num_gpus` as its
device id. The `base_gpu` for backward RNN cell is `(base_gpu +
num_bi_layers)`.
Returns:
The concatenated bidirectional output and the bidirectional RNN cell"s
state.
"""
# Construct forward and backward cells
fw_cell = _build_encoder_cell(
hparams, mode, num_bi_layers, num_bi_residual_layers, base_gpu=base_gpu)
bw_cell = _build_encoder_cell(
hparams,
mode,
num_bi_layers,
num_bi_residual_layers,
base_gpu=(base_gpu + num_bi_layers))
bi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(
fw_cell,
bw_cell,
inputs,
dtype=dtype,
sequence_length=sequence_length,
time_major=True)
return tf.concat(bi_outputs, -1), bi_state
def _build_encoder_cell(hparams,
mode,
num_layers,
num_residual_layers,
base_gpu=0):
"""Build a multi-layer RNN cell that can be used by encoder."""
return model_helper.create_rnn_cell(
unit_type=hparams.unit_type,
num_units=hparams.num_units,
num_layers=num_layers,
num_residual_layers=num_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
num_gpus=hparams.num_gpus,
mode=mode,
base_gpu=base_gpu)
| apache-2.0 | -7,441,171,265,858,019,000 | 34.48913 | 80 | 0.660031 | false |
gracca/fits-for-roi | fits-for-roi.py | 1 | 2686 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#-----------------------------------------------------------------------#
# fits-for-roi.py #
# #
# Script to create FITS files from ROI observing output #
# Copyright (C) 2013 Germán A. Racca - <gracca[AT]gmail[DOT]com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
#-----------------------------------------------------------------------#
import ast
import pyfits
import numpy as np
# read input file
with open('d_110801.txt') as f:
    lines = f.readlines()
# define some variables
nheadlin = 22
nchannel = 2048
nspectra = len(lines) / nchannel
coef = nheadlin + nchannel + 1
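# Assumed input layout: each spectrum occupies a block of nheadlin (22) header
# lines followed by nchannel (2048) data lines plus one extra separator line,
# hence coef = 22 + 2048 + 1 lines per spectrum block.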
# create a list of "empty" spectra (header + data)
spec = [pyfits.PrimaryHDU() for i in range(nspectra)]
# read numerical data
nums = np.zeros(nchannel*nspectra, dtype='float32')
for i in range(nspectra):
limi = coef * i
lims = limi + nheadlin
nums[nchannel*i:nchannel*(i+1)] = lines[lims:lims+nchannel]
data = np.hsplit(nums, nspectra)
# read the headers
text = []
for i in range(nspectra):
limi = coef * i
lims = limi + nheadlin
text.append(lines[limi:lims-1])
# format the headers
for i, j in enumerate(text):
for m, k in enumerate(j):
l = k.strip().replace("'", "").split("=")
key = l[0].strip()
val = l[1].strip()
if m >= 4 and m <= 19:
val = ast.literal_eval(val)
spec[i].header.update(key, val)
# format the data
for i, j in enumerate(data):
spec[i].data = j
# create fits files
name = 'd_110801'
for i in range(nspectra):
n = name + '_' + str(i+1) + '.fits'
spec[i].writeto(n, clobber=True)
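# Output (for the hard-coded inputs above): one FITS file per spectrum, named
# d_110801_1.fits ... d_110801_<nspectra>.fits; clobber=True overwrites any
# existing files.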
| gpl-3.0 | 5,024,605,194,523,338,000 | 33.423077 | 73 | 0.512849 | false |
javivicente/e-lactancia | django_project/lactancia/urls.py | 1 | 3924 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
## EN CASTELLANO
# ex: /
url(r'^$', views.landing, name='landing'),
url(r'^buscar/$', views.buscar, name='buscar'),
url(r'^cookies/$', views.cookies, name='cookies'),
url(r'^privacidad/$', views.privacidad, name='privacidad'),
url(r'^aviso_legal/$', views.aviso_legal, name='aviso_legal'),
url(r'^avales/$', views.avales, name='avales'),
url(r'^patrocinadores/$', views.patrocinadores, name='patrocinadores'),
url(r'^donativos/$', views.donativos, name='donativos'),
url(r'^donativo-exito/$', views.donativo_exito, name='donativo_exito'),
url(r'^donativo-cancelado/$', views.donativo_cancelado, name='donativo_cancelado'),
url(r'^newsletter-error/$', views.boletin_error, name='boletin_error'),
url(r'^gracias-newsletter/$', views.boletin_ok, name='boletin_ok'),
url(r'^alertas/$', views.alerta_riesgos, name='alerta_riesgos'),
url(r'^creditos/$', views.creditos, name='creditos'),
url(r'^lista_negra/$', views.lista_negra, name='lista_negra'),
url(r'^limpia_cache/$', views.limpia_cache, name='limpia_cache'),
url(r'^estadisticas/$', views.estadisticas, name='estadisticas'),
url(r'^estadisticas_ES/$', views.estadisticas_ES, name='estadisticas_ES'),
url(r'^producto/(?P<producto_id>\d+)/$', views.detalle_p, name='detalle_p'),
url(r'^breastfeeding/(?P<slug>[\w-]+)/product/$', views.ficha_producto, name='ficha_producto'),
url(r'^grupo/(?P<grupo_id>\d+)/$', views.detalle_g, name='detalle_g'),
url(r'^breastfeeding/(?P<slug>[\w-]+)/group/$', views.ficha_grupo, name='ficha_grupo'),
url(r'^marca/(?P<marca_id>\d+)/$', views.detalle_m, name='detalle_m'),
url(r'^breastfeeding/(?P<slug>[\w-]+)/tradename/$', views.ficha_marca, name='ficha_marca'),
url(r'^sinonimo/(?P<alias_id>\d+)/$', views.detalle_ap, name='detalle_ap'),
url(r'^breastfeeding/(?P<slug>[\w-]+)/synonym/$', views.ficha_alias, name='ficha_alias'),
url(r'^otra_escritura/(?P<otra_escritura_id>\d+)/$', views.detalle_oe, name='detalle_oe'),
url(r'^breastfeeding/(?P<slug>[\w-]+)/writing/$', views.ficha_otra_escritura, name='ficha_otra_escritura'),
url(r'^download-citation/$', views.download_citation, name='download_citation'),
url(r'^API/get_list_of_terms$', views.get_list_of_terms, name='get_list_of_terms'),
url(r'^API/get_date_last_update_list_of_terms$', views.get_date_last_update_list_of_terms, name='get_date_last_update_list_of_terms'),
url(r'^API/get_term/(?P<prod_id>\d+)$', views.get_term, name='get_term'),
]
'''
# url(r'^resultado$', views.primer_resultado, name='primer_resultado'),
#
#
# url(r'^estadisticas$', views.estadisticas, name='estadisticas'),
# url(r'^estadisticas_ES$', views.estadisticas_ES, name='estadisticas_ES'),
# url(r'^lista_negra$', views.lista_negra, name='lista_negra'),
# url(r'^donativos$', views.donativos, name='donativos'),
# url(r'^donativo_exito$', views.donativo_exito, name='donativo_exito'),
# url(r'^donativo_cancelado$', views.donativo_cancelado, name='donativo_cancelado'),
# url(r'^boletin_error$', views.boletin_error, name='boletin_error'),
# url(r'^cookies_es$', views.cookies_es, name='cookies_es'),
# url(r'^privacidad$', views.privacidad, name='privacidad'),
#
'''
'''
# ex: /detalleproducto/5/
url(r'^producto/(?P<producto_id>\d+)$', views.detalle_p, name='detalle_p'),
# ex: /detalle_alias/5/
url(r'^alias_es/(?P<alias_id>\d+)$', views.detalle_ap, name='detalle_ap'),
# ex: /detale_grupo/5/
url(r'^grupo/(?P<grupo_id>\d+)$', views.detalle_g, name='detalle_g'),
# ex: /detalle_marca/5/
url(r'^marca/(?P<marca_id>\d+)$', views.detalle_m, name='detalle_m'),
'''
| gpl-2.0 | -5,510,966,935,095,292,000 | 53.267606 | 138 | 0.619011 | false |