Dataset columns: repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments

fastflo/emma | emmalib/providers/mysql/MySqlDb.py
"""
MySQL Database class handler
"""
# -*- coding: utf-8 -*-
# emma
#
# Copyright (C) 2006 Florian Schmidt ([email protected])
# 2014 Nickolay Karnaukhov ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from emmalib.providers.mysql.MySqlTable import MySqlTable
class MySqlDb(object):
"""
Class wraps MySQL Database logic
"""
def __init__(self, host, name=None):
self.handle = host.handle
self.host = host
self.charset = self.host.charset
if name is not None:
self.name = name
self.expanded = False
self.status_headers = []
self.tables = {}
else:
# print "unpickling tables!", self.handle
for name, table in self.tables.iteritems():
table.handle = self.handle
# self.id = id
def refresh(self):
"""
:return:
"""
self.host.select_database(self)
if self.host.is_at_least_version("4.1.1"):
self.host.query("show variables like 'character_set_database'")
result = self.handle.store_result()
row = result.fetch_row()
self.charset = row[0][1]
# print "using database charset %r" % self.charset
else:
self.charset = self.host.charset
# print "using server charset %r for this database" % self.charset
if not self.host.query("show table status"):
return
new_tables = []
result = self.handle.store_result()
self.status_headers = []
for row in result.describe():
self.status_headers.append(row[0])
old = dict(zip(self.tables.keys(), range(len(self.tables))))
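# (editor's note) 'old' indexes the tables known before this refresh; any
# entry still left after the loop below belongs to a table that no longer
# exists on the server and is removed from self.tables.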
for row in result.fetch_row(0):
if not row[0] in old:
# print "new table", row[0]
self.tables[row[0]] = MySqlTable(self, row, result.describe())
new_tables.append(row[0])
else:
# print "known table", row[0]
# todo update self.tables[row[0]] with row!
del old[row[0]]
for table in old:
# print "destroy table", table
del self.tables[table]
return new_tables
def query(self, query, check_use=True, append_to_log=True):
"""
:param query:
:param check_use:
:param append_to_log:
:return:
"""
self.host.select_database(self)
return self.host.query(query, check_use, append_to_log)
def get_escaped_name(self):
"""
:return: str
"""
        return self.name.replace('&', '&amp;').replace('<', '&lt;')
license: gpl-2.0

pinkevich/django-project-template | run.py
#!/usr/bin/env python
import sys
from concurrent.futures import ThreadPoolExecutor
from tornado import web, wsgi
from tornado.concurrent import run_on_executor
from tornado.web import url
from tornado.ioloop import IOLoop
from tornado.options import options, define
from wsgi import application as django_app
from django.conf import settings
define('port', type=int, default=8000)
define('host', type=str, default='127.0.0.1')
options.parse_command_line()
APP_SETTINGS = {
'static_path': settings.STATIC_ROOT,
'debug': settings.DEBUG,
'gzip': True}
class ThreadMixin(object):
executor = ThreadPoolExecutor(max_workers=4)
class FallbackHandler(ThreadMixin, web.FallbackHandler):
@run_on_executor
def prepare(self):
self.fallback(self.request)
self._finished = True
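# Editor's note: FallbackHandler runs the blocking Django WSGI call on the
# ThreadPoolExecutor (via @run_on_executor on prepare), so slow Django views
# do not block Tornado's IOLoop; setting _finished tells Tornado that the
# fallback already produced the response.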
application = web.Application([
url(r'/media/(.*)', web.StaticFileHandler, {'path': settings.MEDIA_ROOT}),
url(r'.*', FallbackHandler, {'fallback': wsgi.WSGIContainer(django_app)})], **APP_SETTINGS)
def main():
if APP_SETTINGS['debug']:
sys.stdout.write('Host: {}\n'.format(options.host))
sys.stdout.write('Port: {}\n\n'.format(options.port))
application.listen(options.port, options.host)
try:
IOLoop.instance().start()
except KeyboardInterrupt:
IOLoop.instance().stop()
if __name__ == '__main__':
main()
license: mit

R-daneel-olivaw/mutation-tolerance-voting | MutationFunc/threading/result_collator.py
'''
Created on Apr 12, 2015
@author: Akshat
'''
import pandas as pd
import matplotlib.pyplot as plt
class ResultCollator(object):
'''
classdocs
'''
distance_df = None
def __init__(self, worker_list, output_directory):
'''
Constructor
'''
self.output_directory = output_directory
self.worker_list = worker_list
def collate(self):
rows = []
for worker in self.worker_list:
dist_irv = None
dist_plu = None
dist_pluL = None
dist_stv = None
row = {}
if hasattr(worker,'distance_stv'):
dist_stv = worker.distance_stv
row['stv_dist'] = dist_stv[0]
row['stv_pval'] = dist_stv[1]
if hasattr(worker,'distance_irv'):
dist_irv = worker.distance_irv
row['irv_dist'] = dist_irv[0]
row['irv_pval'] = dist_irv[1]
if hasattr(worker,'distance_plu'):
dist_plu = worker.distance_plu
row['plurality_dist'] = dist_plu[0]
row['plurality_dpval'] = dist_plu[1]
if hasattr(worker,'distance_pluatL'):
dist_pluL = worker.distance_pluatL
row['plurality_at_large_dist'] = dist_pluL[0]
row['plurality_at_large_pval'] = dist_pluL[1]
rows.append(row)
df = pd.DataFrame(rows)
self.distance_df = df
ocsv_path = self.output_directory + '/result_distance.csv'
with open(ocsv_path, 'a') as f:
df.to_csv(f, header=False,index=False)
def graph_results(self):
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.set_xlim([0,10])
# ax.set_ylim([0,10])
ax.set_title('All Percentiles')
ax.set_xlabel("index")
ax.set_ylabel("Price($)")
ax.scatter(self.distance_df.index.values, self.distance_df['stv_dist'], edgecolors='none', s=5, color='red', label='50 percentile', alpha=0.5)  # column name matches 'stv_dist' built in collate()
# ax.scatter(df.index.values, df['75 percentile'], edgecolors='none', s=5, color='blue', label='75 percentile', alpha=0.5)
# ax.scatter(df.index.values, df['90 percentile'], edgecolors='none', s=5, color='yellow', label='90 percentile', alpha=0.5)
# ax.scatter(df.index.values, df['95 percentile'], edgecolors='none', s=5, color='green', label='95 percentile', alpha=0.5)
# ax.scatter(df.index.values, df['100 percentile'], edgecolors='none', s=5, color='magenta', label='100 percentile', alpha=0.5)
ax.set_ylim(80)
ax.set_xlim(0)
ax.legend(loc=0, scatterpoints=1)
# ax.scatter(scipy.randn(100), scipy.randn(100), c='r')
fig.set_size_inches(15, 5)
fig.savefig(self.output_directory + 'distance' + '_ALL.png', bbox_inches='tight')
license: lgpl-3.0

SanPen/GridCal | src/GridCal/Engine/Devices/hvdc_line.py
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from GridCal.Engine.basic_structures import Logger
from GridCal.Engine.Devices.bus import Bus
from GridCal.Engine.Devices.enumerations import BranchType
from GridCal.Engine.Devices.underground_line import UndergroundLineType
from GridCal.Engine.Devices.editable_device import EditableDevice, DeviceType, GCProp
from GridCal.Engine.Devices.tower import Tower
def firing_angles_to_reactive_limits(P, alphamin, alphamax):
# minimum reactive power calculated under assumption of no overlap angle
# i.e. power factor equals to tan(alpha)
Qmin = P * np.tan(alphamin)
# maximum reactive power calculated when overlap angle reaches max
# value (60 deg). I.e.
# cos(phi) = 1/2*(cos(alpha)+cos(delta))
# Q = P*tan(phi)
phi = np.arccos(0.5 * (np.cos(alphamax) + np.cos(np.deg2rad(60))))
Qmax = P * np.tan(phi)
# if Qmin < 0:
# Qmin = -Qmin
#
# if Qmax < 0:
# Qmax = -Qmax
return Qmin, Qmax
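# Editor's illustrative sketch (not part of the original GridCal module): a
# quick numerical check of firing_angles_to_reactive_limits(). The 100 MW set
# point and the 0 / 1 rad firing angles are made-up values.
def _example_firing_angle_limits():
    qmin, qmax = firing_angles_to_reactive_limits(100.0, 0.0, 1.0)
    # tan(0) = 0, so qmin is 0 MVAr; phi = arccos(0.5 * (cos(1.0) + cos(60 deg)))
    # is about 1.02 rad, giving qmax of roughly 164 MVAr.
    return qmin, qmax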
class HvdcLine(EditableDevice):
"""
The **Line** class represents the connections between nodes (i.e.
:ref:`buses<bus>`) in **GridCal**. A branch is an element (cable, line, capacitor,
transformer, etc.) with an electrical impedance. The basic **Branch** class
includes basic electrical attributes for most passive elements, but other device
types may be passed to the **Branch** constructor to configure it as a specific
type.
For example, a transformer may be created with the following code:
.. code:: ipython3
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Devices import *
from GridCal.Engine.Devices.types import *
# Create grid
grid = MultiCircuit()
# Create buses
POI = Bus(name="POI",
vnom=100, #kV
is_slack=True)
grid.add_bus(POI)
B_C3 = Bus(name="B_C3",
vnom=10) #kV
grid.add_bus(B_C3)
# Create transformer types
SS = TransformerType(name="SS",
hv_nominal_voltage=100, # kV
lv_nominal_voltage=10, # kV
nominal_power=100, # MVA
copper_losses=10000, # kW
iron_losses=125, # kW
no_load_current=0.5, # %
short_circuit_voltage=8) # %
grid.add_transformer_type(SS)
# Create transformer
X_C3 = Branch(bus_from=POI,
bus_to=B_C3,
name="X_C3",
branch_type=BranchType.Transformer,
template=SS,
)
# Add transformer to grid
grid.add_branch(X_C3)
Refer to the :class:`GridCal.Engine.Devices.branch.TapChanger` class for an example
using a voltage regulator.
Arguments:
**bus_from** (:ref:`Bus`): "From" :ref:`bus<Bus>` object
**bus_to** (:ref:`Bus`): "To" :ref:`bus<Bus>` object
**name** (str, "Branch"): Name of the branch
**r** (float, 1e-20): Branch resistance in per unit
**x** (float, 1e-20): Branch reactance in per unit
**g** (float, 1e-20): Branch shunt conductance in per unit
**b** (float, 1e-20): Branch shunt susceptance in per unit
**rate** (float, 1.0): Branch rate in MVA
**tap** (float, 1.0): Branch tap module
**shift_angle** (int, 0): Tap shift angle in radians
**active** (bool, True): Is the branch active?
**tolerance** (float, 0): Tolerance specified for the branch impedance in %
**mttf** (float, 0.0): Mean time to failure in hours
**mttr** (float, 0.0): Mean time to recovery in hours
**r_fault** (float, 0.0): Mid-line fault resistance in per unit (SC only)
**x_fault** (float, 0.0): Mid-line fault reactance in per unit (SC only)
**fault_pos** (float, 0.0): Mid-line fault position in per unit (0.0 = `bus_from`, 0.5 = middle, 1.0 = `bus_to`)
**branch_type** (BranchType, BranchType.Line): Device type enumeration (ex.: :class:`GridCal.Engine.Devices.transformer.TransformerType`)
**length** (float, 0.0): Length of the branch in km
**vset** (float, 1.0): Voltage set-point of the voltage controlled bus in per unit
**temp_base** (float, 20.0): Base temperature at which `r` is measured in °C
**temp_oper** (float, 20.0): Operating temperature in °C
**alpha** (float, 0.0033): Thermal constant of the material in °C
**bus_to_regulated** (bool, False): Is the `bus_to` voltage regulated by this branch?
**template** (BranchTemplate, BranchTemplate()): Basic branch template
"""
def __init__(self, bus_from: Bus = None, bus_to: Bus = None, name='HVDC Line', idtag=None, active=True,
rate=1.0, Pset=0.0, loss_factor=0.0, Vset_f=1.0, Vset_t=1.0, length=1.0, mttf=0.0, mttr=0.0,
overload_cost=1000.0, min_firing_angle_f=-1.0, max_firing_angle_f=1.0, min_firing_angle_t=-1.0,
max_firing_angle_t=1.0, active_prof=np.ones(0, dtype=bool), rate_prof=np.zeros(0),
Pset_prof=np.zeros(0), Vset_f_prof=np.ones(0), Vset_t_prof=np.ones(0), overload_cost_prof=np.zeros(0),
contingency_factor=1.0):
"""
HVDC Line model
:param bus_from: Bus from
:param bus_to: Bus to
:param idtag: id tag of the line
:param name: name of the line
:param active: Is the line active?
:param rate: Line rate in MVA
:param Pset: Active power set point
:param loss_factor: Losses factor (p.u.)
:param Vset_f: Voltage set point at the "from" side
:param Vset_t: Voltage set point at the "to" side
:param min_firing_angle_f: minimum firing angle at the "from" side
:param max_firing_angle_f: maximum firing angle at the "from" side
:param min_firing_angle_t: minimum firing angle at the "to" side
:param max_firing_angle_t: maximum firing angle at the "to" side
:param overload_cost: cost of a line overload in EUR/MW
:param mttf: Mean time to failure in hours
:param mttr: Mean time to recovery in hours
:param length: line length in km
:param active_prof: profile of active states (bool)
:param rate_prof: Profile of ratings in MVA
:param Pset_prof: Active power set points profile
:param Vset_f_prof: Voltage set points at the "from" side profile
:param Vset_t_prof: Voltage set points at the "to" side profile
:param overload_cost_prof: Profile of overload costs in EUR/MW
"""
EditableDevice.__init__(self,
name=name,
idtag=idtag,
active=active,
device_type=DeviceType.HVDCLineDevice,
editable_headers={'name': GCProp('', str, 'Name of the line.'),
'idtag': GCProp('', str, 'Unique ID'),
'bus_from': GCProp('', DeviceType.BusDevice,
'Name of the bus at the "from" side of the line.'),
'bus_to': GCProp('', DeviceType.BusDevice,
'Name of the bus at the "to" side of the line.'),
'active': GCProp('', bool, 'Is the line active?'),
'rate': GCProp('MVA', float, 'Thermal rating power of the line.'),
'contingency_factor': GCProp('p.u.', float,
'Rating multiplier for contingencies.'),
'Pset': GCProp('MW', float, 'Set power flow.'),
'loss_factor': GCProp('p.u.', float,
'Losses factor.\n'
'The losses are computed as losses=Pfset x Ploss'),
'Vset_f': GCProp('p.u.', float, 'Set voltage at the from side'),
'Vset_t': GCProp('p.u.', float, 'Set voltage at the to side'),
'min_firing_angle_f': GCProp('rad', float,
'minimum firing angle at the '
'"from" side.'),
'max_firing_angle_f': GCProp('rad', float,
'maximum firing angle at the '
'"from" side.'),
'min_firing_angle_t': GCProp('rad', float,
'minimum firing angle at the '
'"to" side.'),
'max_firing_angle_t': GCProp('rad', float,
'maximum firing angle at the '
'"to" side.'),
'mttf': GCProp('h', float, 'Mean time to failure, '
'used in reliability studies.'),
'mttr': GCProp('h', float, 'Mean time to recovery, '
'used in reliability studies.'),
'length': GCProp('km', float, 'Length of the branch '
'(not used for calculation)'),
'overload_cost': GCProp('e/MWh', float,
'Cost of overloads. Used in OPF.'),
},
non_editable_attributes=['bus_from', 'bus_to', 'idtag'],
properties_with_profile={'active': 'active_prof',
'rate': 'rate_prof',
'Pset': 'Pset_prof',
'Vset_f': 'Vset_f_prof',
'Vset_t': 'Vset_t_prof',
'overload_cost': 'overload_cost_prof'})
# connectivity
self.bus_from = bus_from
self.bus_to = bus_to
# List of measurements
self.measurements = list()
# line length in km
self.length = length
self.Pset = Pset
self.loss_factor = loss_factor
self.mttf = mttf
self.mttr = mttr
self.overload_cost = overload_cost
self.Vset_f = Vset_f
self.Vset_t = Vset_t
# converter / inverter firing angles
self.min_firing_angle_f = min_firing_angle_f
self.max_firing_angle_f = max_firing_angle_f
self.min_firing_angle_t = min_firing_angle_t
self.max_firing_angle_t = max_firing_angle_t
self.Qmin_f, self.Qmax_f = firing_angles_to_reactive_limits(self.Pset,
self.min_firing_angle_f,
self.max_firing_angle_f)
self.Qmin_t, self.Qmax_t = firing_angles_to_reactive_limits(self.Pset,
self.min_firing_angle_t,
self.max_firing_angle_t)
self.overload_cost_prof = overload_cost_prof
self.Pset_prof = Pset_prof
self.active_prof = active_prof
self.Vset_f_prof = Vset_f_prof
self.Vset_t_prof = Vset_t_prof
# branch rating in MVA
self.rate = rate
self.contingency_factor = contingency_factor
self.rate_prof = rate_prof
def get_from_and_to_power(self):
"""
Get the power set at both ends accounting for meaningful losses
:return: power from, power to
"""
A = int(self.Pset > 0)
B = 1 - A
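# (editor's note) A selects the set-point direction: A = 1 when power flows
# from the "from" side to the "to" side, B = 1 otherwise; losses are charged
# to the receiving end. For example, Pset = 100 MW and loss_factor = 0.02
# give Pf = -100 MW and Pt = 98 MW.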
Pf = - self.Pset * A + self.Pset * (1 - self.loss_factor) * B
Pt = self.Pset * A * (1 - self.loss_factor) - self.Pset * B
return Pf, Pt
def get_from_and_to_power_profiles(self):
"""
Get the power set at both ends accounting for meaningful losses
:return: power from, power to
"""
A = (self.Pset_prof > 0).astype(int)
B = 1 - A
Pf = - self.Pset_prof * A + self.Pset_prof * (1 - self.loss_factor) * B
Pt = self.Pset_prof * A * (1 - self.loss_factor) - self.Pset_prof * B
return Pf, Pt
def copy(self, bus_dict=None):
"""
Returns a copy of the branch
@return: A new with the same content as this
"""
if bus_dict is None:
f = self.bus_from
t = self.bus_to
else:
f = bus_dict[self.bus_from]
t = bus_dict[self.bus_to]
'''
bus_from: Bus = None,
bus_to: Bus = None,
name='HVDC Line',
idtag=None,
active=True,
rate=1.0, Pfset=0.0,
loss_factor=0.0,
Vset_f=1.0,
Vset_t=1.0,
length=1.0,
mttf=0.0,
mttr=0.0,
overload_cost=1000.0,
min_firing_angle_f=-1.0,
max_firing_angle_f=1.0,
min_firing_angle_t=-1.0,
max_firing_angle_t=1.0,
active_prof=np.ones(0, dtype=bool),
rate_prof=np.zeros(0),
Pset_prof=np.zeros(0),
Vset_f_prof=np.ones(0),
Vset_t_prof=np.ones(0),
overload_cost_prof=np.zeros(0)
'''
b = HvdcLine(bus_from=f,
bus_to=t,
name=self.name,
idtag=self.idtag,
rate=self.rate,
active=self.active,
loss_factor=self.loss_factor,
Vset_f=self.Vset_f,
Vset_t=self.Vset_t,
length=self.length,
mttf=self.mttf,
mttr=self.mttr,
overload_cost=self.overload_cost,
min_firing_angle_f=self.min_firing_angle_f,
max_firing_angle_f=self.max_firing_angle_f,
min_firing_angle_t=self.min_firing_angle_t,
max_firing_angle_t=self.max_firing_angle_t,
active_prof=self.active_prof,
rate_prof=self.rate_prof,
Pset_prof=self.Pset_prof,
Vset_f_prof=self.Vset_f_prof,
Vset_t_prof=self.Vset_t_prof,
overload_cost_prof=self.overload_cost_prof)
b.measurements = self.measurements
b.active_prof = self.active_prof.copy()
return b
def get_save_data(self):
"""
Return the data that matches the edit_headers
:return:
"""
data = list()
for name, properties in self.editable_headers.items():
obj = getattr(self, name)
if properties.tpe == DeviceType.BusDevice:
obj = obj.idtag
elif properties.tpe not in [str, float, int, bool]:
obj = str(obj)
data.append(obj)
return data
def get_properties_dict(self):
"""
Get json dictionary
:return:
"""
d = {'id': self.idtag,
'type': 'hvdc',
'phases': 'ps',
'name': self.name,
'name_code': self.code,
'bus_from': self.bus_from.idtag,
'bus_to': self.bus_to.idtag,
'active': self.active,
'rate': self.rate,
'r': 0,
'length': self.length,
'loss_factor': self.loss_factor,
'vset_from': self.Vset_f,
'vset_to': self.Vset_t,
'Pset': self.Pset,
'min_firing_angle_f': self.min_firing_angle_f,
'max_firing_angle_f': self.max_firing_angle_f,
'min_firing_angle_t': self.min_firing_angle_t,
'max_firing_angle_t': self.max_firing_angle_t,
'overload_cost': self.overload_cost,
'base_temperature': 20,
'operational_temperature': 20,
'alpha': 0.00330,
'locations': []
}
return d
def get_profiles_dict(self):
"""
:return:
"""
if self.active_prof is not None:
active_prof = self.active_prof.tolist()
rate_prof = self.rate_prof.tolist()
pset_prof = self.Pset_prof.tolist()
vset_prof_f = self.Vset_f_prof
vset_prof_t = self.Vset_t_prof
cost_prof = self.overload_cost_prof.tolist()
else:
active_prof = list()
rate_prof = list()
pset_prof = list()
cost_prof = list()
vset_prof_f = list()
vset_prof_t = list()
return {'id': self.idtag,
'active': active_prof,
'rate': rate_prof,
'Pset': pset_prof,
'vset_from': vset_prof_f,
'vset_to': vset_prof_t,
'overload_cost': cost_prof}
def get_units_dict(self):
"""
Get units of the values
"""
return {'rate': 'MW',
'length': 'km',
'loss_factor': '%',
'vset_f': 'p.u.',
'vset_t': 'p.u.',
'pset': 'MW',
'min_firing_angle_f': 'radians',
'max_firing_angle_f': 'radians',
'min_firing_angle_t': 'radians',
'max_firing_angle_t': 'radians',
'overload_cost': '€/MWh'}
def plot_profiles(self, time_series=None, my_index=0, show_fig=True):
"""
Plot the time series results of this object
:param time_series: TimeSeries Instance
:param my_index: index of this object in the simulation
:param show_fig: Show the figure?
"""
if time_series is not None:
fig = plt.figure(figsize=(12, 8))
ax_1 = fig.add_subplot(211)
ax_2 = fig.add_subplot(212, sharex=ax_1)
x = time_series.results.time
# loading
y = self.Pset_prof / (self.rate_prof + 1e-9) * 100.0
df = pd.DataFrame(data=y, index=x, columns=[self.name])
ax_1.set_title('Loading', fontsize=14)
ax_1.set_ylabel('Loading [%]', fontsize=11)
df.plot(ax=ax_1)
# losses
y = self.Pset_prof * self.loss_factor
df = pd.DataFrame(data=y, index=x, columns=[self.name])
ax_2.set_title('Losses', fontsize=14)
ax_2.set_ylabel('Losses [MVA]', fontsize=11)
df.plot(ax=ax_2)
plt.legend()
fig.suptitle(self.name, fontsize=20)
if show_fig:
plt.show()
def get_coordinates(self):
"""
Get the branch defining coordinates
"""
return [self.bus_from.get_coordinates(), self.bus_to.get_coordinates()]
license: gpl-3.0

chromano/annotate | project/annotate/views.py
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import simplejson
from django.views.decorators.csrf import csrf_exempt
from mongoengine import ValidationError
from annotate.models import AnnotatedDoc
from annotate.forms import AnnotatedDocumentCreationForm
def index(request):
ctx = {}
return render_to_response(
"annotate/index.html", RequestContext(request, ctx))
def new(request):
form_class = AnnotatedDocumentCreationForm
form = form_class()
if request.method == "POST":
form = form_class(request.POST)
if form.is_valid():
content = form.cleaned_data["content"]
title = form.cleaned_data['title']
content = content.split("\r\n\r\n")
text = [{'anot':segment, 'content':''} for segment in content]
text[0].update({'content': 'Annotate any paragraph by clicking the pencil\n icon to the right of the reference text.'})
doc = AnnotatedDoc(text=text, title=title)
doc.save()
return HttpResponseRedirect(reverse("edit_annotateddoc", args=[doc.id]))
ctx = {
'form': form
}
return render_to_response(
"annotate/new.html", RequestContext(request, ctx))
@csrf_exempt # FIXME: to be removed soon
def post(request):
if not request.POST or not "doc" in request.POST:
response = {"error": "Invalid request"}
else:
try:
text = simplejson.loads(request.POST["doc"])
doc_hash = request.POST.get("doc_hash")
except ValueError:
response = {"error": "Invalid document"}
else:
try:
doc = AnnotatedDoc.objects.get(id=doc_hash)
doc.text = text
except AnnotatedDoc.DoesNotExist:
doc = AnnotatedDoc(text=text)
except ValidationError:
doc = AnnotatedDoc(text=text)
except Exception, e:
raise Exception, "UH OH: %s" % e
finally:
doc.save()
response = {"success": True, "doc": str(doc.id)}
return HttpResponse(
simplejson.dumps(response), content_type="application/json")
def get(request, doc_hash):
if not request.GET or not doc_hash:
response = {"error": "Invalid request"}
try:
doc = AnnotatedDoc.objects.get(id=doc_hash)
except (ValidationError, AnnotatedDoc.DoesNotExist):
response = {"error": "Does not exist"}
else:
response = {"success": True, "text": doc.text}
return HttpResponse(
simplejson.dumps(response), content_type="application/json")
def edit(request, doc_hash):
return view(request, doc_hash, editable=True)
def view(request, doc_hash, editable=False):
try:
doc = AnnotatedDoc.objects.get(id=doc_hash)
except (ValidationError, AnnotatedDoc.DoesNotExist):
raise Http404
ctx = {'doc_hash': doc_hash,
'doc': doc,
'editable': editable,
}
return render_to_response(
"annotate/view.html", RequestContext(request, ctx))
def list(request):
docs = AnnotatedDoc.objects.all()
ctx = {
"docs" : docs
}
return render_to_response(
"annotate/list.html", RequestContext(request, ctx))
license: gpl-2.0

termoshtt/DataProcessor | lib/dataprocessor/pipes/scan.py
# coding=utf-8
"""Scan directories as nodes."""
import os
from glob import glob
from ..nodes import get, validate_link
from ..utility import abspath, boolenize
def directory(node_list, root, whitelist, followlinks=False):
"""Scan nodes from all directories under the directory 'root'.
If one directory has properties of both of 'run' and 'project',
type of the directory is set to 'run'.
Parameters
----------
root : str
Scan directories recursively under the directory `root`.
whitelist : list of str or str
Run node has one or more file or directory
which satisfies run_node_dir/`whitelist`.
And project nodes satisfy project_dir/run_node_dir/`whitelist`.
str can be specified by wildcard.
followlinks : {'False', 'True'}, optional
Whether scan in symbolic link.
Be aware that setting this to True may lead to infinite recursion.
Returns
-------
node_list
Examples
--------
>>> # Initialize node_list.
>>> node_list = directory([], "scandir_path", ["data/hoge*", "*foo*"])
>>> # Rescan node_list.
>>> node_list = [
... {'path': '/tmp/scan_dir/run0',
... 'parents': [], # empty
... 'children': [], # empty
... 'name': 'run0',
... 'type': 'run'}]
>>> node_list = directory([], "scandir_path", ["*.conf"])
"""
root = abspath(root)
followlinks = boolenize(followlinks)
if isinstance(whitelist, str):
whitelist = [whitelist]
scan_nodelist = []
for path, dirs, files in os.walk(root, followlinks=followlinks):
dirs.sort()
node_type = None
parents = []
children = []
if not get(node_list, path) is None:
continue
for child in dirs:
for white in whitelist:
if glob(os.path.join(path, child, white)):
node_type = "project"
children.append(os.path.join(path, child))
break
for white in whitelist:
if glob(os.path.join(path, white)):
node_type = "run"
parents.append(os.path.dirname(path))
break
if not node_type:
continue
scan_nodelist.append({"path": path,
"parents": parents,
"children": children,
"type": node_type,
"name": os.path.basename(path),
})
origin_len = len(node_list)
node_list = node_list + scan_nodelist
for node in node_list[origin_len:]:
validate_link(node_list, node)
return node_list
def register(pipe_dics):
pipe_dics["scan_directory"] = {
"func": directory,
"args": [("root", {"help": "path of root directory"}),
("whitelist",
{"help": "whitelist of file which exists in run directory",
"nargs": "+", }),
],
"kwds": [("followlinks", {"help": "whether scan in symbolic link"})],
"desc": "Scan nodes from all directories under the directory 'root'.",
}
license: gpl-3.0

piyanatk/sim | scripts/fg1p/apply_filters2.py
"""Apply a set of rolling filters to a data cube.
This copy reads complex filter datasets.
"""
import argparse
import os
from glob import glob
from multiprocessing import Pool, Array, current_process
import numpy as np
import xarray as xr
from opstats.foreground import apply_filter
def do_mask(i):
process = current_process().pid
print('... P{:d}: applying filter {:s}'
.format(process, filter_files[i].split('/')[-1]))
filter_ds = xr.open_dataset(filter_files[i])
filter_array = filter_ds['real'].values + (1j * filter_ds['imag'].values)
data_channels = filter_ds.attrs['frequency_channels']
image_channel = int(np.floor(filter_ds['real'].shape[0] / 2))
# Figure out FFT and filter normalization
# FFT normalization factor
x = filter_ds.attrs['x']
y = filter_ds.attrs['y']
f = filter_ds.attrs['f']
dx = x[1] - x[0]
dy = y[1] - y[0]
df = f[1] - f[0]
u = filter_ds.attrs['u']
v = filter_ds.attrs['v']
e = filter_ds.attrs['e']
du = u[1] - u[0]
dv = v[1] - v[0]
de = e[1] - e[0]
fft_norm = dx * dy * df
ifft_norm = du * dv * de * filter_array.size
# Filter normalization factor
filter_volume = np.sum(filter_array.size * du * dv * de)
filter_integral = np.sum(np.abs(filter_array) ** 2 * du * dv * de)
filter_norm = np.sqrt(filter_volume / filter_integral)
# Apply filter
filtered_data = apply_filter(
data_array[data_channels], filter_array,
fft_multiplier=fft_norm, ifft_multiplier=ifft_norm,
output_multiplier=filter_norm, apply_window_func=True,
invert_filter=False
).real
# Select and store the center channel of the filtered data array
filtered_data_array[data_channels[image_channel]] = \
filtered_data[image_channel]
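# Editor's illustrative check (not part of the original script): for a flat,
# unit-magnitude filter the normalisation factor above reduces to 1, because
# the integral of |H|^2 over the cube equals the cube volume. The grid size
# and bin widths here are made up.
def _example_filter_norm():
    h = np.ones((4, 4, 4), dtype=complex)   # hypothetical flat filter
    du = dv = de = 0.5                       # hypothetical bin widths
    volume = h.size * du * dv * de
    integral = np.sum(np.abs(h) ** 2 * du * dv * de)
    return np.sqrt(volume / integral)        # == 1.0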
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('data_cube', type=str)
parser.add_argument('filter_directory', type=str,
help='filter directory containing *.nc filter files.')
parser.add_argument('output_file', type=str)
parser.add_argument('--nprocs', type=int, default=4)
args = parser.parse_args()
print('Data cube: {:s}'.format(args.data_cube))
print('Filter directory: {:s}'.format(args.filter_directory))
# Read input data cube
data_da = xr.open_dataarray(args.data_cube)
data_array = data_da.values
# Create shared memory array to store filtered data cube
filtered_data_array_base = Array('d', data_array.size)
filtered_data_array = np.frombuffer(filtered_data_array_base.get_obj())
filtered_data_array.shape = data_array.shape
# Read in list of filter files
filter_files = glob('{:s}/*.nc'.format(args.filter_directory))
filter_files.sort()
nbins = len(filter_files)
# Attributes for output files
# Temporarily read the first filter to retrieve filter information
ds0 = xr.open_dataset(filter_files[0])
filter_bandwidth = ds0.attrs['filter_bandwidth']
image_bandwidth = ds0.attrs['channel_bandwidth']
print('Filter bandwidth: {:.1f} Hz'.format(filter_bandwidth))
print('Image bandwidth: {:.1f} Hz'.format(image_bandwidth))
ds0.close()
output_attrs = data_da.attrs
extra_attrs = {'filter_type': 'wedge',
'extra_filter_shift': 'None',
'filter_bandwidth': filter_bandwidth,
'image_bandwidth': image_bandwidth}
for key, value in extra_attrs.items():
output_attrs[key] = value
# Check output directory
output_dir = args.output_file.rsplit('/', 1)[0]
os.makedirs(output_dir, exist_ok=True)
# Start processing pool
pool = Pool(args.nprocs)
pool.map(do_mask, range(nbins))
pool.close()
pool.join()
# Save output
da = xr.DataArray(filtered_data_array, dims=['f', 'y', 'x'],
coords={'x': data_da.x.values, 'y': data_da.y.values,
'f': data_da.f.values},
attrs=output_attrs)
da.to_netcdf(args.output_file)
print('Saving output to {:s}'.format(args.output_file))
license: mit

WosunOO/nca_xianshu | nca47/objects/dns/dns_gpool.py
from nca47.db import api as db_api
from nca47.db.sqlalchemy.models import GPoolInfo as GPoolModel
from nca47.objects import base
from nca47.objects import fields as object_fields
class DnsGPool(base.Nca47Object):
VERSION = '1.0'
fields = {
'tenant_id': object_fields.StringField(),
'name': object_fields.StringField(),
'enable': object_fields.StringField(),
'pass_': object_fields.StringField(),
'ttl': object_fields.StringField(),
'max_addr_ret': object_fields.StringField(),
'cname': object_fields.StringField(),
'first_algorithm': object_fields.StringField(),
'second_algorithm': object_fields.StringField(),
'fallback_ip': object_fields.StringField(),
'hms': object_fields.ListOfStringsField(),
'gmember_list': object_fields.ListOfStringsField(),
'warning': object_fields.StringField(),
'gpool_id': object_fields.StringField(),
}
def __init__(self, context=None, **kwargs):
self.db_api = db_api.get_instance()
super(DnsGPool, self).__init__(context=None, **kwargs)
@staticmethod
def __from_db_object(dns_gpool, db_dns_gpool):
"""
:param dns_syngroup:
:param db_dns_syngroup:
:return:
"""
for field in dns_gpool.fields:
dns_gpool[field] = db_dns_gpool
dns_gpool.obj_reset_changes()
return dns_gpool
def create(self, context, values):
gpool = self.db_api.create(GPoolModel, values)
return gpool
def update(self, context, id, values):
gpool = self.db_api.update_object(GPoolModel, id, values)
return gpool
def get_object(self, context, **values):
gpool = self.db_api.get_object(GPoolModel, **values)
return gpool
# def get_objects(self, context, **values):
# gpool = self.db_api.get_objects(GPoolModel, **values)
# return gpool
def delete(self, context, id):
gpool = self.db_api.delete_object(GPoolModel, id)
return gpool
def get_objects(self, context, str_sql):
gpool = self.db_api.get_all_object(GPoolModel, str_sql)
return gpool
license: apache-2.0

walterbender/Pippy | data/en/tutorials/Tutorial_14_menu.py
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class PyApp(Gtk.Window):
def __init__(self):
super(PyApp, self).__init__()
self.set_title('Menu')
self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
self.set_size_request(250, 150)
accel_group = Gtk.AccelGroup()
self.add_accel_group(accel_group)
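# Editor's note: the accelerators below all follow the same pattern:
# Gtk.accelerator_parse() turns a string such as '<Ctrl>N' into a key/modifier
# pair, and add_accelerator('activate', ...) binds it to the menu item, so the
# keyboard shortcut fires the same handler as clicking the item.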
vbox = Gtk.VBox()
menubar = Gtk.MenuBar()
vbox.pack_start(menubar, False, False, 0)
self.label = Gtk.Label(label='Activate a menu item')
vbox.pack_start(self.label, True, True, 0)
menu_file = Gtk.Menu()
item_file = Gtk.MenuItem.new_with_mnemonic('_File')
item_file.set_submenu(menu_file)
menubar.append(item_file)
item_new = Gtk.MenuItem.new_with_mnemonic('_New')
key, mod = Gtk.accelerator_parse('<Ctrl>N')
item_new.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_new.connect('activate', self._activate_cb, 'New')
menu_file.append(item_new)
item_open = Gtk.MenuItem.new_with_mnemonic('_Open')
key, mod = Gtk.accelerator_parse('<Ctrl>O')
item_open.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_open.connect('activate', self._activate_cb, 'Open')
menu_file.append(item_open)
menu_recents = Gtk.Menu()
item_recents = Gtk.MenuItem.new_with_mnemonic('Open _recents')
item_recents.set_submenu(menu_recents)
menu_file.append(item_recents)
for recent_file in range(1, 6):
item_recent = Gtk.MenuItem.new_with_mnemonic('_%d: Recent file %d' % (recent_file, recent_file))
item_recent.connect('activate', self._activate_cb, 'Recent file %d' % recent_file)
menu_recents.append(item_recent)
separator = Gtk.SeparatorMenuItem()
menu_file.append(separator)
item_exit = Gtk.MenuItem.new_with_mnemonic('_Quit')
key, mod = Gtk.accelerator_parse('<Ctrl>Q')
item_exit.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_exit.connect('activate', self._activate_cb, 'Quit')
menu_file.append(item_exit)
menu_edit = Gtk.Menu()
item_edit = Gtk.MenuItem.new_with_mnemonic('_Edit')
item_edit.set_submenu(menu_edit)
menubar.append(item_edit)
item_undo = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_UNDO, None)
key, mod = Gtk.accelerator_parse('<Ctrl>Z')
item_undo.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_undo.connect('activate', self._activate_cb, 'Undo')
menu_edit.append(item_undo)
item_redo = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_REDO, None)
key, mod = Gtk.accelerator_parse('<Ctrl><Shift>Z')
item_redo.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_redo.connect('activate', self._activate_cb, 'Redo')
menu_edit.append(item_redo)
separator = Gtk.SeparatorMenuItem()
menu_edit.append(separator)
item_copy = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_COPY, None)
key, mod = Gtk.accelerator_parse('<Ctrl>C')
item_copy.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_copy.connect('activate', self._activate_cb, 'Copy')
menu_edit.append(item_copy)
item_cut = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_CUT, None)
key, mod = Gtk.accelerator_parse('<Ctrl>X')
item_cut.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_cut.connect('activate', self._activate_cb, 'Cut')
menu_edit.append(item_cut)
item_paste = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_PASTE, None)
key, mod = Gtk.accelerator_parse('<Ctrl>V')
item_paste.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_paste.connect('activate', self._activate_cb, 'Paste')
menu_edit.append(item_paste)
separator = Gtk.SeparatorMenuItem()
menu_edit.append(separator)
label = 'Vertical page'
item_vertical = Gtk.RadioMenuItem(label=label)
item_vertical.set_active(True)
item_vertical.connect('toggled', self._toggled_cb, label)
menu_edit.append(item_vertical)
label = 'Horizontal page'
item_horizontal = Gtk.RadioMenuItem.new_with_label((item_vertical,), label)
item_horizontal.connect('toggled', self._toggled_cb, label)
menu_edit.append(item_horizontal)
menu_view = Gtk.Menu()
item_view = Gtk.MenuItem.new_with_mnemonic('_View')
item_view.set_submenu(menu_view)
menubar.append(item_view)
item_hides = Gtk.CheckMenuItem.new_with_mnemonic('_Hidden files')
key, mod = Gtk.accelerator_parse('<Ctrl>H')
item_hides.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_hides.connect('toggled', self._toggled_cb, 'Hidden files', True)
menu_view.append(item_hides)
menu_help = Gtk.Menu()
item_help = Gtk.MenuItem(label='Help')
item_help.set_submenu(menu_help)
menubar.append(item_help)
item_about = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_ABOUT, None)
item_about.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE)
item_about.connect('activate', self._activate_cb, 'About')
menu_help.append(item_about)
self.add(vbox)
self.show_all()
self.connect('destroy', Gtk.main_quit)
def _activate_cb(self, item, label):
self.label.set_text('You activated %s item' % label)
def _toggled_cb(self, item, label, no_active=False):
if item.get_active():
self.label.set_text('You activated %s item' % label)
elif not item.get_active() and no_active:
self.label.set_text('You deactivate %s item' % label)
PyApp()
Gtk.main()
license: gpl-3.0

jrd/pylibsalt | libsalt/salt.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set et ai sta sts=2 sw=2 ts=2 tw=0:
"""
SaLT functions:
- getSaLTVersion
- isSaLTVersionAtLeast
- isSaLTLiveEnv
- isSaLTLiveCloneEnv
- getSaLTLiveMountPoint
- getSaLTRootDir
- getSaLTIdentFile
- getSaLTBaseDir
- listSaLTModules
- installSaLTModule
"""
from __future__ import print_function, unicode_literals, absolute_import
from .freesize import *
import os
import glob
import re
import subprocess
from threading import Thread
from time import sleep
def getSaLTVersion():
"""
Returns the SaLT version if run in a SaLT Live environment
"""
_checkLive()
return open('/mnt/salt/salt-version', 'r').read().strip()
def isSaLTVersionAtLeast(version):
"""
Returns True if the SaLT version is at least 'version'.
"""
v = getSaLTVersion()
def vercmp(v1, v2):
def _makelist(v):
lst = [int(x) for x in re.sub(r'[a-z]', '', v.lower()).split('.')]
while lst[-1] == 0:
lst.pop()
return lst
return _makelist(v1).__ge__(_makelist(v2))
return vercmp(version, v)
def isSaLTLiveEnv():
"""
Returns True if it is executed in a SaLT Live environment, False otherwise
"""
return os.path.isfile('/mnt/salt/salt-version') and os.path.isfile('/mnt/salt/tmp/distro_infos')
def _checkLive():
if not isSaLTLiveEnv():
raise Exception('Not in SaLT Live environment.')
def isSaLTLiveCloneEnv():
"""
Returns True if it is executed in a SaLT LiveClone environment, False otherwise
"""
if not isSaLTLiveEnv():
return False
else:
moduledir = '{0}/{1}/{2}/modules'.format(getSaLTLiveMountPoint(), getSaLTBaseDir(), getSaLTRootDir())
return os.path.isfile(moduledir + '/01-clone.salt')
def getSaLTLiveMountPoint():
"""
Returns the SaLT source mount point path. It could be the mount point of the optical drive or the USB stick for example.
"""
_checkLive()
try:
# format:
# mountpoint:device
ret = open('/mnt/salt/tmp/distro_infos', 'r').read().splitlines()[0].split(':', 1)[0]
except:
ret = None
return "/mnt/salt{0}".format(ret)
def getSaLTRootDir():
"""
Returns the SaLT ROOT_DIR, which is the directory containing SaLT modules.
This is not the full path but a relative path to BASEDIR.
"""
_checkLive()
ret = None
for line in open('/mnt/salt/etc/salt.cfg', 'r').read().splitlines():
if line.startswith('ROOT_DIR='):
ret = line.split('=', 1)[1]
break
return ret
def getSaLTIdentFile():
"""
Returns the SaLT IDENT_FILE, which is the file located at the root of a filesystem containing some SaLT information for this Live session.
This is not the full path but a relative path to the mount point.
"""
_checkLive()
ret = None
for line in open('/mnt/salt/etc/salt.cfg', 'r').read().splitlines():
if line.startswith('IDENT_FILE='):
ret = line.split('=', 1)[1]
break
return ret
def getSaLTBaseDir():
"""
Returns the SaLT BASEDIR, which is the directory containing all files for this Live session.
This is not a full path but a relative path to the mount point.
"""
_checkLive()
mountpoint = getSaLTLiveMountPoint()
identfile = getSaLTIdentFile()
ret = None
if mountpoint and identfile:
for line in open('{0}/{1}'.format(mountpoint, identfile), 'r').read().splitlines():
if line.startswith('basedir='):
ret = line.split('=', 1)[1]
break
if ret is not None and len(ret) == 0:
ret = '.'  # avoid an empty path; GNU tools accept a path like a/b//c/d, but a/b/./c/d is preferable
return ret
def listSaLTModules():
"""
Returns the list of SaLT modules for this Live session.
"""
_checkLive()
moduledir = '{0}/{1}/{2}/modules'.format(getSaLTLiveMountPoint(), getSaLTBaseDir(), getSaLTRootDir())
return sorted(map(lambda(x): re.sub(r'.*/([^/]+).salt$', r'\1', x), glob.glob('{0}/*.salt'.format(moduledir))))
def getSaLTModulePath(moduleName):
"""
Get the module full path.
"""
return '/mnt/salt/mnt/modules/{0}'.format(moduleName)
def installSaLTModule(moduleName, moduleSize, targetMountPoint, callback, callback_args=(), interval = 10, completeCallback = None):
"""
Install the module 'moduleName' from this Live session into the targetMountPoint.
'moduleSize' is the uncompressed size of the module expressed in bytes.
The 'callback' function will be called every 'interval' seconds with the progress fraction (0 ≤ x ≤ 1, based on the used size of the target partition) as first argument and all values of callback_args as the next arguments
The 'completeCallback' function will be called after the completion of installation.
"""
_checkLive()
src = getSaLTModulePath(moduleName)
if not os.path.isdir(src):
raise IOError("The module '{0}' does not exists".format(moduleName))
if not os.path.isdir(targetMountPoint):
raise IOError("The target mount point '{0}' does not exists".format(targetMountPoint))
def get_used_size(p):
return getSizes(p, False)['used']
class ExecCopyTask:
def _run(self, *args, **kwargs):
cmd = args[0]
self._p = subprocess.Popen(cmd)
self._p.wait()
def start(self, cmd):
self._t = Thread(target=self._run, args=(cmd,))
self._t.start()
def is_running(self):
return self._t and self._t.is_alive()
def stop(self):
if self._p:
self._p.kill()
self._p = None
init_size = get_used_size(targetMountPoint)
actual_size = init_size
t = ExecCopyTask()
t.start(['cp', '--preserve', '-r', '-f', '--remove-destination', '{0}/.'.format(src), targetMountPoint + '/'])
while t.is_running():
for x in range(interval):
sleep(1)
if not t.is_running():
break
if t.is_running():
actual_size = get_used_size(targetMountPoint)
diff_size = float(actual_size - init_size)
if diff_size < 0: # is this possible?
diff_size = 0
p = diff_size / moduleSize
if p > 1:
p = 1
if not callback(p, *callback_args):
t.stop()
if completeCallback:
completeCallback()
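# Editor's illustrative sketch (not part of pylibsalt): driving
# installSaLTModule() with a minimal progress callback. The module name, its
# uncompressed size and the target mount point below are hypothetical.
def _example_install_progress():
    def _progress(fraction):
        print('install: {0:.0%}'.format(fraction))
        return True  # returning a falsy value would abort the copy
    def _done():
        print('installation finished')
    installSaLTModule('01-core', 700 * 1024 * 1024, '/mnt/target',
                      _progress, interval=5, completeCallback=_done)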
license: gpl-2.0

arnoldlu/lisa | libs/utils/trace.py | 2 | 31965 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Trace Parser Module """
import numpy as np
import os
import pandas as pd
import sys
import trappy
import json
import warnings
import operator
import logging
from analysis_register import AnalysisRegister
from collections import namedtuple
from devlib.utils.misc import memoized
from trappy.utils import listify, handle_duplicate_index
NON_IDLE_STATE = -1
ResidencyTime = namedtuple('ResidencyTime', ['total', 'active'])
ResidencyData = namedtuple('ResidencyData', ['label', 'residency'])
class Trace(object):
"""
The Trace object is the LISA trace events parser.
:param platform: a dictionary containing information about the target
platform
:type platform: dict
:param data_dir: folder containing all trace data
:type data_dir: str
:param events: events to be parsed (everything in the trace by default)
:type events: list(str)
:param window: time window to consider when parsing the trace
:type window: tuple(int, int)
:param normalize_time: normalize trace time stamps
:type normalize_time: bool
:param trace_format: format of the trace. Possible values are:
- FTrace
- SysTrace
:type trace_format: str
:param plots_dir: directory where to save plots
:type plots_dir: str
:param plots_prefix: prefix for plots file names
:type plots_prefix: str
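Example (editor's illustrative sketch, not from the original module; the
platform dictionary and trace path below are hypothetical):

.. code:: python

    platform = {'clusters': {'little': [0, 1], 'big': [2, 3]}}
    trace = Trace(platform, '/path/to/trace.dat',
                  events=['sched_switch', 'cpu_frequency'])
    switches = trace.data_frame.trace_event('sched_switch')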
"""
def __init__(self, platform, data_dir, events,
window=(0, None),
normalize_time=True,
trace_format='FTrace',
plots_dir=None,
plots_prefix=''):
# The platform used to run the experiments
self.platform = platform
# TRAPpy Trace object
self.ftrace = None
# Trace format
self.trace_format = trace_format
# The time window used to limit trace parsing to
self.window = window
# Dynamically registered TRAPpy events
self.trappy_cls = {}
# Maximum timespan for all collected events
self.time_range = 0
# Time the system was overutilized
self.overutilized_time = 0
self.overutilized_prc = 0
# List of events required by user
self.events = []
# List of events available in the parsed trace
self.available_events = []
# Cluster frequency coherency flag
self.freq_coherency = True
# Folder containing all trace data
self.data_dir = None
# Setup logging
self._log = logging.getLogger('Trace')
# Folder containing trace
if not os.path.isdir(data_dir):
self.data_dir = os.path.dirname(data_dir)
else:
self.data_dir = data_dir
# By default, use the trace dir to save plots
self.plots_dir = plots_dir
if self.plots_dir is None:
self.plots_dir = self.data_dir
self.plots_prefix = plots_prefix
self.__registerTraceEvents(events)
self.__parseTrace(data_dir, window, normalize_time,
trace_format)
self.__computeTimeSpan()
# Minimum and Maximum x_time to use for all plots
self.x_min = 0
self.x_max = self.time_range
# Reset x axis time range to full scale
t_min = self.window[0]
t_max = self.window[1]
self.setXTimeRange(t_min, t_max)
self.data_frame = TraceData()
self._registerDataFrameGetters(self)
self.analysis = AnalysisRegister(self)
def _registerDataFrameGetters(self, module):
"""
Internal utility function that looks up getter functions with a "_dfg_"
prefix in their name and binds them to the specified module.
:param module: module to which the function is added
:type module: class
"""
self._log.debug('Registering [%s] local data frames', module)
for func in dir(module):
if not func.startswith('_dfg_'):
continue
dfg_name = func.replace('_dfg_', '')
dfg_func = getattr(module, func)
self._log.debug(' %s', dfg_name)
setattr(self.data_frame, dfg_name, dfg_func)
def setXTimeRange(self, t_min=None, t_max=None):
"""
Set x axis time range to the specified values.
:param t_min: lower bound
:type t_min: int or float
:param t_max: upper bound
:type t_max: int or float
"""
if t_min is None:
self.x_min = 0
else:
self.x_min = t_min
if t_max is None:
self.x_max = self.time_range
else:
self.x_max = t_max
self._log.debug('Set plots time range to (%.6f, %.6f)[s]',
self.x_min, self.x_max)
def __registerTraceEvents(self, events):
"""
Save a copy of the parsed events.
:param events: single event name or list of events names
:type events: str or list(str)
"""
if isinstance(events, basestring):
self.events = events.split(' ')
elif isinstance(events, list):
self.events = events
else:
raise ValueError('Events must be a string or a list of strings')
# Register devlib fake cpu_frequency events
if 'cpu_frequency' in events:
self.events.append('cpu_frequency_devlib')
def __parseTrace(self, path, window, normalize_time, trace_format):
"""
Internal method in charge of performing the actual parsing of the
trace.
:param path: path to the trace folder (or trace file)
:type path: str
:param window: time window to consider when parsing the trace
:type window: tuple(int, int)
:param normalize_time: normalize trace time stamps
:type normalize_time: bool
:param trace_format: format of the trace. Possible values are:
- FTrace
- SysTrace
:type trace_format: str
"""
self._log.debug('Loading [sched] events from trace in [%s]...', path)
self._log.debug('Parsing events: %s', self.events)
if trace_format.upper() == 'SYSTRACE' or path.endswith('html'):
self._log.debug('Parsing SysTrace format...')
trace_class = trappy.SysTrace
self.trace_format = 'SysTrace'
elif trace_format.upper() == 'FTRACE':
self._log.debug('Parsing FTrace format...')
trace_class = trappy.FTrace
self.trace_format = 'FTrace'
else:
raise ValueError("Unknown trace format {}".format(trace_format))
self.ftrace = trace_class(path, scope="custom", events=self.events,
window=window, normalize_time=normalize_time)
# Load Functions profiling data
has_function_stats = self._loadFunctionsStats(path)
# Check for events available on the parsed trace
self.__checkAvailableEvents()
if len(self.available_events) == 0:
if has_function_stats:
self._log.info('Trace contains only functions stats')
return
raise ValueError('The trace does not contain useful events '
'nor function stats')
# Index PIDs and Task names
self.__loadTasksNames()
# Setup internal data reference to interesting events/dataframes
self._sanitize_SchedLoadAvgCpu()
self._sanitize_SchedLoadAvgTask()
self._sanitize_SchedCpuCapacity()
self._sanitize_SchedBoostCpu()
self._sanitize_SchedBoostTask()
self._sanitize_SchedEnergyDiff()
self._sanitize_SchedOverutilized()
self._sanitize_CpuFrequency()
# Compute plot window
if not normalize_time:
start = self.window[0]
if self.window[1]:
duration = min(self.ftrace.get_duration(), self.window[1])
else:
duration = self.ftrace.get_duration()
self.window = (self.ftrace.basetime + start,
self.ftrace.basetime + duration)
def __checkAvailableEvents(self, key=""):
"""
Internal method used to build a list of available events.
:param key: key to be used for TRAPpy filtering
:type key: str
"""
for val in self.ftrace.get_filters(key):
obj = getattr(self.ftrace, val)
if len(obj.data_frame):
self.available_events.append(val)
self._log.debug('Events found on trace:')
for evt in self.available_events:
self._log.debug(' - %s', evt)
def __loadTasksNames(self):
"""
Try to load tasks names using one of the supported events.
"""
def load(event, name_key, pid_key):
df = self._dfg_trace_event(event)
self._scanTasks(df, name_key=name_key, pid_key=pid_key)
if 'sched_switch' in self.available_events:
load('sched_switch', 'prev_comm', 'prev_pid')
return
if 'sched_load_avg_task' in self.available_events:
load('sched_load_avg_task', 'comm', 'pid')
return
self._log.warning('Failed to load tasks names from trace events')
def hasEvents(self, dataset):
"""
Returns True if the specified event is present in the parsed trace,
False otherwise.
:param dataset: trace event name or list of trace events
:type dataset: str or list(str)
"""
if dataset in self.available_events:
return True
return False
def __computeTimeSpan(self):
"""
Compute time axis range, considering all the parsed events.
"""
ts = sys.maxint
te = 0
for events in self.available_events:
df = self._dfg_trace_event(events)
if len(df) == 0:
continue
if (df.index[0]) < ts:
ts = df.index[0]
if (df.index[-1]) > te:
te = df.index[-1]
self.time_range = te - ts
self._log.debug('Collected events spans a %.3f [s] time interval',
self.time_range)
# Build a stat on trace overutilization
if self.hasEvents('sched_overutilized'):
df = self._dfg_trace_event('sched_overutilized')
self.overutilized_time = df[df.overutilized == 1].len.sum()
self.overutilized_prc = 100. * self.overutilized_time / self.time_range
self._log.debug('Overutilized time: %.6f [s] (%.3f%% of trace time)',
self.overutilized_time, self.overutilized_prc)
def _scanTasks(self, df, name_key='comm', pid_key='pid'):
"""
Extract tasks names and PIDs from the input data frame. The data frame
should contain a task name column and PID column.
:param df: data frame containing trace events from which tasks names
and PIDs will be extracted
:type df: :mod:`pandas.DataFrame`
:param name_key: The name of the dataframe columns containing task
names
:type name_key: str
:param pid_key: The name of the dataframe columns containing task PIDs
:type pid_key: str
"""
df = df[[name_key, pid_key]]
self._tasks_by_pid = (df.drop_duplicates(subset=pid_key, keep='last')
.rename(columns={
pid_key : 'PID',
name_key : 'TaskName'})
.set_index('PID').sort_index())
def getTaskByName(self, name):
"""
Get the PIDs of all tasks with the specified name.
The same PID can have different task names, mainly because once a task
is generated it inherits the parent name and then its name is updated
to represent what the task really is.
This API works under the assumption that a task name is updated at
most one time and it always considers the name a task had the last time
it has been scheduled for execution in the current trace.
:param name: task name
:type name: str
:return: a list of PIDs for tasks whose name matches the required one,
the last time they ran in the current trace
"""
return (self._tasks_by_pid[self._tasks_by_pid.TaskName == name]
.index.tolist())
def getTaskByPid(self, pid):
"""
Get the name of the task with the specified PID.
The same PID can have different task names, mainly because once a task
is generated it inherits the parent name and then its name is
updated to represent what the task really is.
This API works under the assumption that a task name is updated at
most one time and it always report the name the task had the last time
it has been scheduled for execution in the current trace.
:param name: task PID
:type name: int
:return: the name of the task whose PID matches the required one,
the last time they ran in the current trace
"""
try:
return self._tasks_by_pid.ix[pid].values[0]
except KeyError:
return None
def getTasks(self):
"""
Get a dictionary of all the tasks in the Trace.
:return: a dictionary which maps each PID to the corresponding task
name
"""
return self._tasks_by_pid.TaskName.to_dict()
###############################################################################
# DataFrame Getter Methods
###############################################################################
def df(self, event):
"""
Get a dataframe containing all occurrences of the specified trace event
in the parsed trace.
:param event: Trace event name
:type event: str
"""
warnings.simplefilter('always', DeprecationWarning) #turn off filter
warnings.warn("\n\tUse of Trace::df() is deprecated and will be soon removed."
"\n\tUse Trace::data_frame.trace_event(event_name) instead.",
category=DeprecationWarning)
warnings.simplefilter('default', DeprecationWarning) #reset filter
return self._dfg_trace_event(event)
def _dfg_trace_event(self, event):
"""
Get a dataframe containing all occurrences of the specified trace event
in the parsed trace.
:param event: Trace event name
:type event: str
"""
if self.data_dir is None:
raise ValueError("trace data not (yet) loaded")
if self.ftrace and hasattr(self.ftrace, event):
return getattr(self.ftrace, event).data_frame
raise ValueError('Event [{}] not supported. '
'Supported events are: {}'
.format(event, self.available_events))
def _dfg_functions_stats(self, functions=None):
"""
Get a DataFrame of specified kernel functions profile data
For each profiled function a DataFrame is returned which reports stats
on kernel functions execution time. The reported stats are per-CPU and
        include: number of times the function has been executed (hits),
average execution time (avg), overall execution time (time) and samples
variance (s_2).
By default returns a DataFrame of all the functions profiled.
:param functions: the name of the function or a list of function names
to report
:type functions: str or list(str)
"""
if not hasattr(self, '_functions_stats_df'):
return None
df = self._functions_stats_df
if not functions:
return df
return df.loc[df.index.get_level_values(1).isin(listify(functions))]
###############################################################################
# Trace Events Sanitize Methods
###############################################################################
def _sanitize_SchedCpuCapacity(self):
"""
Add more columns to cpu_capacity data frame if the energy model is
available.
"""
if not self.hasEvents('cpu_capacity') \
or 'nrg_model' not in self.platform:
return
df = self._dfg_trace_event('cpu_capacity')
# Add column with LITTLE and big CPUs max capacities
nrg_model = self.platform['nrg_model']
max_lcap = nrg_model['little']['cpu']['cap_max']
max_bcap = nrg_model['big']['cpu']['cap_max']
df['max_capacity'] = np.select(
[df.cpu.isin(self.platform['clusters']['little'])],
[max_lcap], max_bcap)
# Add LITTLE and big CPUs "tipping point" threshold
tip_lcap = 0.8 * max_lcap
tip_bcap = 0.8 * max_bcap
df['tip_capacity'] = np.select(
[df.cpu.isin(self.platform['clusters']['little'])],
[tip_lcap], tip_bcap)
def _sanitize_SchedLoadAvgCpu(self):
"""
If necessary, rename certain signal names from v5.0 to v5.1 format.
"""
if not self.hasEvents('sched_load_avg_cpu'):
return
df = self._dfg_trace_event('sched_load_avg_cpu')
if 'utilization' in df:
df.rename(columns={'utilization': 'util_avg'}, inplace=True)
df.rename(columns={'load': 'load_avg'}, inplace=True)
def _sanitize_SchedLoadAvgTask(self):
"""
If necessary, rename certain signal names from v5.0 to v5.1 format.
"""
if not self.hasEvents('sched_load_avg_task'):
return
df = self._dfg_trace_event('sched_load_avg_task')
if 'utilization' in df:
df.rename(columns={'utilization': 'util_avg'}, inplace=True)
df.rename(columns={'load': 'load_avg'}, inplace=True)
df.rename(columns={'avg_period': 'period_contrib'}, inplace=True)
df.rename(columns={'runnable_avg_sum': 'load_sum'}, inplace=True)
df.rename(columns={'running_avg_sum': 'util_sum'}, inplace=True)
df['cluster'] = np.select(
[df.cpu.isin(self.platform['clusters']['little'])],
['LITTLE'], 'big')
# Add a column which represents the max capacity of the smallest
        # cluster which can accommodate the task utilization
little_cap = self.platform['nrg_model']['little']['cpu']['cap_max']
big_cap = self.platform['nrg_model']['big']['cpu']['cap_max']
df['min_cluster_cap'] = df.util_avg.map(
lambda util_avg: big_cap if util_avg > little_cap else little_cap
)
def _sanitize_SchedBoostCpu(self):
"""
Add a boosted utilization signal as the sum of utilization and margin.
Also, if necessary, rename certain signal names from v5.0 to v5.1
format.
"""
if not self.hasEvents('sched_boost_cpu'):
return
df = self._dfg_trace_event('sched_boost_cpu')
if 'usage' in df:
df.rename(columns={'usage': 'util'}, inplace=True)
df['boosted_util'] = df['util'] + df['margin']
def _sanitize_SchedBoostTask(self):
"""
Add a boosted utilization signal as the sum of utilization and margin.
Also, if necessary, rename certain signal names from v5.0 to v5.1
format.
"""
if not self.hasEvents('sched_boost_task'):
return
df = self._dfg_trace_event('sched_boost_task')
if 'utilization' in df:
            # Convert signal names to v5.1 format
df.rename(columns={'utilization': 'util'}, inplace=True)
df['boosted_util'] = df['util'] + df['margin']
def _sanitize_SchedEnergyDiff(self):
"""
        If an energy model is provided, some signals are added to the
sched_energy_diff trace event data frame.
Also convert between existing field name formats for sched_energy_diff
"""
if not self.hasEvents('sched_energy_diff') \
or 'nrg_model' not in self.platform:
return
nrg_model = self.platform['nrg_model']
em_lcluster = nrg_model['little']['cluster']
em_bcluster = nrg_model['big']['cluster']
em_lcpu = nrg_model['little']['cpu']
em_bcpu = nrg_model['big']['cpu']
lcpus = len(self.platform['clusters']['little'])
bcpus = len(self.platform['clusters']['big'])
SCHED_LOAD_SCALE = 1024
power_max = em_lcpu['nrg_max'] * lcpus + em_bcpu['nrg_max'] * bcpus + \
em_lcluster['nrg_max'] + em_bcluster['nrg_max']
self._log.debug(
"Maximum estimated system energy: {0:d}".format(power_max))
df = self._dfg_trace_event('sched_energy_diff')
translations = {'nrg_d' : 'nrg_diff',
'utl_d' : 'usage_delta',
'payoff' : 'nrg_payoff'
}
df.rename(columns=translations, inplace=True)
df['nrg_diff_pct'] = SCHED_LOAD_SCALE * df.nrg_diff / power_max
# Tag columns by usage_delta
ccol = df.usage_delta
df['usage_delta_group'] = np.select(
[ccol < 150, ccol < 400, ccol < 600],
['< 150', '< 400', '< 600'], '>= 600')
# Tag columns by nrg_payoff
ccol = df.nrg_payoff
df['nrg_payoff_group'] = np.select(
[ccol > 2e9, ccol > 0, ccol > -2e9],
['Optimal Accept', 'SchedTune Accept', 'SchedTune Reject'],
'Suboptimal Reject')
def _sanitize_SchedOverutilized(self):
""" Add a column with overutilized status duration. """
if not self.hasEvents('sched_overutilized'):
return
df = self._dfg_trace_event('sched_overutilized')
df['start'] = df.index
df['len'] = (df.start - df.start.shift()).fillna(0).shift(-1)
df.drop('start', axis=1, inplace=True)
def _chunker(self, seq, size):
"""
Given a data frame or a series, generate a sequence of chunks of the
given size.
:param seq: data to be split into chunks
:type seq: :mod:`pandas.Series` or :mod:`pandas.DataFrame`
:param size: size of each chunk
:type size: int
"""
return (seq.iloc[pos:pos + size] for pos in range(0, len(seq), size))
def _sanitize_CpuFrequency(self):
"""
Verify that all platform reported clusters are frequency coherent (i.e.
frequency scaling is performed at a cluster level).
"""
if not self.hasEvents('cpu_frequency_devlib'):
return
devlib_freq = self._dfg_trace_event('cpu_frequency_devlib')
devlib_freq.rename(columns={'cpu_id':'cpu'}, inplace=True)
devlib_freq.rename(columns={'state':'frequency'}, inplace=True)
df = self._dfg_trace_event('cpu_frequency')
clusters = self.platform['clusters']
        # devlib always introduces fake cpu_frequency events: in case the
        # OS has not generated cpu_frequency events, these are the only
        # frequency events to report
if len(df) == 0:
# Register devlib injected events as 'cpu_frequency' events
setattr(self.ftrace.cpu_frequency, 'data_frame', devlib_freq)
df = devlib_freq
self.available_events.append('cpu_frequency')
# make sure fake cpu_frequency events are never interleaved with
# OS generated events
else:
if len(devlib_freq) > 0:
                # Frequency injection is done on a per-cluster basis.
                # This is based on the assumption that clusters are
                # frequency coherent.
                # For each cluster we inject devlib events only if
                # these events do not overlap with OS-generated ones.
# Inject "initial" devlib frequencies
os_df = df
dl_df = devlib_freq.iloc[:self.platform['cpus_count']]
for _,c in self.platform['clusters'].iteritems():
dl_freqs = dl_df[dl_df.cpu.isin(c)]
os_freqs = os_df[os_df.cpu.isin(c)]
self._log.debug("First freqs for %s:\n%s", c, dl_freqs)
# All devlib events "before" os-generated events
self._log.debug("Min os freq @: %s", os_freqs.index.min())
if os_freqs.empty or \
os_freqs.index.min() > dl_freqs.index.max():
self._log.debug("Insert devlib freqs for %s", c)
df = pd.concat([dl_freqs, df])
# Inject "final" devlib frequencies
os_df = df
dl_df = devlib_freq.iloc[self.platform['cpus_count']:]
for _,c in self.platform['clusters'].iteritems():
dl_freqs = dl_df[dl_df.cpu.isin(c)]
os_freqs = os_df[os_df.cpu.isin(c)]
self._log.debug("Last freqs for %s:\n%s", c, dl_freqs)
# All devlib events "after" os-generated events
self._log.debug("Max os freq @: %s", os_freqs.index.max())
if os_freqs.empty or \
os_freqs.index.max() < dl_freqs.index.min():
self._log.debug("Append devlib freqs for %s", c)
df = pd.concat([df, dl_freqs])
df.sort_index(inplace=True)
setattr(self.ftrace.cpu_frequency, 'data_frame', df)
# Frequency Coherency Check
for _, cpus in clusters.iteritems():
cluster_df = df[df.cpu.isin(cpus)]
for chunk in self._chunker(cluster_df, len(cpus)):
f = chunk.iloc[0].frequency
if any(chunk.frequency != f):
self._log.warning('Cluster Frequency is not coherent! '
'Failure in [cpu_frequency] events at:')
self._log.warning(chunk)
self.freq_coherency = False
return
self._log.info('Platform clusters verified to be Frequency coherent')
###############################################################################
# Utility Methods
###############################################################################
def integrate_square_wave(self, sq_wave):
"""
Compute the integral of a square wave time series.
:param sq_wave: square wave assuming only 1.0 and 0.0 values
:type sq_wave: :mod:`pandas.Series`
"""
sq_wave.iloc[-1] = 0.0
# Compact signal to obtain only 1-0-1-0 sequences
comp_sig = sq_wave.loc[sq_wave.shift() != sq_wave]
# First value for computing the difference must be a 1
if comp_sig.iloc[0] == 0.0:
return sum(comp_sig.iloc[2::2].index - comp_sig.iloc[1:-1:2].index)
else:
return sum(comp_sig.iloc[1::2].index - comp_sig.iloc[:-1:2].index)
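    # Worked example for integrate_square_wave() (editor's note, hand-computed):
    # for a signal that is high between t=1.0..3.0 and t=5.0..6.0,
    #   sq = pd.Series([1.0, 0.0, 1.0, 0.0], index=[1.0, 3.0, 5.0, 6.0])
    # the compacted signal starts with a 1, so the else branch returns
    # (3.0 - 1.0) + (6.0 - 5.0) = 3.0, i.e. the total time spent at 1.0.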
def _loadFunctionsStats(self, path='trace.stats'):
"""
Read functions profiling file and build a data frame containing all
relevant data.
:param path: path to the functions profiling trace file
:type path: str
"""
if os.path.isdir(path):
path = os.path.join(path, 'trace.stats')
if (path.endswith('dat') or
path.endswith('txt') or
path.endswith('html')):
pre, ext = os.path.splitext(path)
path = pre + '.stats'
if not os.path.isfile(path):
return False
# Opening functions profiling JSON data file
self._log.debug('Loading functions profiling data from [%s]...', path)
with open(os.path.join(path), 'r') as fh:
trace_stats = json.load(fh)
# Build DataFrame of function stats
frames = {}
for cpu, data in trace_stats.iteritems():
frames[int(cpu)] = pd.DataFrame.from_dict(data, orient='index')
# Build and keep track of the DataFrame
self._functions_stats_df = pd.concat(frames.values(),
keys=frames.keys())
return len(self._functions_stats_df) > 0
@memoized
def getCPUActiveSignal(self, cpu):
"""
Build a square wave representing the active (i.e. non-idle) CPU time,
i.e.:
cpu_active[t] == 1 if the CPU is reported to be non-idle by cpuidle at
time t
cpu_active[t] == 0 otherwise
:param cpu: CPU ID
:type cpu: int
:returns: A :mod:`pandas.Series` or ``None`` if the trace contains no
"cpu_idle" events
"""
if not self.hasEvents('cpu_idle'):
self._log.warning('Events [cpu_idle] not found, '
'cannot compute CPU active signal!')
return None
idle_df = self._dfg_trace_event('cpu_idle')
cpu_df = idle_df[idle_df.cpu_id == cpu]
cpu_active = cpu_df.state.apply(
lambda s: 1 if s == NON_IDLE_STATE else 0
)
start_time = 0.0
if not self.ftrace.normalized_time:
start_time = self.ftrace.basetime
if cpu_active.empty:
cpu_active = pd.Series([0], index=[start_time])
elif cpu_active.index[0] != start_time:
entry_0 = pd.Series(cpu_active.iloc[0] ^ 1, index=[start_time])
cpu_active = pd.concat([entry_0, cpu_active])
# Fix sequences of wakeup/sleep events reported with the same index
return handle_duplicate_index(cpu_active)
@memoized
def getClusterActiveSignal(self, cluster):
"""
Build a square wave representing the active (i.e. non-idle) cluster
time, i.e.:
cluster_active[t] == 1 if at least one CPU is reported to be non-idle
        by cpuidle at time t
cluster_active[t] == 0 otherwise
:param cluster: list of CPU IDs belonging to a cluster
:type cluster: list(int)
:returns: A :mod:`pandas.Series` or ``None`` if the trace contains no
"cpu_idle" events
"""
if not self.hasEvents('cpu_idle'):
self._log.warning('Events [cpu_idle] not found, '
'cannot compute cluster active signal!')
return None
active = self.getCPUActiveSignal(cluster[0]).to_frame(name=cluster[0])
for cpu in cluster[1:]:
active = active.join(
self.getCPUActiveSignal(cpu).to_frame(name=cpu),
how='outer'
)
active.fillna(method='ffill', inplace=True)
# Cluster active is the OR between the actives on each CPU
# belonging to that specific cluster
cluster_active = reduce(
operator.or_,
[cpu_active.astype(int) for _, cpu_active in
active.iteritems()]
)
return cluster_active
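    # Illustrative usage (editor's sketch): on a hypothetical big.LITTLE
    # platform whose LITTLE cluster is CPUs [0, 1, 2, 3],
    #   cl_active = trace.getClusterActiveSignal([0, 1, 2, 3])
    # yields a square wave that is 1 whenever at least one of those CPUs is
    # non-idle, obtained by OR-ing the per-CPU signals built above.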
class TraceData:
""" A DataFrame collector exposed to Trace's clients """
pass
# vim :set tabstop=4 shiftwidth=4 expandtab
| apache-2.0 | -993,559,102,080,336,500 | 35.953757 | 86 | 0.568653 | false | 4.147528 | false | false | false |
shakib609/AskFmClone | apps/askfm/views.py | 1 | 3935 | from django.contrib import messages
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from ..question.models import Question, Answer
from .models import Follow
from .forms import QuestionForm
from .helpers import get_total_likes
# @login_required
# def my_profile(request):
# unanswered_questions = Question.objects.filter(
# asked_to=request.user,
# answer=None
# ).select_related('asked_by').order_by('-created')
# asked_questions = Question.objects.filter(
# asked_by=request.user
# ).select_related('asked_to').order_by('-created')
#
# context = {
# 'unanswered_questions': unanswered_questions,
# 'asked_questions': asked_questions,
# 'total_likes': get_total_likes(request.user)
# }
# return render(request, 'askfm/my_profile.html', context)
def homepage(request):
if request.user.is_authenticated():
return redirect(
reverse('askfm:user_profile', args=(request.user.username, ))
)
random_users = User.objects.order_by('?')[:20]
context = {
'random_users': random_users
}
return render(request, 'askfm/homepage.html', context)
def user_profile(request, username):
user = get_object_or_404(User, username=username)
answered_questions = Question.objects.exclude(answer=None).filter(
asked_to=user).select_related('answer').order_by('-created')
asked_questions = Question.objects.filter(
asked_by=user).select_related('answer').order_by('-created')
if request.method == 'POST':
if not request.user.is_authenticated():
            messages.error(request, 'You must log in first!')
return redirect(
reverse('auth:login') + '?next=/{}/'.format(username))
form = QuestionForm(request.POST)
if form.is_valid():
q = Question(
asked_by=request.user,
asked_to=get_object_or_404(User, username=username),
text=form.cleaned_data['question_text'],
anonymous=form.cleaned_data.get('anonymous', False)
)
q.save()
messages.success(request, 'Your question has been submitted!')
return redirect(reverse('askfm:user_profile', args=(username,)))
else:
form = QuestionForm()
context = {
'username': username,
'answered_questions': answered_questions,
'asked_questions': asked_questions,
'form': form,
'total_likes': get_total_likes(user)
}
return render(request, 'askfm/user_profile.html', context)
@login_required
@require_POST
def answer(request):
question_id = request.POST.get('question-id')
answer_text = request.POST.get('answer-text')
if question_id and answer_text:
question = get_object_or_404(
Question, id=question_id, asked_to=request.user
)
answer = Answer.objects.create(text=answer_text, question=question)
messages.success(request, 'Answer submitted successfully!')
else:
messages.error(request, 'Something went wrong.', extra_tags='danger')
return redirect(
reverse('askfm:user_profile', args=(request.user.username, ))
)
@login_required
def friends(request, username):
if request.user.username != username:
return redirect(
reverse('askfm:user_profile', args=(request.user.username, ))
)
following = User.objects.filter(following__followed_by=request.user)
context = {
'following': following
}
return render(request, 'askfm/friends.html', context=context)
| mit | -7,572,416,600,820,695,000 | 34.772727 | 79 | 0.627192 | false | 3.974747 | false | false | false |
rvykydal/anaconda | pyanaconda/modules/common/structures/comps.py | 3 | 4409 | #
# DBus structure for comps definitions.
#
# Copyright (C) 2021 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from dasbus.structure import DBusData
from dasbus.typing import * # pylint: disable=wildcard-import
__all__ = ["CompsGroupData", "CompsEnvironmentData"]
class CompsGroupData(DBusData):
"""Comps group data."""
def __init__(self):
self._id = ""
self._name = ""
self._description = ""
@property
def id(self) -> Str:
"""Unique identifier of the group.
:return: a string
"""
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def name(self) -> Str:
"""Translated name of the group.
:return: a translated string
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def description(self) -> Str:
"""Translated description of the group.
:return: a translated string
"""
return self._description
@description.setter
def description(self, value):
self._description = value
class CompsEnvironmentData(DBusData):
"""Comps environment data."""
def __init__(self):
self._id = ""
self._name = ""
self._description = ""
self._optional_groups = []
self._default_groups = []
self._visible_groups = []
@property
def id(self) -> Str:
"""Unique identifier of the environment.
:return: a string
"""
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def name(self) -> Str:
"""Translated name of the environment.
:return: a translated string
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def description(self) -> Str:
"""Translated description of the environment.
:return: a translated string
"""
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def optional_groups(self) -> List[Str]:
"""List of optional groups.
These groups don't have to be installed for
successful installation of the environment.
:return: a list of group identifiers
"""
return self._optional_groups
@optional_groups.setter
def optional_groups(self, value):
self._optional_groups = value
@property
def default_groups(self) -> List[Str]:
"""List of default optional groups.
These groups don't have to be installed for
successful installation of the environment,
but they will be pre-selected by default.
:return: a list of group identifiers
"""
return self._default_groups
@default_groups.setter
def default_groups(self, value):
self._default_groups = value
@property
def visible_groups(self) -> List[Str]:
"""List of user-visible groups.
These groups are not defined by the environment,
but they supplement the list of optional groups
that can be selected by users.
:return: a list of group identifiers
"""
return self._visible_groups
@visible_groups.setter
def visible_groups(self, value):
self._visible_groups = value
def get_available_groups(self) -> List[Str]:
"""Get a list of groups available for the user selection.
:return: a list of group identifiers
"""
return list(dict.fromkeys(
self.optional_groups
+ self.default_groups
+ self.visible_groups
))
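    # Example of the de-duplication above (editor's note, not from the original
    # module): with optional_groups=["a", "b"], default_groups=["b"] and
    # visible_groups=["c", "a"], get_available_groups() returns
    # ["a", "b", "c"] -- duplicates are dropped while first-seen order is kept,
    # since dict.fromkeys() preserves insertion order on Python 3.7+.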
| gpl-2.0 | -6,337,888,965,697,843,000 | 24.485549 | 71 | 0.611703 | false | 4.378352 | false | false | false |
khosrow/metpx | sundew/lib/aftn/StateAFTN.py | 1 | 1295 | """
#############################################################################################
# Name: StateAFTN.py
#
# Author: Daniel Lemay
#
# Date: 2006-06-06
#
# Description: Keep the AFTN state in this object
#
#############################################################################################
"""
import os, sys, pickle
sys.path.insert(1,sys.path[0] + '/..')
sys.path.insert(1,sys.path[0] + '/../importedLibs')
class StateAFTN:
def __init__(self):
self.CSN = None
self.waitedTID = None
self.lastAckReceived = None
self.waitingForAck = None
def fill(self, messageManager):
self.CSN = messageManager.CSN
self.waitedTID = messageManager.waitedTID
self.lastAckReceived = messageManager.lastAckReceived
self.waitingForAck = messageManager.waitingForAck
def clear(self):
self.CSN = None
self.waitedTID = None
self.lastAckReceived = None
self.waitingForAck = None
def infos(self):
return """
CSN = %s
waitedTID = %s
lastAckReceived = %s
waitingForAck = %s
""" % (self.CSN, self.waitedTID, self.lastAckReceived, self.waitingForAck)
if __name__ == "__main__":
state = StateAFTN()
print(state.infos())
| gpl-2.0 | 6,357,349,060,125,316,000 | 22.981481 | 93 | 0.518147 | false | 3.742775 | false | false | false |
SidSachdev/pycrunchbase | src/pycrunchbase/resource/person.py | 2 | 1469 | import six
from .node import Node
from .utils import parse_date
@six.python_2_unicode_compatible
class Person(Node):
"""Represents a Person on CrunchBase"""
KNOWN_RELATIONSHIPS = [
"primary_affiliation",
"primary_location",
"primary_image",
"websites",
"degrees",
"jobs",
"advisory_roles",
"founded_companies",
"investments",
"memberships",
"images",
"videos",
"news",
]
KNOWN_PROPERTIES = [
"permalink",
"api_path",
"web_path",
"last_name",
"first_name",
"also_known_as",
"bio",
"role_investor",
"born_on",
"born_on_trust_code",
"is_deceased",
"died_on",
"died_on_trust_code",
"created_at",
"updated_at",
]
def _coerce_values(self):
"""A delegate method to handle parsing all data and converting
them into python values
"""
# special cases to convert strings to pythonic value
for attr in ['born_on', 'died_on']:
if getattr(self, attr, None):
setattr(self, attr, parse_date(getattr(self, attr)))
def __str__(self):
return u'{first} {last} ({permalink})'.format(
first=self.first_name,
last=self.last_name,
permalink=self.permalink,
)
def __repr__(self):
return self.__str__()
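# Illustrative usage (editor's sketch; the response dict below is hypothetical
# and the actual attribute population is handled by the Node base class):
#
#   data = {"properties": {"first_name": "Ada", "last_name": "Lovelace",
#                          "permalink": "ada-lovelace", "born_on": "1815-12-10"}}
#   person = Person(data)
#   str(person)      # -> "Ada Lovelace (ada-lovelace)"
#   person.born_on   # -> a date object produced by parse_date()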
| mit | -1,112,548,700,295,432,700 | 22.693548 | 70 | 0.514636 | false | 3.728426 | false | false | false |
openearth/delft3d-gt-server | delft3dworker/migrations/0090_auto_20180530_0832.py | 1 | 1719 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-05-30 08:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("delft3dworker", "0089_template_shortname"),
]
operations = [
migrations.AddField(
model_name="workflow",
name="action_log",
field=models.TextField(blank=True, default=""),
),
migrations.AddField(
model_name="workflow",
name="cluster_log",
field=models.TextField(blank=True, default=""),
),
migrations.AddField(
model_name="workflow",
name="yaml",
field=models.FileField(default="", upload_to="workflows/"),
),
migrations.AlterField(
model_name="scene",
name="phase",
field=models.PositiveSmallIntegerField(
choices=[
(0, "New"),
(6, "Idle: waiting for user input"),
(11, "Starting workflow"),
(12, "Running workflow"),
(13, "Removing workflow"),
(500, "Finished"),
(501, "Failed"),
],
default=0,
),
),
migrations.AlterField(
model_name="template",
name="shortname",
field=models.CharField(default="gt", max_length=256),
),
migrations.AlterField(
model_name="template",
name="yaml_template",
field=models.FileField(default="", upload_to="workflow_templates/"),
),
]
| gpl-3.0 | -4,754,983,757,135,423,000 | 29.696429 | 80 | 0.492728 | false | 4.761773 | false | false | false |
JKrehl/Electrons | Electrons/Scattering/Operators/AbstractArray.py | 1 | 3918 | #!/usr/bin/env python
"""
Copyright (c) 2015 Jonas Krehl <[email protected]>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
import numpy
try:
import reikna.cluda.cuda
CUDA = True
except ImportError:
CUDA = False
import reikna.cluda.ocl
class AbstractArray:
_modes = {}
def __new__(cls, array, mode = "numpy", *args, **kwargs):
assert mode in cls._modes
if mode in cls._modes:
if isinstance(array, AbstractArray):
return array._as(mode, *args, **kwargs)
else:
return cls._modes[mode].from_ndarray(array, *args, **kwargs)
@classmethod
def from_ndarray(cls, array, *args, **kwargs):
raise NotImplemented
def to_ndarray(self):
raise NotImplemented
def to_same_mode(self, *args, **kwargs):
return self
def _as(self, mode, *args, **kwargs):
assert mode in self._modes
if mode == self.mode:
return self.to_same_mode(*args, **kwargs)
else:
return self._modes[mode].from_ndarray(self.to_ndarray(), *args, **kwargs)
class AbstractArray_Numpy(numpy.ndarray, AbstractArray):
mode = "numpy"
@classmethod
def from_ndarray(cls, array):
self = array.view(__class__)
return self
def to_ndarray(self):
return self.view(numpy.ndarray)
AbstractArray._modes['numpy'] = AbstractArray_Numpy
if CUDA:
class AbstractArray_CUDA(reikna.cluda.cuda.Array, AbstractArray):
mode = "cuda"
def __init__(self, *args, **kwargs):
pass
@staticmethod
def get_thread(thread=None):
if isinstance(thread, reikna.cluda.cuda.Thread):
return thread
elif isinstance(thread, reikna.cluda.ocl.Thread):
raise TypeError("Thread of wrong CLUDA Backend given")
else:
return reikna.cluda.cuda.Thread.create()
@classmethod
def from_ndarray(cls, array, thread = None):
if isinstance(thread, reikna.cluda.cuda.Thread):
pass
elif isinstance(thread, reikna.cluda.ocl.Thread):
raise TypeError("Thread of wrong CLUDA Backend given")
else:
thread = reikna.cluda.cuda.Thread.create()
self = __class__.get_thread(thread).to_device(array)
self.__class__ = __class__
return self
def to_ndarray(self):
return self.get()
def to_same_mode(self, thread=None):
if self.thread == thread or thread == None:
return self
else:
return self.from_ndarray(self.to_ndarray(), thread)
AbstractArray._modes['cuda'] = AbstractArray_CUDA
class AbstractArray_OpenCL(reikna.cluda.ocl.Array, AbstractArray):
mode = "opencl"
def __init__(self, *args, **kwargs):
pass
@staticmethod
def get_thread(thread=None):
if isinstance(thread, reikna.cluda.ocl.Thread):
return thread
elif isinstance(thread, reikna.cluda.cuda.Thread):
raise TypeError("Thread of wrong CLUDA Backend given")
else:
return reikna.cluda.ocl.Thread.create()
@classmethod
def from_ndarray(cls, array, thread=None):
self = __class__.get_thread(thread).to_device(array)
self.__class__ = __class__
return self
def to_ndarray(self):
return self.get()
def to_same_mode(self, thread = None):
if self.thread == thread or thread == None:
return self
else:
return self.from_ndarray(self.to_ndarray(), thread)
AbstractArray._modes['opencl'] = AbstractArray_OpenCL
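# Illustrative usage of the mode dispatch above (editor's sketch; it needs a
# working reikna CUDA/OpenCL setup, so it is left as a comment):
#
#   a = numpy.arange(8, dtype=numpy.float32)
#   a_np = AbstractArray(a)                    # AbstractArray_Numpy view
#   a_cl = AbstractArray(a_np, mode="opencl")  # copied to an OpenCL device
#   back = AbstractArray(a_cl, mode="numpy")   # back to a host-side array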
| isc | -3,605,840,153,688,494,000 | 26.591549 | 76 | 0.695763 | false | 3.273183 | false | false | false |
jepegit/cellpy | cellpy/utils/batch_tools/engines.py | 1 | 5883 | """Engines are functions that are used by the Do-ers.
Keyword Args: experiments, farms, barn
Returns: farms, barn
"""
import time
import logging
import pandas as pd
from cellpy import dbreader
from cellpy.parameters.internal_settings import get_headers_journal
from cellpy.utils.batch_tools import batch_helpers as helper
# logger = logging.getLogger(__name__)
SELECTED_SUMMARIES = [
"discharge_capacity",
"charge_capacity",
"coulombic_efficiency",
"cumulated_coulombic_efficiency",
"ir_discharge",
"ir_charge",
"end_voltage_discharge",
"end_voltage_charge",
"charge_c_rate",
"discharge_c_rate",
]
hdr_journal = get_headers_journal()
def cycles_engine(**kwargs):
"""engine to extract cycles"""
logging.debug("cycles_engine::Not finished yet (sorry).")
# raise NotImplementedError
experiments = kwargs["experiments"]
farms = []
barn = "raw_dir" # Its a murder in the red barn - murder in the red barn
for experiment in experiments:
farms.append([])
if experiment.all_in_memory:
logging.debug("all in memory")
for key in experiment.cell_data_frames:
logging.debug(f"extracting cycles from {key}")
# extract cycles here and send it to the farm
else:
            logging.debug("don't have it in memory - need to look it up in the files")
for key in experiment.cell_data_frames:
logging.debug(f"looking up cellpyfile for {key}")
# extract cycles here and send it to the farm
return farms, barn
def raw_data_engine(**kwargs):
"""engine to extract raw data"""
logging.debug("cycles_engine")
raise NotImplementedError
experiments = kwargs["experiments"]
farms = []
barn = "raw_dir"
for experiment in experiments:
farms.append([])
return farms, barn
def summary_engine(**kwargs):
"""engine to extract summary data"""
logging.debug("summary_engine")
# farms = kwargs["farms"]
farms = []
experiments = kwargs["experiments"]
for experiment in experiments:
if experiment.selected_summaries is None:
selected_summaries = SELECTED_SUMMARIES
else:
selected_summaries = experiment.selected_summaries
if experiment.summary_frames is None:
logging.debug("No summary frames found")
logging.debug("Re-loading")
experiment.summary_frames = _load_summaries(experiment)
farm = helper.join_summaries(experiment.summary_frames, selected_summaries)
farms.append(farm)
barn = "batch_dir"
return farms, barn
def _load_summaries(experiment):
summary_frames = {}
for label in experiment.cell_names:
# TODO: replace this with direct lookup from hdf5?
summary_frames[label] = experiment.data[label].cell.summary
return summary_frames
def dq_dv_engine(**kwargs):
"""engine that performs incremental analysis of the cycle-data"""
farms = None
barn = "raw_dir"
return farms, barn
def simple_db_engine(reader=None, srnos=None, **kwargs):
"""engine that gets values from the simple excel 'db'"""
# This is not really a proper Do-er engine. But not sure where to put it.
if reader is None:
reader = dbreader.Reader()
logging.debug("No reader provided. Creating one myself.")
info_dict = dict()
info_dict[hdr_journal["filename"]] = [reader.get_cell_name(srno) for srno in srnos]
info_dict[hdr_journal["mass"]] = [reader.get_mass(srno) for srno in srnos]
info_dict[hdr_journal["total_mass"]] = [
reader.get_total_mass(srno) for srno in srnos
]
info_dict[hdr_journal["loading"]] = [reader.get_loading(srno) for srno in srnos]
info_dict[hdr_journal["nom_cap"]] = [reader.get_nom_cap(srno) for srno in srnos]
info_dict[hdr_journal["experiment"]] = [
reader.get_experiment_type(srno) for srno in srnos
]
info_dict[hdr_journal["fixed"]] = [
reader.inspect_hd5f_fixed(srno) for srno in srnos
]
info_dict[hdr_journal["label"]] = [reader.get_label(srno) for srno in srnos]
info_dict[hdr_journal["cell_type"]] = [reader.get_cell_type(srno) for srno in srnos]
info_dict[hdr_journal["instrument"]] = [
reader.get_instrument(srno) for srno in srnos
]
info_dict[hdr_journal["raw_file_names"]] = []
info_dict[hdr_journal["cellpy_file_name"]] = []
info_dict[hdr_journal["comment"]] = [reader.get_comment(srno) for srno in srnos]
logging.debug(f"created info-dict from {reader.db_file}:")
# logging.debug(info_dict)
for key in list(info_dict.keys()):
logging.debug("%s: %s" % (key, str(info_dict[key])))
_groups = [reader.get_group(srno) for srno in srnos]
logging.debug(">\ngroups: %s" % str(_groups))
groups = helper.fix_groups(_groups)
info_dict[hdr_journal["group"]] = groups
my_timer_start = time.time()
filename_cache = []
info_dict = helper.find_files(info_dict, filename_cache, **kwargs)
my_timer_end = time.time()
if (my_timer_end - my_timer_start) > 5.0:
logging.critical(
"The function _find_files was very slow. "
"Save your journal so you don't have to run it again! "
"You can load it again using the from_journal(journal_name) method."
)
info_df = pd.DataFrame(info_dict)
info_df = info_df.sort_values([hdr_journal.group, hdr_journal.filename])
info_df = helper.make_unique_groups(info_df)
info_df[hdr_journal.label] = info_df[hdr_journal.filename].apply(
helper.create_labels
)
# TODO: check if drop=False works [#index]
info_df.set_index(hdr_journal["filename"], inplace=True) # edit this to allow for
    # non-numeric index-names (for tab completion and python-box)
return info_df
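# Illustrative call of simple_db_engine (editor's sketch): given a
# dbreader.Reader() backed by the Excel "db" and a list of serial numbers,
#
#   info_df = simple_db_engine(reader=my_reader, srnos=[101, 102, 103])
#
# returns the journal DataFrame indexed by file name; `my_reader` and the
# serial numbers above are placeholders, not values from this project.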
| mit | 8,426,626,545,050,913,000 | 31.324176 | 88 | 0.646269 | false | 3.550392 | false | false | false |
apfeltee/clearsilver | python/examples/base/wordwrap.py | 13 | 1060 | """
WordWrapping
"""
import os, sys, string, time, getopt
import re
def WordWrap(text, cols=70, detect_paragraphs = 0, is_header = 0):
text = string.replace(text,"\r\n", "\n") # remove CRLF
def nlrepl(matchobj):
if matchobj.group(1) != ' ' and matchobj.group(2) != ' ':
repl_with = ' '
else:
repl_with = ''
return matchobj.group(1) + repl_with + matchobj.group(2)
if detect_paragraphs:
text = re.sub("([^\n])\n([^\n])",nlrepl,text)
body = []
i = 0
j = 0
ltext = len(text)
while i<ltext:
if i+cols < ltext:
r = string.find(text, "\n", i, i+cols)
j = r
if r == -1:
j = string.rfind(text, " ", i, i+cols)
if j == -1:
r = string.find(text, "\n", i+cols)
if r == -1: r = ltext
j = string.find(text, " ", i+cols)
if j == -1: j = ltext
j = min(j, r)
else:
j = ltext
body.append(string.strip(text[i:j]))
i = j+1
if is_header:
body = string.join(body, "\n ")
else:
body = string.join(body, "\n")
return body
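# Illustrative usage (editor's sketch for this Python 2 era module):
#
#   text = "word " * 30
#   wrapped = WordWrap(text, cols=20)
#   # each line of `wrapped` is now at most roughly 20 characters wide
#   header = WordWrap(text, cols=20, is_header=1)
#   # continuation lines of the header variant are joined with "\n " instead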
| bsd-2-clause | -6,697,434,188,681,011,000 | 20.2 | 66 | 0.515094 | false | 2.789474 | false | false | false |
itaton/sempreDecoder | decode.py | 1 | 2000 | import struct
f = open("/tmp/rtl.dat", "rb")
#i = 0 #for debugging only
p = 0 #pause counter
wasHigh = 0 #set when signal was high
r = "" #the sent bit string
threshold = 1500 #needs to be set between high and low, depends on gain of sdr-stick
#samplemax = 0 #for debugging only
resultArray = [] #Stores the 12 packages that are send
try:
s = f.read(1) #16 bits are one sample
s += f.read(1)
while s:
sample = struct.unpack('<H', s)[0] #samples are in little endian
#print(sample) #debugging
#if (sample > samplemax and sample < 5000):
# samplemax = sample
if (sample > threshold):
#print(sample)
wasHigh = 1
if (p != 0):
if (p >= 27 and p <= 31): #short pause -> 0
r = r + "0"
if (p >= 56 and p <= 62): #medium pause -> 1
r += "1"
                if (p > 100): #long pause -> transmission of one package ended. The package is sent 12 times
resultArray.append(r)
r = ""
#print(p)
p = 0
if (sample < threshold and (wasHigh == 1 or p != 0)):
wasHigh = 0
p += 1
#i += 1
s = f.read(1)
s+= f.read(1)
finally:
resultArray.append(r)
#Check for transmission/decoding error - this assumes there is max 1 error in the first 3 transmissions
if (resultArray[0] == resultArray[1]):
#print(resultArray[0]) #resulting bitstring that was transmitted
data = resultArray[0]
else:
#print(resultArray[2])
data = resultArray[2]
humidity = int(data[-8:], 2)
print("Humidity:", humidity)
temp = int(data[12:-12], 2)
if (temp & 0x800 != 0):
temp = ~temp
temp = temp & 0xFFF
temp += 1
temp = temp/10.0
print("Temperature:", "-" + str(temp))
else:
temp = temp/10.0
print("Temperature:", temp)
f.close()
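# Worked example of the bit layout assumed by the slicing above (editor's
# note; the field meaning is inferred from this script, not from a datasheet):
# for a 36-bit packet such as
#   data = "010100000000" + "000011011001" + "0011" + "00110111"
# data[12:-12] is the 12-bit two's-complement temperature*10 field
# (000011011001 -> 217 -> 21.7 C) and data[-8:] is the 8-bit relative
# humidity field (00110111 -> 55 %).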
| unlicense | -8,428,295,885,252,208,000 | 29.769231 | 108 | 0.519 | false | 3.703704 | false | false | false |
nkmk/python-snippets | notebook/pandas_cumsum_cumprod.py | 1 | 2097 | import pandas as pd
print(pd.__version__)
# 1.0.5
df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=['X', 'Y', 'Z'])
print(df)
# A B
# X 1 4
# Y 2 5
# Z 3 6
print(df.cumsum())
# A B
# X 1 4
# Y 3 9
# Z 6 15
print(df.cumsum(axis=1))
# A B
# X 1 5
# Y 2 7
# Z 3 9
print(df.cumprod())
# A B
# X 1 4
# Y 2 20
# Z 6 120
print(df.cumprod(axis=1))
# A B
# X 1 4
# Y 2 10
# Z 3 18
print(df['B'])
# X 4
# Y 5
# Z 6
# Name: B, dtype: int64
print(type(df['B']))
# <class 'pandas.core.series.Series'>
print(df['B'].cumsum())
# X 4
# Y 9
# Z 15
# Name: B, dtype: int64
print(df['B'].cumprod())
# X 4
# Y 20
# Z 120
# Name: B, dtype: int64
df_nan = pd.DataFrame({'A': [1, 2, 3], 'B': [4, float('nan'), 6]}, index=['X', 'Y', 'Z'])
print(df_nan)
# A B
# X 1 4.0
# Y 2 NaN
# Z 3 6.0
print(df_nan.cumsum())
# A B
# X 1 4.0
# Y 3 NaN
# Z 6 10.0
print(float('nan') + 4)
# nan
print(df_nan.cumsum(skipna=False))
# A B
# X 1 4.0
# Y 3 NaN
# Z 6 NaN
print(df_nan.cumprod())
# A B
# X 1 4.0
# Y 2 NaN
# Z 6 24.0
print(df_nan.cumprod(skipna=False))
# A B
# X 1 4.0
# Y 2 NaN
# Z 6 NaN
df2 = pd.DataFrame({'A': [1, 4, 2], 'B': [6, 3, 5]}, index=['X', 'Y', 'Z'])
print(df2)
# A B
# X 1 6
# Y 4 3
# Z 2 5
print(df2.cummax())
# A B
# X 1 6
# Y 4 6
# Z 4 6
print(df2.cummax(axis=1))
# A B
# X 1 6
# Y 4 4
# Z 2 5
print(df2.cummin())
# A B
# X 1 6
# Y 1 3
# Z 1 3
print(df2.cummin(axis=1))
# A B
# X 1 1
# Y 4 3
# Z 2 2
df2_nan = pd.DataFrame({'A': [1, 4, 2], 'B': [6, float('nan'), 5]}, index=['X', 'Y', 'Z'])
print(df2_nan)
# A B
# X 1 6.0
# Y 4 NaN
# Z 2 5.0
print(df2_nan.cummax())
# A B
# X 1 6.0
# Y 4 NaN
# Z 4 6.0
print(df2_nan.cummax(skipna=False))
# A B
# X 1 6.0
# Y 4 NaN
# Z 4 NaN
print(df2_nan.cummin())
# A B
# X 1 6.0
# Y 1 NaN
# Z 1 5.0
print(df2_nan.cummin(skipna=False))
# A B
# X 1 6.0
# Y 1 NaN
# Z 1 NaN
| mit | 3,543,883,457,061,147,600 | 12.796053 | 90 | 0.451598 | false | 1.868984 | false | false | false |
rsj217/tornado--scaffold | tornapro/ghost/app/helper.py | 1 | 1216 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'ghost'
import urlparse
import tornado.web
from tornado.httputil import HTTPServerRequest
from .auth.models import User
class BaseAuthHandler(tornado.web.RequestHandler):
def get_current_user(self):
cookie = self.get_secure_cookie('user')
if cookie:
self._current_user = User.getone(pk=int(cookie))
return self._current_user
self.clear_cookie('user')
return
def extract_params(request):
if not isinstance(request, HTTPServerRequest):
request = request.request
parse_url = urlparse.urlparse(request.uri)
path, params, query, fragment = parse_url.path, parse_url.params, parse_url.query, parse_url.fragment
uri = urlparse.urlunparse((request.protocol, request.host, path, params, query, fragment))
http_method = request.method
headers = request.headers
if 'wsgi.input' in headers:
del headers['wsgi.input']
if 'wsgi.errors' in headers:
del headers['wsgi.errors']
if 'HTTP_AUTHORIZATION' in headers:
headers['Authorization'] = headers['HTTP_AUTHORIZATION']
body = request.body
return uri, http_method, body, headers | mit | 9,117,913,689,934,622,000 | 28.682927 | 105 | 0.675987 | false | 3.872611 | false | false | false |
timcera/mettoolbox | mettoolbox/meteolib.py | 1 | 16363 | # -*- coding: utf-8 -*-
"""
Library of functions for meteorology.
Meteorological function names
=============================
- cp_calc: Calculate specific heat
- Delta_calc: Calculate slope of vapour pressure curve
- es_calc: Calculate saturation vapour pressures
- ea_calc: Calculate actual vapour pressures
- gamma_calc: Calculate psychrometric constant
- L_calc: Calculate latent heat of vapourisation
- pottemp: Calculate potential temperature (1000 hPa reference pressure)
- rho_calc: Calculate air density
- sun_NR: Maximum sunshine duration [h] and extraterrestrial radiation [J/day]
- vpd_calc: Calculate vapour pressure deficits
- windvec: Calculate average wind direction and speed
Module requires and imports math and scipy modules.
Tested for compatibility with Python 2.7.
Function descriptions
=====================
"""
import math
import numpy as np
import pandas as pd
import scipy
def _arraytest(*args):
"""
Function to convert input parameters in as lists or tuples to
arrays, while leaving single values intact.
Test function for single values or valid array parameter input
(J. Delsman).
Parameters:
args (array, list, tuple, int, float): Input values for functions.
Returns:
rargs (array, int, float): Valid single value or array function input.
Examples
--------
>>> _arraytest(12.76)
12.76
>>> _arraytest([(1,2,3,4,5),(6,7,8,9)])
array([(1, 2, 3, 4, 5), (6, 7, 8, 9)], dtype=object)
>>> x=[1.2,3.6,0.8,1.7]
>>> _arraytest(x)
array([ 1.2, 3.6, 0.8, 1.7])
>>> _arraytest('This is a string')
'This is a string'
"""
rargs = []
for a in args:
if isinstance(a, (list, tuple)):
rargs.append(scipy.array(a))
else:
rargs.append(a)
if len(rargs) == 1:
return rargs[0] # no unpacking if single value, return value i/o list
return rargs
def cp_calc(airtemp=scipy.array([]), rh=scipy.array([]), airpress=scipy.array([])):
"""
Function to calculate the specific heat of air:
.. math::
c_p = 0.24 \\cdot 4185.5 \\cdot \\left(1 + 0.8 \\cdot \\frac{0.622 \\cdot e_a}{p - e_a}\\right)
where ea is the actual vapour pressure calculated from the relative
humidity and p is the ambient air pressure.
Parameters:
- airtemp: (array of) air temperature [Celsius].
- rh: (array of) relative humidity data [%].
- airpress: (array of) air pressure data [Pa].
Returns:
cp: array of saturated c_p values [J kg-1 K-1].
References
----------
R.G. Allen, L.S. Pereira, D. Raes and M. Smith (1998). Crop
Evaporation Guidelines for computing crop water requirements,
FAO - Food and Agriculture Organization of the United Nations.
Irrigation and drainage paper 56, Chapter 3. Rome, Italy.
(http://www.fao.org/docrep/x0490e/x0490e07.htm)
Examples
--------
>>> cp_calc(25,60,101300)
1014.0749457208065
>>> t = [10, 20, 30]
>>> rh = [10, 20, 30]
>>> airpress = [100000, 101000, 102000]
>>> cp_calc(t,rh,airpress)
array([ 1005.13411289, 1006.84399787, 1010.83623841])
"""
# Test input array/value
airtemp, rh, airpress = _arraytest(airtemp, rh, airpress)
# calculate vapour pressures
eact = ea_calc(airtemp, rh)
# Calculate cp
cp = 0.24 * 4185.5 * (1 + 0.8 * (0.622 * eact / (airpress - eact)))
return cp # in J/kg/K
def Delta_calc(airtemp=scipy.array([])):
"""
Function to calculate the slope of the temperature - vapour pressure curve
(Delta) from air temperature T:
.. math::
\\Delta = 1000 \\cdot \\frac{e_s \\cdot 4098}{(T + 237.3)^2}
where es is the saturated vapour pressure at temperature T.
Parameters:
- airtemp: (array of) air temperature [Celsius].
Returns:
- Delta: (array of) slope of saturated vapour curve [Pa K-1].
References
----------
Technical regulations 49, World Meteorological Organisation, 1984.
Appendix A. 1-Ap-A-3.
Examples
--------
>>> Delta_calc(30.0)
243.34309166827094
>>> x = [20, 25]
>>> Delta_calc(x)
array([ 144.6658414 , 188.62504569])
"""
# Test input array/value
airtemp = _arraytest(airtemp)
# calculate saturation vapour pressure at temperature
es = es_calc(airtemp) # in kPa
# Calculate Delta
Delta = es * 4098.0 / ((airtemp + 237.3) ** 2) * 1000
return Delta # in Pa/K
def ea_calc(airtemp=scipy.array([]), rh=scipy.array([])):
"""
Function to calculate actual vapour pressure from relative humidity:
.. math::
e_a = \\frac{rh \\cdot e_s}{100}
where es is the saturated vapour pressure at temperature T.
Parameters:
- airtemp: array of measured air temperatures [Celsius].
- rh: Relative humidity [%].
Returns:
- ea: array of actual vapour pressure [Pa].
Examples
--------
>>> ea_calc(25,60)
1900.0946514729308
"""
# Test input array/value
airtemp, rh = _arraytest(airtemp, rh)
# Calculate saturation vapour pressures
    es = es_calc(airtemp) * 1000 # convert kPa to Pa
# Calculate actual vapour pressure
eact = rh / 100.0 * es
return eact # in Pa
def es_calc(airtemp):
"""
Function to calculate saturated vapour pressure from temperature.
Uses the Arden-Buck equations.
Parameters:
- airtemp : (data-type) measured air temperature [Celsius].
Returns:
- es : (data-type) saturated vapour pressure [kPa].
References
----------
https://en.wikipedia.org/wiki/Arden_Buck_equation
Buck, A. L. (1981), "New equations for computing vapor pressure and enhancement
factor", J. Appl. Meteorol., 20: 1527–1532
Buck (1996), Buck Research CR-1A User's Manual, Appendix 1. (PDF)
Examples
--------
>>> es_calc(30.0)
4.245126
>>> x = [20, 25]
>>> es_calc(x)
array([ 2.338340, 3.168531])
"""
airtemp = pd.to_numeric(airtemp, errors="coerce")
# Calculate saturated vapour pressures, distinguish between water/ice
mask = airtemp > 0
es = pd.Series(0.0, index=airtemp.index)
# Calculate saturation vapour pressure over liquid water.
es[mask] = 6.1121 * np.exp(
(
(18.678 - (airtemp[mask] / 234.5))
* (airtemp[mask] / (257.14 + airtemp[mask]))
).astype(float)
)
# Calculate saturation vapour pressure for ice
es[~mask] = 6.1115 * np.exp(
(
(23.036 - (airtemp[~mask] / 333.7))
* (airtemp[~mask] / (279.82 + airtemp[~mask]))
).astype(float)
)
# Convert from hPa to kPa
es = es / 10.0
return es # in kPa
def gamma_calc(airtemp=scipy.array([]), rh=scipy.array([]), airpress=scipy.array([])):
"""
Function to calculate the psychrometric constant gamma.
.. math::
\\gamma = \\frac{c_p \\cdot p}{0.66 \\cdot \\lambda}
where p is the air pressure and lambda the latent heat of vapourisation.
Parameters:
- airtemp: array of measured air temperature [Celsius].
- rh: array of relative humidity values[%].
- airpress: array of air pressure data [Pa].
Returns:
- gamma: array of psychrometric constant values [Pa K-1].
References
----------
J. Bringfelt. Test of a forest evapotranspiration model. Meteorology and
Climatology Reports 52, SMHI, Norrköpping, Sweden, 1986.
Examples
--------
>>> gamma_calc(10,50,101300)
66.26343318657227
>>> t = [10, 20, 30]
>>> rh = [10, 20, 30]
>>> airpress = [100000, 101000, 102000]
>>> gamma_calc(t,rh,airpress)
array([ 65.25518798, 66.65695779, 68.24239285])
"""
# Test input array/value
airtemp, rh, airpress = _arraytest(airtemp, rh, airpress)
# Calculate cp and Lambda values
cp = cp_calc(airtemp, rh, airpress)
L = L_calc(airtemp)
# Calculate gamma
gamma = cp * airpress / (0.622 * L)
    return gamma # in Pa/K
def L_calc(airtemp=scipy.array([])):
"""
Function to calculate the latent heat of vapourisation from air temperature.
Parameters:
- airtemp: (array of) air temperature [Celsius].
Returns:
- L: (array of) lambda [J kg-1 K-1].
References
----------
J. Bringfelt. Test of a forest evapotranspiration model. Meteorology and
Climatology Reports 52, SMHI, Norrköpping, Sweden, 1986.
Examples
--------
>>> L_calc(25)
2440883.8804625
>>> t=[10, 20, 30]
>>> L_calc(t)
array([ 2476387.3842125, 2452718.3817125, 2429049.3792125])
"""
# Test input array/value
airtemp = _arraytest(airtemp)
# Calculate lambda
L = 4185.5 * (751.78 - 0.5655 * (airtemp + 273.15))
return L # in J/kg
def pottemp(airtemp=scipy.array([]), rh=scipy.array([]), airpress=scipy.array([])):
"""
Function to calculate the potential temperature air, theta, from air
temperatures, relative humidity and air pressure. Reference pressure
1000 hPa.
Parameters:
- airtemp: (array of) air temperature data [Celsius].
- rh: (array of) relative humidity data [%].
- airpress: (array of) air pressure data [Pa].
Returns:
- theta: (array of) potential air temperature data [Celsius].
Examples
--------
>>> t = [5, 10, 20]
>>> rh = [45, 65, 89]
>>> airpress = [101300, 102000, 99800]
>>> pottemp(t,rh,airpress)
array([ 3.97741582, 8.40874555, 20.16596828])
>>> pottemp(5,45,101300)
3.977415823848844
"""
# Test input array/value
airtemp, rh, airpress = _arraytest(airtemp, rh, airpress)
# Determine cp
cp = cp_calc(airtemp, rh, airpress)
# Determine theta
theta = (airtemp + 273.15) * pow((100000.0 / airpress), (287.0 / cp)) - 273.15
return theta # in degrees celsius
def rho_calc(airtemp=scipy.array([]), rh=scipy.array([]), airpress=scipy.array([])):
"""
Function to calculate the density of air, rho, from air
temperatures, relative humidity and air pressure.
.. math::
\\rho = 1.201 \\cdot \\frac{290.0 \\cdot (p - 0.378 \\cdot e_a)}{1000 \\cdot (T + 273.15)} / 100
Parameters:
- airtemp: (array of) air temperature data [Celsius].
- rh: (array of) relative humidity data [%].
- airpress: (array of) air pressure data [Pa].
Returns:
- rho: (array of) air density data [kg m-3].
Examples
--------
>>> t = [10, 20, 30]
>>> rh = [10, 20, 30]
>>> airpress = [100000, 101000, 102000]
>>> rho_calc(t,rh,airpress)
array([ 1.22948419, 1.19787662, 1.16635358])
>>> rho_calc(10,50,101300)
1.2431927125520903
"""
# Test input array/value
airtemp, rh, airpress = _arraytest(airtemp, rh, airpress)
# Calculate actual vapour pressure
eact = ea_calc(airtemp, rh)
# Calculate density of air rho
rho = (
1.201
* (290.0 * (airpress - 0.378 * eact))
/ (1000.0 * (airtemp + 273.15))
/ 100.0
)
return rho # in kg/m3
def sun_NR(doy=scipy.array([]), lat=float):
"""
Function to calculate the maximum sunshine duration [h] and incoming
    radiation [J day-1] at the top of the atmosphere from day of year and
latitude.
Parameters:
- doy: (array of) day of year.
- lat: latitude in decimal degrees, negative for southern hemisphere.
Returns:
- N: (float, array) maximum sunshine hours [h].
- Rext: (float, array) extraterrestrial radiation [J day-1].
Notes
-----
Only valid for latitudes between 0 and 67 degrees (i.e. tropics
and temperate zone).
References
----------
R.G. Allen, L.S. Pereira, D. Raes and M. Smith (1998). Crop
Evaporation - Guidelines for computing crop water requirements,
FAO - Food and Agriculture Organization of the United Nations.
Irrigation and drainage paper 56, Chapter 3. Rome, Italy.
(http://www.fao.org/docrep/x0490e/x0490e07.htm)
Examples
--------
>>> sun_NR(50,60)
(9.1631820597268163, 9346987.824773483)
>>> days = [100,200,300]
>>> latitude = 52.
>>> sun_NR(days,latitude)
(array([ 13.31552077, 15.87073276, 9.54607624]), array([ 29354803.66244921, 39422316.42084264, 12619144.54566777]))
"""
# Test input array/value
doy, lat = _arraytest(doy, lat)
# Set solar constant [W/m2]
S = 1367.0 # [W/m2]
# Print warning if latitude is above 67 degrees
if abs(lat) > 67.0:
print("WARNING: Latitude outside range of application (0-67 degrees).\n)")
# Convert latitude [degrees] to radians
latrad = lat * math.pi / 180.0
# calculate solar declination dt [radians]
dt = 0.409 * scipy.sin(2 * math.pi / 365 * doy - 1.39)
# calculate sunset hour angle [radians]
ws = scipy.arccos(-scipy.tan(latrad) * scipy.tan(dt))
# Calculate sunshine duration N [h]
N = 24 / math.pi * ws
# Calculate day angle j [radians]
j = 2 * math.pi / 365.25 * doy
# Calculate relative distance to sun
dr = 1.0 + 0.03344 * scipy.cos(j - 0.048869)
# Calculate Rext
Rext = (
S
* 86400
/ math.pi
* dr
* (
ws * scipy.sin(latrad) * scipy.sin(dt)
+ scipy.sin(ws) * scipy.cos(latrad) * scipy.cos(dt)
)
)
return N, Rext
def vpd_calc(airtemp=scipy.array([]), rh=scipy.array([])):
"""
Function to calculate vapour pressure deficit.
Parameters:
- airtemp: measured air temperatures [Celsius].
- rh: (array of) rRelative humidity [%].
Returns:
- vpd: (array of) vapour pressure deficits [Pa].
Examples
--------
>>> vpd_calc(30,60)
1697.090397862653
>>> T=[20,25]
>>> RH=[50,100]
>>> vpd_calc(T,RH)
array([ 1168.54009896, 0. ])
"""
# Test input array/value
airtemp, rh = _arraytest(airtemp, rh)
# Calculate saturation vapour pressures
    es = es_calc(airtemp) * 1000 # convert kPa to Pa
eact = ea_calc(airtemp, rh)
# Calculate vapour pressure deficit
vpd = es - eact
    return vpd # in Pa
def windvec(u=scipy.array([]), D=scipy.array([])):
"""
Function to calculate the wind vector from time series of wind
speed and direction.
Parameters:
- u: array of wind speeds [m s-1].
- D: array of wind directions [degrees from North].
Returns:
- uv: Vector wind speed [m s-1].
- Dv: Vector wind direction [degrees from North].
Examples
--------
>>> u = scipy.array([[ 3.],[7.5],[2.1]])
>>> D = scipy.array([[340],[356],[2]])
>>> windvec(u,D)
(4.162354202836905, array([ 353.2118882]))
>>> uv, Dv = windvec(u,D)
>>> uv
4.162354202836905
>>> Dv
array([ 353.2118882])
"""
# Test input array/value
u, D = _arraytest(u, D)
ve = 0.0 # define east component of wind speed
vn = 0.0 # define north component of wind speed
D = D * math.pi / 180.0 # convert wind direction degrees to radians
for i in range(0, len(u)):
ve = ve + u[i] * math.sin(D[i]) # calculate sum east speed components
vn = vn + u[i] * math.cos(D[i]) # calculate sum north speed components
ve = -ve / len(u) # determine average east speed component
vn = -vn / len(u) # determine average north speed component
uv = math.sqrt(ve * ve + vn * vn) # calculate wind speed vector magnitude
# Calculate wind speed vector direction
vdir = scipy.arctan2(ve, vn)
vdir = vdir * 180.0 / math.pi # Convert radians to degrees
if vdir < 180:
Dv = vdir + 180.0
else:
if vdir > 180.0:
Dv = vdir - 180
else:
Dv = vdir
    return uv, Dv # uv in m/s, Dv in degrees from North
if __name__ == "__main__":
import doctest
doctest.testmod()
print("Ran all tests...")
| bsd-3-clause | -2,949,205,045,050,586,000 | 26.774194 | 128 | 0.582921 | false | 3.28494 | true | false | false |
kurtraschke/camelot | camelot/view/controls/actionsbox.py | 1 | 2012 | # ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / [email protected]
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact [email protected]
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# [email protected]
#
# ============================================================================
"""Actions box"""
import logging
logger = logging.getLogger('controls.actionsbox')
from PyQt4 import QtGui
from camelot.core.utils import ugettext as _
class ActionsBox(QtGui.QGroupBox):
"""A box containing actions to be applied to a view"""
def __init__(self, parent, *args, **kwargs):
QtGui.QGroupBox.__init__(self, _('Actions'), parent)
logger.debug('create actions box')
self.args = args
self.kwargs = kwargs
def setActions(self, actions):
action_widgets = []
logger.debug('setting actions')
# keep action object alive to allow them to receive signals
self.actions = actions
layout = QtGui.QVBoxLayout()
for action in actions:
action_widget = action.render(self, *self.args)
layout.addWidget(action_widget)
action_widgets.append(action_widget)
self.setLayout(layout)
return action_widgets
| gpl-2.0 | -3,004,606,427,372,451,300 | 35.581818 | 79 | 0.647117 | false | 4.174274 | false | false | false |
fakedrake/WikipediaBase-skinz | wikipediabase/context.py | 1 | 2173 | """
"""
from functions import FunctionSkin
from skin import Skin
class Context(object):
"""
Context is a namespace that manages the stack of skins.
"""
_skin = None
@classmethod
def skins(cls):
s = cls._skin
while s:
s = s.parent_skin
yield s
@classmethod
def get_skin(cls, function=False):
"""
See if this skin will do. If not create an overlay skin and return
it. If you want a specific skin type use 'set_skin' and then this.
- function: Specify that you will need the skin for storing
functions. Will overlay a new one.
"""
need_skin = function and not isinstance(cls._skin, FunctionSkin) or \
cls._skin is None
if need_skin:
cls.set_skin(function and FunctionSkin() or Skin())
return cls._skin
@classmethod
def set_skin(cls, skin, child=True):
"""
Add a layer overlay to skin.
- skin: skin to replace with
- child: False ignores all the so-far structure and replaces.
"""
if child:
skin.set_parent(cls._skin)
cls._skin = skin
return cls._skin
@classmethod
def register_function(cls, fn, name=None, domain=None, skin=None, append=True, mapping=True, **kw):
"""
Register a function under domain.
- name: Give a name to the function. Fallback to function name
- domain: Skin domain. Fallback to name
- skin: Use specific skin (not appended)
- append: the domain is a collection. Append to it.
- mapping: the domain is a mapping, ignore if not appending
"""
s = skin or cls.get_skin(function=True)
name = name or fn.__name__
domain = domain or name
# XXX: this might not be the place to interpret append == None
# as append == True
if append or append is None:
if mapping:
s.append(domain, (name, fn), coll_type=dict)
else:
s.append(domain, fn, coll_type=list)
else:
s.set(domain or name, fn)
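    # Illustrative usage (editor's sketch): with the default append/mapping
    # behaviour a function is stored in a dict keyed by its own name under the
    # given domain, e.g.
    #
    #   def synonyms(symbol):
    #       return ["capital", "capital city"]
    #
    #   Context.register_function(synonyms, domain="attributes")
    #
    # after which the function-skin's "attributes" domain maps "synonyms" to
    # the function. How that dict is read back depends on the Skin class,
    # which is not shown in this file.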
| bsd-3-clause | -1,320,062,703,275,934,500 | 25.5 | 103 | 0.567418 | false | 4.219417 | false | false | false |
un33k/problems | sorts/heap.py | 1 | 3559 | """
Properties
O(1) extra space
O(n lg(n)) time
Not stable: (original ordering is lost during the heap creation, which comes first)
Not really adaptive: (doesn't takes advantage of existing order in its input)
"""
"""
Heap sort is simple to implement, performs an O(n lg(n)) in-place sort,
but is not stable as any information about the ordering of the items
in the original sequence was lost during the heap creation stage, which came first.
Heapsort is not stable because operations on the heap can change the
relative order of equal items.
The first loop, the O(n) "heapify" phase, puts the array into heap order.
The second loop, the O(n lg(n)) "sortdown" phase, repeatedly extracts
the maximum and restores heap order.
The sink function is written recursively for clarity. Thus, as shown,
the code requires O(lg(n)) space for the recursive call stack. However,
the tail recursion in sink() is easily converted to iteration,
which yields the O(1) space bound.
Both phases are slightly adaptive, though not in any particularly
useful manner. In the nearly sorted case, the heapify phase destroys
the original order. In the reversed case, the heapify phase is as fast
as possible since the array starts in heap order, but then the sortdown
phase is typical. In the few unique keys case, there is some speedup
but not as much as in shell sort or 3-way quicksort.
"""
import sys
import math
import cProfile
def swap(aList, iIndex, jIndex):
"""
Given a `list` and two indices, it swaps the contents.
"""
aList[iIndex], aList[jIndex] = aList[jIndex], aList[iIndex]
def get_left_child(iIndex):
"""
Given an index it returns it's left child's index.
"""
return 2 * iIndex + 1
def get_right_child(iIndex):
"""
Given an index it returns it's right child's index.
"""
return 2 * iIndex + 2
def get_parent(iIndex):
"""
Given an index, it returns the index of it's parent.
"""
return int(math.floor((iIndex - 1) / 2))
def heapify(aList, iEnd, iIndex):
"""
Given a list, and its size and an index, this function
ensures all items are in descending order (children < parent)
"""
iLeft = get_left_child(iIndex)
iRight = get_right_child(iIndex)
iLargest = iIndex
if iLeft < iEnd and aList[iLeft] > aList[iLargest]:
iLargest = iLeft
if iRight < iEnd and aList[iRight] > aList[iLargest]:
iLargest = iRight
if iLargest != iIndex:
swap(aList, iIndex, iLargest)
heapify(aList, iEnd, iLargest)
def build_heap(aList):
"""
Given a list, it builds a heap using the list.
"""
iEnd = len(aList)
iStart = iEnd / 2 - 1 # Root of a tree is @ size/2-1
for iIndex in range(iStart, -1, -1):
heapify(aList, iEnd, iIndex)
def heap_sort(aList):
"""
Given a unsorted list of integers other comparable types,
it rearrange the integers in natural order in place in an
ascending order.
Example: Input: [8,5,3,1,7,6,0,9,4,2,5]
Output: [0,1,2,3,4,5,5,6,7,8,9]
"""
iEnd = len(aList)
build_heap(aList)
for iIndex in range(iEnd-1, 0, -1):
swap(aList, iIndex, 0)
heapify(aList, iIndex, 0)
return aList
def run_test():
"""
Test function.
"""
print "---------------------"
aList = [8, 5, 3, 1, 9, 6, 0, 7, 4, 2, 5]
aList = heap_sort(aList)
print "aList Sorted. Ascending = {}\n".format(aList)
if __name__ == "__main__":
"""
Run the code and profile it.
"""
cProfile.run('run_test()')
| bsd-2-clause | 966,786,006,663,843,300 | 28.413223 | 87 | 0.654678 | false | 3.280184 | false | false | false |
gabrielaraujof/setupam | tests/unit/setupam/builder_test/speaker_test.py | 2 | 2092 | #!/usr/bin/env python3
# Copyright (C) 2014 Gabriel F. Araujo
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import unittest.mock as mk
from os import path
import setupam.speaker
class SpeakerTest(unittest.TestCase):
def setUp(self):
self.src_path = '/home/user/source'
self.speaker = setupam.speaker.Speaker(self.src_path)
# TODO Will include tests for speaker class when it's extended.
class ResourceTest(unittest.TestCase):
def setUp(self):
patcher = mk.patch('{}.{}'.format(self.module_to_patch.__name__, self.class_to_patch.__name__))
self.addCleanup(patcher.stop)
self.mock = patcher.start()
args, kwargs = self.builder_args
self.builder = setupam.speaker.SpeakerBuilder(*args, **kwargs)
def check_call(self, args, kwargs, expected_calls):
func = getattr(self.builder, self.method_under_test.__name__)
func(*args, **kwargs)
self.mock.assert_has_calls(expected_calls)
def check_all_calls(self):
for value in self.assertion_values:
self.check_call(**value)
@staticmethod
def create_calls(*args, **kwargs):
# Create proper paths from args
paths = (path.join(*arg) if isinstance(arg, tuple) else arg for arg in args)
# Create the calls object with the paths given
calls = [mk.call(), mk.call().populate(*paths, **kwargs)]
return calls
| gpl-2.0 | -6,941,251,854,314,008,000 | 35.701754 | 103 | 0.691205 | false | 3.917603 | true | false | false |
amarjeetkapoor1/Sim | Sim_site/drawing_freecad/views.py | 1 | 2930 | from django.shortcuts import render
from django.http import HttpResponse
import os, threading, csv, tempfile
from collections import OrderedDict
#from django.core.servers.basehttp import FileWrapper
#from wsgiref.util import FileWrapper
# Create your views here.
def index(request):
return render(request, 'drawing_freecad/index.html')
#lists = {'stories':'','dep_of_foun':'','plinth_lev':'','cclear_height':'',
# 'dep_slab':'','rep_span_len':'','rep_span_wid':'','col_type':'',
# 'len_col':'','wid_col':'', 'radius_col':'','dep_beam':'',
# 'wid_beam':''}
#
lists = OrderedDict([('stories', ''), ('dep_of_foun', ''), ('plinth_lev', ''),
('cclear_height', ''), ('dep_slab', ''), ('rep_span_len', ''),
('rep_span_wid', ''), ('col_type', ''), ('len_col', ''),
('wid_col', ''), ('radius_col', ''), ('dep_beam', ''),
('wid_beam', '')])
lis = ['stories','dep_of_foun','plinth_lev','cclear_height','dep_slab','rep_span_len','rep_span_wid','col_type','len_col','wid_col','radius_col','dep_beam','wid_beam']
#bb = []
def specs(request):
try:
global lists
global lis
bb = list()
for var in lists.keys():
lists[var] = request.POST.get(var)
# print("session %s" %request.session[var])
print lists
# print lists['rep_span_len']
# print("list is : %s" %bb)
f = open('drawing_freecad/some.csv', 'w')
ww = csv.writer(f, delimiter=' ')
a = []
for i in lists.keys():
a.append(lists[i])
ww.writerow(a)
f.close()
os.system('rm project.fcstd')
os.system('cd drawing_freecad/FreeCAD_macros && freecadcmd drawing.py')
# print l
# print request.POST
# print len(request.POST)
return render(request, 'drawing_freecad/specs.html', {'lists': lists})
except:
return render(request, 'drawing_freecad/specs.html',
{'message': 'please fill again'})
def download(request):
os.system('cd drawing_freecad/drawings/svg_pdf && rm -f *')
os.system('cd drawing_freecad/drawings && rm -f drawings.zip')
os.system('cp drawing_freecad/project.fcstd ./drawing_freecad/drawings/svg_pdf/')
os.system('cd drawing_freecad/FreeCAD_macros && freecadcmd savepdf.FCMacro')
command = "./drawing_freecad/drawings/drawings.zip"
f = open(command)
response = HttpResponse(f, content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename="drawings.zip"'
return response
draw_list = OrderedDict([('x_dir', ''), ('y_dir', ''), ('z_dir', ''),
('hid_lines', ''), ('scale_size', ''), ('rotation', '')])
def drawing(request):
global draw_list
for i in draw_list.keys():
draw_list[i] = request.POST.get(i)
print draw_list
return render(request,'web_app/drawing.html', {'draw_list':draw_list})
| mit | -7,015,941,902,497,477,000 | 37.051948 | 167 | 0.582594 | false | 3.270089 | false | false | false |
Callek/build-relengapi | relengapi/blueprints/tokenauth/tables.py | 1 | 1677 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sqlalchemy as sa
from relengapi.blueprints.tokenauth import types
from relengapi.lib import db
from relengapi.lib.permissions import p
class Token(db.declarative_base('relengapi')):
__tablename__ = 'auth_tokens'
def __init__(self, permissions=None, **kwargs):
if permissions is not None:
kwargs['_permissions'] = ','.join((str(a) for a in permissions))
super(Token, self).__init__(**kwargs)
id = sa.Column(sa.Integer, primary_key=True)
typ = sa.Column(sa.String(4), nullable=False)
description = sa.Column(sa.Text, nullable=False)
user = sa.Column(sa.Text, nullable=True)
disabled = sa.Column(sa.Boolean, nullable=False)
_permissions = sa.Column(sa.Text, nullable=False)
def to_jsontoken(self):
tok = types.JsonToken(id=self.id, typ=self.typ, description=self.description,
permissions=[str(a) for a in self.permissions],
disabled=self.disabled)
if self.user:
tok.user = self.user
return tok
@property
def permissions(self):
token_permissions = [p.get(permissionstr)
for permissionstr in self._permissions.split(',')]
# silently ignore any nonexistent permissions; this allows us to remove unused
# permissions without causing tokens permitting those permissions to fail
# completely
return [a for a in token_permissions if a]
| mpl-2.0 | -7,511,472,294,940,432,000 | 38.928571 | 86 | 0.648181 | false | 3.983373 | false | false | false |
tungvx/deploy | Django-0.90/django/bin/django-admin.py | 1 | 5157 | #!/usr/bin/env python
from django.core import management
from optparse import OptionParser
import os, sys
ACTION_MAPPING = {
'adminindex': management.get_admin_index,
'createsuperuser': management.createsuperuser,
'createcachetable' : management.createcachetable,
# 'dbcheck': management.database_check,
'init': management.init,
'inspectdb': management.inspectdb,
'install': management.install,
'installperms': management.installperms,
'runserver': management.runserver,
'sql': management.get_sql_create,
'sqlall': management.get_sql_all,
'sqlclear': management.get_sql_delete,
'sqlindexes': management.get_sql_indexes,
'sqlinitialdata': management.get_sql_initial_data,
'sqlreset': management.get_sql_reset,
'sqlsequencereset': management.get_sql_sequence_reset,
'startapp': management.startapp,
'startproject': management.startproject,
'validate': management.validate,
}
NO_SQL_TRANSACTION = ('adminindex', 'createcachetable', 'dbcheck', 'install', 'installperms', 'sqlindexes')
def get_usage():
"""
Returns a usage string. Doesn't do the options stuff, because optparse
takes care of that.
"""
usage = ["usage: %prog action [options]\nactions:"]
available_actions = ACTION_MAPPING.keys()
available_actions.sort()
for a in available_actions:
func = ACTION_MAPPING[a]
usage.append(" %s %s -- %s" % (a, func.args, getattr(func, 'help_doc', func.__doc__)))
return '\n'.join(usage)
class DjangoOptionParser(OptionParser):
def print_usage_and_exit(self):
self.print_help(sys.stderr)
sys.exit(1)
def print_error(msg, cmd):
sys.stderr.write('Error: %s\nRun "%s --help" for help.\n' % (msg, cmd))
sys.exit(1)
def main():
# Parse the command-line arguments. optparse handles the dirty work.
parser = DjangoOptionParser(get_usage())
parser.add_option('--settings',
help='Python path to settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.')
parser.add_option('--pythonpath',
help='Lets you manually add a directory the Python path, e.g. "/home/djangoprojects/myproject".')
options, args = parser.parse_args()
# Take care of options.
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
# Run the appropriate action. Unfortunately, optparse can't handle
# positional arguments, so this has to parse/validate them.
try:
action = args[0]
except IndexError:
parser.print_usage_and_exit()
if not ACTION_MAPPING.has_key(action):
print_error("Your action, %r, was invalid." % action, sys.argv[0])
# switch to english, because django-admin creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we should have a working settings file.
if action not in ('startproject', 'startapp'):
from django.utils import translation
translation.activate('en-us')
if action in ('createsuperuser', 'init', 'validate'):
ACTION_MAPPING[action]()
elif action == 'inspectdb':
try:
param = args[1]
except IndexError:
parser.print_usage_and_exit()
try:
for line in ACTION_MAPPING[action](param):
print line
except NotImplementedError:
sys.stderr.write("Error: %r isn't supported for the currently selected database backend.\n" % action)
sys.exit(1)
elif action == 'createcachetable':
try:
ACTION_MAPPING[action](args[1])
except IndexError:
parser.print_usage_and_exit()
elif action in ('startapp', 'startproject'):
try:
name = args[1]
except IndexError:
parser.print_usage_and_exit()
ACTION_MAPPING[action](name, os.getcwd())
elif action == 'runserver':
if len(args) < 2:
addr = ''
port = '8000'
else:
try:
addr, port = args[1].split(':')
except ValueError:
addr, port = '', args[1]
ACTION_MAPPING[action](addr, port)
else:
from django.core import meta
if action == 'dbcheck':
mod_list = meta.get_all_installed_modules()
else:
try:
mod_list = [meta.get_app(app_label) for app_label in args[1:]]
except ImportError, e:
sys.stderr.write("Error: %s. Are you sure your INSTALLED_APPS setting is correct?\n" % e)
sys.exit(1)
if not mod_list:
parser.print_usage_and_exit()
if action not in NO_SQL_TRANSACTION:
print "BEGIN;"
for mod in mod_list:
output = ACTION_MAPPING[action](mod)
if output:
print '\n'.join(output)
if action not in NO_SQL_TRANSACTION:
print "COMMIT;"
if __name__ == "__main__":
main()
| apache-2.0 | -1,688,051,519,669,631,200 | 35.835714 | 166 | 0.618577 | false | 3.921673 | false | false | false |
simpeg/simpeg | examples/02-mesh/plot_cahn_hilliard.py | 1 | 4043 | """
Mesh: Operators: Cahn Hilliard
==============================
This example is based on the example in the FiPy_ library.
Please see their documentation for more information about the
Cahn-Hilliard equation.
The "Cahn-Hilliard" equation separates a field \\\\( \\\\phi \\\\)
into 0 and 1 with smooth transitions.
.. math::
\\frac{\partial \phi}{\partial t} = \\nabla \cdot D \\nabla \left( \\frac{\partial f}{\partial \phi} - \epsilon^2 \\nabla^2 \phi \\right)
Where \\\\( f \\\\) is the energy function \\\\( f = ( a^2 / 2 )\\\\phi^2(1 - \\\\phi)^2 \\\\)
which drives \\\\( \\\\phi \\\\) towards either 0 or 1, this competes with the term
\\\\(\\\\epsilon^2 \\\\nabla^2 \\\\phi \\\\) which is a diffusion term that creates smooth changes in \\\\( \\\\phi \\\\).
The equation can be factored:
.. math::
\\frac{\partial \phi}{\partial t} = \\nabla \cdot D \\nabla \psi \\\\
\psi = \\frac{\partial^2 f}{\partial \phi^2} (\phi - \phi^{\\text{old}}) + \\frac{\partial f}{\partial \phi} - \epsilon^2 \\nabla^2 \phi
Here we will need the derivatives of \\\\( f \\\\):
.. math::
\\frac{\partial f}{\partial \phi} = (a^2/2)2\phi(1-\phi)(1-2\phi)
\\frac{\partial^2 f}{\partial \phi^2} = (a^2/2)2[1-6\phi(1-\phi)]
The implementation below uses backwards Euler in time with an
exponentially increasing time step. The initial \\\\( \\\\phi \\\\)
is a normally distributed field with a standard deviation of 0.1 and
mean of 0.5. The grid is 60x60 and takes a few seconds to solve ~130
times. The results are seen below, and you can see the field separating
as the time increases.
.. _FiPy: http://www.ctcms.nist.gov/fipy/examples/cahnHilliard/generated/examples.cahnHilliard.mesh2DCoupled.html
"""
from __future__ import print_function
from SimPEG import Mesh, Utils, Solver
import numpy as np
import matplotlib.pyplot as plt
def run(plotIt=True, n=60):
np.random.seed(5)
# Here we are going to rearrange the equations:
# (phi_ - phi)/dt = A*(d2fdphi2*(phi_ - phi) + dfdphi - L*phi_)
# (phi_ - phi)/dt = A*(d2fdphi2*phi_ - d2fdphi2*phi + dfdphi - L*phi_)
# (phi_ - phi)/dt = A*d2fdphi2*phi_ + A*( - d2fdphi2*phi + dfdphi - L*phi_)
# phi_ - phi = dt*A*d2fdphi2*phi_ + dt*A*(- d2fdphi2*phi + dfdphi - L*phi_)
# phi_ - dt*A*d2fdphi2 * phi_ = dt*A*(- d2fdphi2*phi + dfdphi - L*phi_) + phi
# (I - dt*A*d2fdphi2) * phi_ = dt*A*(- d2fdphi2*phi + dfdphi - L*phi_) + phi
# (I - dt*A*d2fdphi2) * phi_ = dt*A*dfdphi - dt*A*d2fdphi2*phi - dt*A*L*phi_ + phi
# (dt*A*d2fdphi2 - I) * phi_ = dt*A*d2fdphi2*phi + dt*A*L*phi_ - phi - dt*A*dfdphi
# (dt*A*d2fdphi2 - I - dt*A*L) * phi_ = (dt*A*d2fdphi2 - I)*phi - dt*A*dfdphi
h = [(0.25, n)]
M = Mesh.TensorMesh([h, h])
# Constants
D = a = epsilon = 1.
I = Utils.speye(M.nC)
# Operators
A = D * M.faceDiv * M.cellGrad
L = epsilon**2 * M.faceDiv * M.cellGrad
duration = 75
elapsed = 0.
dexp = -5
phi = np.random.normal(loc=0.5, scale=0.01, size=M.nC)
ii, jj = 0, 0
PHIS = []
capture = np.logspace(-1, np.log10(duration), 8)
while elapsed < duration:
dt = min(100, np.exp(dexp))
elapsed += dt
dexp += 0.05
dfdphi = a**2 * 2 * phi * (1 - phi) * (1 - 2 * phi)
d2fdphi2 = Utils.sdiag(a**2 * 2 * (1 - 6 * phi * (1 - phi)))
MAT = (dt*A*d2fdphi2 - I - dt*A*L)
rhs = (dt*A*d2fdphi2 - I)*phi - dt*A*dfdphi
phi = Solver(MAT)*rhs
if elapsed > capture[jj]:
PHIS += [(elapsed, phi.copy())]
jj += 1
if ii % 10 == 0:
print(ii, elapsed)
ii += 1
if plotIt:
fig, axes = plt.subplots(2, 4, figsize=(14, 6))
axes = np.array(axes).flatten().tolist()
for ii, ax in zip(np.linspace(0, len(PHIS)-1, len(axes)), axes):
ii = int(ii)
M.plotImage(PHIS[ii][1], ax=ax)
ax.axis('off')
ax.set_title('Elapsed Time: {0:4.1f}'.format(PHIS[ii][0]))
if __name__ == '__main__':
run()
plt.show()
| mit | 5,154,517,925,884,960,000 | 35.098214 | 141 | 0.566658 | false | 2.665129 | false | false | false |
sivel/ansible | test/units/cli/test_doc.py | 13 | 4110 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.cli.doc import DocCLI, RoleMixin
TTY_IFY_DATA = {
# No substitutions
'no-op': 'no-op',
'no-op Z(test)': 'no-op Z(test)',
# Simple cases of all substitutions
'I(italic)': "`italic'",
'B(bold)': '*bold*',
'M(ansible.builtin.module)': '[ansible.builtin.module]',
'U(https://docs.ansible.com)': 'https://docs.ansible.com',
'L(the user guide,https://docs.ansible.com/user-guide.html)': 'the user guide <https://docs.ansible.com/user-guide.html>',
'R(the user guide,user-guide)': 'the user guide',
'C(/usr/bin/file)': "`/usr/bin/file'",
'HORIZONTALLINE': '\n{0}\n'.format('-' * 13),
# Multiple substitutions
'The M(ansible.builtin.yum) module B(MUST) be given the C(package) parameter. See the R(looping docs,using-loops) for more info':
"The [ansible.builtin.yum] module *MUST* be given the `package' parameter. See the looping docs for more info",
# Problem cases
'IBM(International Business Machines)': 'IBM(International Business Machines)',
'L(the user guide, https://docs.ansible.com/)': 'the user guide <https://docs.ansible.com/>',
'R(the user guide, user-guide)': 'the user guide',
# de-rsty refs and anchors
'yolo :ref:`my boy` does stuff': 'yolo website for `my boy` does stuff',
'.. seealso:: Something amazing': 'See website for: Something amazing',
'.. seealso:: Troublesome multiline\n Stuff goes htere': 'See website for: Troublesome multiline\n Stuff goes htere',
'.. note:: boring stuff': 'Note: boring stuff',
}
@pytest.mark.parametrize('text, expected', sorted(TTY_IFY_DATA.items()))
def test_ttyify(text, expected):
assert DocCLI.tty_ify(text) == expected
def test_rolemixin__build_summary():
obj = RoleMixin()
role_name = 'test_role'
collection_name = 'test.units'
argspec = {
'main': {'short_description': 'main short description'},
'alternate': {'short_description': 'alternate short description'},
}
expected = {
'collection': collection_name,
'entry_points': {
'main': argspec['main']['short_description'],
'alternate': argspec['alternate']['short_description'],
}
}
fqcn, summary = obj._build_summary(role_name, collection_name, argspec)
assert fqcn == '.'.join([collection_name, role_name])
assert summary == expected
def test_rolemixin__build_summary_empty_argspec():
obj = RoleMixin()
role_name = 'test_role'
collection_name = 'test.units'
argspec = {}
expected = {
'collection': collection_name,
'entry_points': {}
}
fqcn, summary = obj._build_summary(role_name, collection_name, argspec)
assert fqcn == '.'.join([collection_name, role_name])
assert summary == expected
def test_rolemixin__build_doc():
obj = RoleMixin()
role_name = 'test_role'
path = '/a/b/c'
collection_name = 'test.units'
entrypoint_filter = 'main'
argspec = {
'main': {'short_description': 'main short description'},
'alternate': {'short_description': 'alternate short description'},
}
expected = {
'path': path,
'collection': collection_name,
'entry_points': {
'main': argspec['main'],
}
}
fqcn, doc = obj._build_doc(role_name, path, collection_name, argspec, entrypoint_filter)
assert fqcn == '.'.join([collection_name, role_name])
assert doc == expected
def test_rolemixin__build_doc_no_filter_match():
obj = RoleMixin()
role_name = 'test_role'
path = '/a/b/c'
collection_name = 'test.units'
entrypoint_filter = 'doesNotExist'
argspec = {
'main': {'short_description': 'main short description'},
'alternate': {'short_description': 'alternate short description'},
}
fqcn, doc = obj._build_doc(role_name, path, collection_name, argspec, entrypoint_filter)
assert fqcn == '.'.join([collection_name, role_name])
assert doc is None
| gpl-3.0 | -2,892,214,703,240,117,000 | 35.371681 | 134 | 0.631144 | false | 3.537005 | true | false | false |
oujezdsky/various_stuff | python/network/LAN.py | 1 | 2224 | import mechanize
import cookielib
from bs4 import BeautifulSoup
from socket import *
def getInfo(ipaddr, userAgent, proxz, hostname):
WEBFORM_NAME = 'search'
browser = mechanize.Browser()
browser.set_handle_robots(False)
browser.set_handle_equiv(True)
browser.set_handle_referer(True)
browser.set_handle_redirect(True)
browser.addheaders = userAgent
# browser.set_proxies(proxz)
cookie_jar = cookielib.LWPCookieJar()
browser.set_cookiejar(cookie_jar)
page = browser.open('https://apps.db.ripe.net/search/query.html')
for form in browser.forms():
if form.name == WEBFORM_NAME:
browser.select_form(WEBFORM_NAME)
browser.form['search:queryString'] = ipaddr
browser.form['search:sources'] = ['GRS']
submission = browser.submit().read()
parsed_submission = BeautifulSoup(submission, 'html.parser')
print ipaddr, '/',hostname
for mainIndex in parsed_submission.find_all('ul', {'class': 'attrblock'}):
for i, li in enumerate(mainIndex.findAll('li')):
if i in range(0, 2):
print '[+] ', li.text
print '\n ########## \n'
import struct
import os
os.popen("sudo ifconfig eth0 promisc")
userAgent = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1')]
proxz = {}
source_ips = []
s = socket(PF_PACKET, SOCK_RAW, ntohs(0x0800))
LOCALHOST=['192.168.0.1','127.0.0.1']
while 1:
pkt = s.recvfrom(2048)
eth_head = pkt[0][:14]
ip_head = pkt[0][14:34]
tcp_head = pkt[0][34:54]
dest_mac, source_mac, seq_number = struct.unpack("!6s6s2s", eth_head)
neco, source_ip, dest_ip = struct.unpack("!12s4s4s", ip_head)
source_port, dest_port, neco2, flag, neco3 = struct.unpack("!HH9ss6s", tcp_head)
source_ip = inet_ntoa(source_ip)
if not source_ip in source_ips and source_ip not in LOCALHOST:
source_ips.append(source_ip)
try:
hostname = gethostbyaddr(source_ip)[0]
except:
hostname = "err reaching hostname"
if hostname != 'apps.db.ripe.net':
getInfo(source_ip, userAgent, proxz, hostname)
| gpl-2.0 | -5,039,745,570,820,303,000 | 36.694915 | 104 | 0.621853 | false | 3.299703 | false | false | false |
chromium/chromium | third_party/protobuf/python/docs/generate_docs.py | 11 | 5938 | #!/usr/bin/env python
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script to generate a list of all modules to use in autosummary.
This script creates a ReStructured Text file for each public module in the
protobuf Python package. The script also updates the table of contents in
``docs/index.rst`` to point to these module references.
To build the docs with Sphinx:
1. Install the needed packages (``sphinx``, ``sphinxcontrib-napoleon`` for
Google-style docstring support). I've created a conda environment file to
make this easier:
.. code:: bash
conda env create -f python/docs/environment.yml
2. (Optional) Generate reference docs files and regenerate index:
.. code:: bash
cd python/docs
python generate_docs.py
3. Run Sphinx.
.. code:: bash
make html
"""
import pathlib
import re
DOCS_DIR = pathlib.Path(__file__).parent.resolve()
PYTHON_DIR = DOCS_DIR.parent
SOURCE_DIR = PYTHON_DIR / "google" / "protobuf"
SOURCE_POSIX = SOURCE_DIR.as_posix()
# Modules which are always included:
INCLUDED_MODULES = (
"google.protobuf.internal.containers",
)
# Packages to ignore, including all modules (unless in INCLUDED_MODULES):
IGNORED_PACKAGES = (
"compiler",
"docs",
"internal",
"pyext",
"util",
)
# Ignored module stems in all packages (unless in INCLUDED_MODULES):
IGNORED_MODULES = (
"any_test_pb2",
"api_pb2",
"unittest",
"source_context_pb2",
"test_messages_proto3_pb2",
"test_messages_proto2",
)
TOC_REGEX = re.compile(
r"\.\. START REFTOC.*\.\. END REFTOC\.\n",
flags=re.DOTALL,
)
TOC_TEMPLATE = """.. START REFTOC, generated by generate_docs.py.
.. toctree::
{toctree}
.. END REFTOC.
"""
AUTOMODULE_TEMPLATE = """.. DO NOT EDIT, generated by generate_docs.py.
.. ifconfig:: build_env == 'readthedocs'
.. warning::
You are reading the documentation for the `latest committed changes
<https://github.com/protocolbuffers/protobuf/tree/master/python>`_ of
the `Protocol Buffers package for Python
<https://developers.google.com/protocol-buffers/docs/pythontutorial>`_.
Some features may not yet be released. Read the documentation for the
latest released package at `googleapis.dev
<https://googleapis.dev/python/protobuf/latest/>`_.
{module}
{underline}
.. automodule:: {module}
:members:
:inherited-members:
:undoc-members:
"""
def find_modules():
modules = []
for module_path in SOURCE_DIR.glob("**/*.py"):
# Determine the (dotted) relative package and module names.
package_path = module_path.parent.relative_to(PYTHON_DIR)
if package_path == SOURCE_DIR:
package_name = ""
module_name = module_path.stem
else:
package_name = package_path.as_posix().replace("/", ".")
module_name = package_name + "." + module_path.stem
# Filter: first, accept anything in the whitelist; then, reject anything
# at package level, then module name level.
if any(include == module_name for include in INCLUDED_MODULES):
pass
elif any(ignored in package_name for ignored in IGNORED_PACKAGES):
continue
elif any(ignored in module_path.stem for ignored in IGNORED_MODULES):
continue
if module_path.name == "__init__.py":
modules.append(package_name)
else:
modules.append(module_name)
return modules
def write_automodule(module):
contents = AUTOMODULE_TEMPLATE.format(module=module, underline="=" * len(module),)
automodule_path = DOCS_DIR.joinpath(*module.split(".")).with_suffix(".rst")
try:
automodule_path.parent.mkdir(parents=True)
except FileExistsError:
pass
with open(automodule_path, "w") as automodule_file:
automodule_file.write(contents)
def replace_toc(modules):
toctree = [module.replace(".", "/") for module in modules]
with open(DOCS_DIR / "index.rst", "r") as index_file:
index_contents = index_file.read()
toc = TOC_TEMPLATE.format(
toctree="\n ".join(toctree)
)
index_contents = re.sub(TOC_REGEX, toc, index_contents)
with open(DOCS_DIR / "index.rst", "w") as index_file:
index_file.write(index_contents)
def main():
modules = list(sorted(find_modules()))
for module in modules:
print("Generating reference for {}".format(module))
write_automodule(module)
print("Generating index.rst")
replace_toc(modules)
if __name__ == "__main__":
main()
| bsd-3-clause | -6,195,281,612,706,544,000 | 29.927083 | 84 | 0.711856 | false | 3.784576 | true | false | false |
andtorg/blacklist | blacklist.py | 1 | 2848 | #!/usr/bin/python
import shutil
import os, errno
import sys
SITES_TO_IGNORE = {
'facebook': {'search_string': 'facebook', 'site_address':'www.facebook.com'}
,'twitter': {'search_string': 'twitter', 'site_address':'twitter.com'}
,'g+': {'search_string': 'plus', 'site_address':'plus.google.com'}
,'gmail': {'search_string': 'mail.google', 'site_address':'mail.google.com'}
,'flickr': {'search_string': 'flickr', 'site_address':'www.flickr.com'}
,'flickr_2': {'search_string': 'flickr', 'site_address':'flickr.com'}
,'repubblica': {'search_string': 'repubblica', 'site_address':'repubblica.it'}
,'repubblica_2': {'search_string': 'repubblica', 'site_address':'www.repubblica.it'}
}
ACCEPTED_ARGUMENTS = {
'act': 'activate the blacklist filter',
'deact': 'deactivate the blacklist filter'
}
# to be changed
host_file = '/etc/hosts'
backup_host_file = '/etc/hosts.bak'
file_lines = []
command = sys.argv[1]
def edit_file_host(command, source_file=host_file, backup_file=backup_host_file):
_check_valid_argument(command)
_remove_file_if_exists(backup_file)
_make_backup_copy(source_file, backup_file)
if command == "act":
_enable_host_filter(source_file)
print "blacklist activated"
else:
_disable_host_filter(source_file)
print "blacklist deactivated"
def _check_valid_argument(arg):
if arg not in ACCEPTED_ARGUMENTS:
_allowed_args = ACCEPTED_ARGUMENTS.keys()
raise IndexError ('{} is not a valid argument. Allowed values are: {}'.format(arg, _allowed_args))
def _enable_host_filter(file_path):
global file_lines
file_lines = _get_file_lines(file_path)
f = open(file_path, 'w')
f.writelines(file_lines)
noisy_sites_lines = _append_noisy_sites(SITES_TO_IGNORE)
f.writelines(noisy_sites_lines)
f.close()
def _append_noisy_sites(sites_dict):
ignoring_site_list = []
ignoring_site_string = '127.0.0.1'
for k in sites_dict:
ignoring_site_list.append('{} {}\n'.format(ignoring_site_string, sites_dict[k]['site_address']))
return ignoring_site_list
def _disable_host_filter(file_path):
global file_lines
file_lines = _get_file_lines(file_path)
cleaned_file_lines = _remove_noisy_sites(file_lines, SITES_TO_IGNORE)
f = open(file_path, 'w')
f.writelines(cleaned_file_lines)
f.close()
def _remove_noisy_sites(lines, sites_dict):
searchable_sites = [x['search_string'] for x in SITES_TO_IGNORE.values()]
allowed_file_lines = [x for x in lines if not any(y in x for y in searchable_sites)]
return allowed_file_lines
def _get_file_lines(file_path):
f = open(file_path, 'r+')
lines = f.readlines()
f.close()
return lines
def _remove_file_if_exists(file):
try:
os.remove(file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
def _make_backup_copy(source_file, backup_file):
shutil.copy2(source_file,backup_file)
if __name__ == '__main__':
edit_file_host(command)
| mit | 877,540,214,930,852,200 | 28.061224 | 100 | 0.692767 | false | 2.712381 | false | false | false |
peterhinch/micropython-async | v3/as_drivers/as_GPS/astests_pyb.py | 1 | 6747 | # astests_pyb.py
# Tests for AS_GPS module. Emulates a GPS unit using a UART loopback.
# Run on a Pyboard with X1 and X2 linked
# Tests for AS_GPS module (asynchronous GPS device driver)
# Based on tests for MicropyGPS by Michael Calvin McCoy
# https://github.com/inmcm/micropyGPS
# Copyright (c) 2018-2020 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
# Ported to uasyncio V3 OK.
from .as_GPS import *
from machine import UART
import uasyncio as asyncio
def callback(gps, _, arg):
print('Fix callback. Time:', gps.utc, arg)
async def run_tests():
uart = UART(4, 9600, read_buf_len=200)
swriter = asyncio.StreamWriter(uart, {})
sreader = asyncio.StreamReader(uart)
sentence_count = 0
test_RMC = ['$GPRMC,180041.896,A,3749.1851,N,08338.7891,W,001.9,154.9,240911,,,A*7A\n',
'$GPRMC,180049.896,A,3749.1808,N,08338.7869,W,001.8,156.3,240911,,,A*70\n',
'$GPRMC,092751.000,A,5321.6802,N,00630.3371,W,0.06,31.66,280511,,,A*45\n']
test_VTG = ['$GPVTG,232.9,T,,M,002.3,N,004.3,K,A*01\n']
test_GGA = ['$GPGGA,180050.896,3749.1802,N,08338.7865,W,1,07,1.1,397.4,M,-32.5,M,,0000*6C\n']
test_GSA = ['$GPGSA,A,3,07,11,28,24,26,08,17,,,,,,2.0,1.1,1.7*37\n',
'$GPGSA,A,3,07,02,26,27,09,04,15,,,,,,1.8,1.0,1.5*33\n']
test_GSV = ['$GPGSV,3,1,12,28,72,355,39,01,52,063,33,17,51,272,44,08,46,184,38*74\n',
'$GPGSV,3,2,12,24,42,058,33,11,34,053,33,07,20,171,40,20,15,116,*71\n',
'$GPGSV,3,3,12,04,12,204,34,27,11,324,35,32,11,089,,26,10,264,40*7B\n',
'$GPGSV,3,1,11,03,03,111,00,04,15,270,00,06,01,010,00,13,06,292,00*74\n',
'$GPGSV,3,2,11,14,25,170,00,16,57,208,39,18,67,296,40,19,40,246,00*74\n',
'$GPGSV,3,3,11,22,42,067,42,24,14,311,43,27,05,244,00,,,,*4D\n',
'$GPGSV,4,1,14,22,81,349,25,14,64,296,22,18,54,114,21,51,40,212,*7D\n',
'$GPGSV,4,2,14,24,30,047,22,04,22,312,26,31,22,204,,12,19,088,23*72\n',
'$GPGSV,4,3,14,25,17,127,18,21,16,175,,11,09,315,16,19,05,273,*72\n',
'$GPGSV,4,4,14,32,05,303,,15,02,073,*7A\n']
test_GLL = ['$GPGLL,3711.0942,N,08671.4472,W,000812.000,A,A*46\n',
'$GPGLL,4916.45,N,12311.12,W,225444,A,*1D\n',
'$GPGLL,4250.5589,S,14718.5084,E,092204.999,A*2D\n',
'$GPGLL,4250.5589,S,14718.5084,E,092204.999,A*2D\n',]
# '$GPGLL,0000.0000,N,00000.0000,E,235947.000,V*2D\n', # Will ignore this one
my_gps = AS_GPS(sreader, fix_cb=callback, fix_cb_args=(42,))
sentence = ''
for sentence in test_RMC:
sentence_count += 1
await swriter.awrite(sentence)
await my_gps.data_received(date=True)
print('Longitude:', my_gps.longitude())
print('Latitude', my_gps.latitude())
print('UTC Time:', my_gps.utc)
print('Speed:', my_gps.speed())
print('Date Stamp:', my_gps.date)
print('Course', my_gps.course)
print('Data is Valid:', my_gps._valid)
print('Compass Direction:', my_gps.compass_direction())
print('')
for sentence in test_GLL:
sentence_count += 1
await swriter.awrite(sentence)
await my_gps.data_received(position=True)
print('Longitude:', my_gps.longitude())
print('Latitude', my_gps.latitude())
print('UTC Time:', my_gps.utc)
print('Data is Valid:', my_gps._valid)
print('')
for sentence in test_VTG:
print('Test VTG', sentence)
sentence_count += 1
await swriter.awrite(sentence)
await asyncio.sleep_ms(200) # Can't wait for course because of position check
print('Speed:', my_gps.speed())
print('Course', my_gps.course)
print('Compass Direction:', my_gps.compass_direction())
print('')
for sentence in test_GGA:
sentence_count += 1
await swriter.awrite(sentence)
await my_gps.data_received(position=True)
print('Longitude', my_gps.longitude())
print('Latitude', my_gps.latitude())
print('UTC Time:', my_gps.utc)
# print('Fix Status:', my_gps.fix_stat)
print('Altitude:', my_gps.altitude)
print('Height Above Geoid:', my_gps.geoid_height)
print('Horizontal Dilution of Precision:', my_gps.hdop)
print('Satellites in Use by Receiver:', my_gps.satellites_in_use)
print('')
for sentence in test_GSA:
sentence_count += 1
await swriter.awrite(sentence)
await asyncio.sleep_ms(200)
print('Satellites Used', my_gps.satellites_used)
print('Horizontal Dilution of Precision:', my_gps.hdop)
print('Vertical Dilution of Precision:', my_gps.vdop)
print('Position Dilution of Precision:', my_gps.pdop)
print('')
for sentence in test_GSV:
sentence_count += 1
await swriter.awrite(sentence)
await asyncio.sleep_ms(200)
print('SV Sentences Parsed', my_gps._last_sv_sentence)
print('SV Sentences in Total', my_gps._total_sv_sentences)
print('# of Satellites in View:', my_gps.satellites_in_view)
data_valid = my_gps._total_sv_sentences > 0 and my_gps._total_sv_sentences == my_gps._last_sv_sentence
print('Is Satellite Data Valid?:', data_valid)
if data_valid:
print('Satellite Data:', my_gps._satellite_data)
print('Satellites Visible:', list(my_gps._satellite_data.keys()))
print('')
print("Pretty Print Examples:")
print('Latitude (degs):', my_gps.latitude_string(DD))
print('Longitude (degs):', my_gps.longitude_string(DD))
print('Latitude (dms):', my_gps.latitude_string(DMS))
print('Longitude (dms):', my_gps.longitude_string(DMS))
print('Latitude (kml):', my_gps.latitude_string(KML))
print('Longitude (kml):', my_gps.longitude_string(KML))
print('Latitude (degs, mins):', my_gps.latitude_string())
print('Longitude (degs, mins):', my_gps.longitude_string())
print('Speed:', my_gps.speed_string(KPH), 'or',
my_gps.speed_string(MPH), 'or',
my_gps.speed_string(KNOT))
print('Date (Long Format):', my_gps.date_string(LONG))
print('Date (Short D/M/Y Format):', my_gps.date_string(DMY))
print('Date (Short M/D/Y Format):', my_gps.date_string(MDY))
print('Time:', my_gps.time_string())
print()
print('### Final Results ###')
print('Sentences Attempted:', sentence_count)
print('Sentences Found:', my_gps.clean_sentences)
print('Sentences Parsed:', my_gps.parsed_sentences)
print('Unsupported sentences:', my_gps.unsupported_sentences)
print('CRC_Fails:', my_gps.crc_fails)
asyncio.run(run_tests())
| mit | -2,437,674,435,312,698,000 | 43.682119 | 110 | 0.605602 | false | 2.759509 | true | false | false |
selurvedu/rfdocs | rfdocs/serializers.py | 3 | 5404 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from rest_framework import serializers
from rest_framework.reverse import reverse
from rfdocs.models import (RFLibrary, RFKeyword,
RFLibraryVersion, RFTag)
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
"""
A ModelSerializer which takes an additional `field` argument that
controls which fields should be displayed.
"""
def __init__(self, *args, **kwargs):
default_fields = kwargs.pop('fields', [])
super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
existing = set(self.fields.keys())
if self.instance:
if 'request' in self.context:
requested_fields = self.context['request'].GET.getlist('field', [])
default_fields.extend([f for f in requested_fields if f in existing])
allowed = set(default_fields)
if allowed:
for field_name in existing - allowed:
self.fields.pop(field_name)
class ConsecutiveHyperlinkedField(serializers.HyperlinkedIdentityField):
"""
Inheritor of serializers.HyperlinkedIdentityField serializer that allows to define a tuple of
lookup fields, where field can be dot-notated string.
"""
def __init__(self, *args, **kwargs):
self.lookup_fields = kwargs.pop('lookup_fields', None)
super(ConsecutiveHyperlinkedField, self).__init__(*args, **kwargs)
@staticmethod
def getattr_consecutive(obj, dot_notated_string):
"""
Allows dot-notated strings to be passed to `getattr`
"""
return reduce(getattr, dot_notated_string.split('.'), obj)
def get_url(self, obj, view_name, request, url_format):
args = ()
if self.lookup_fields:
args = (self.getattr_consecutive(obj, arg) for arg in self.lookup_fields)
return reverse(view_name, args=args, request=request, format=url_format)
class RFKeywordSerializer(serializers.HyperlinkedModelSerializer, DynamicFieldsModelSerializer):
version = ConsecutiveHyperlinkedField(
lookup_fields=('version.library.slug', 'version.slug',),
view_name='rflibraryversion_detail_api',
)
library = ConsecutiveHyperlinkedField(
lookup_fields=('version.library.slug',),
view_name='rflibrary_detail_api',
)
url = ConsecutiveHyperlinkedField(
lookup_fields=('version.library.slug', 'version.slug', 'name',),
view_name='rfkeyword_detail_api',
)
class Meta:
model = RFKeyword
fields = ('name', 'url', 'version', 'arguments', 'documentation', 'library')
class RFLibraryVersionSerializer(serializers.HyperlinkedModelSerializer,
DynamicFieldsModelSerializer):
def __init__(self, *args, **kwargs):
super(RFLibraryVersionSerializer, self).__init__(*args, **kwargs)
if 'request' in self.context:
requested_fields = self.context['request'].GET.getlist('keyword_field', [])
allowed = set(RFKeywordSerializer.Meta.fields).intersection(set(requested_fields))
if allowed:
self.fields['keywords'] = RFKeywordSerializer(
fields=list(allowed),
many=True,
context={'request': self.context['request']}
)
library = serializers.StringRelatedField()
library_url = ConsecutiveHyperlinkedField(
lookup_fields=('library.slug', ),
view_name='rflibrary_detail_api'
)
url = ConsecutiveHyperlinkedField(
lookup_fields=('library.slug', 'slug'),
view_name='rflibraryversion_detail_api',
)
keywords = RFKeywordSerializer(
many=True,
fields=['name', 'url', 'arguments']
)
class Meta:
model = RFLibraryVersion
fields = ['name', 'library', 'library_url', 'slug', 'url', 'source_url', 'keywords', 'status',
'date_added', 'date_modified', 'date_deprecate']
class RFLibrarySerializer(serializers.HyperlinkedModelSerializer, DynamicFieldsModelSerializer):
def __init__(self, *args, **kwargs):
super(RFLibrarySerializer, self).__init__(*args, **kwargs)
if 'request' in self.context:
requested_fields = self.context['request'].GET.getlist('version_field', [])
allowed = set(RFLibraryVersionSerializer.Meta.fields).intersection(set(requested_fields))
if allowed:
self.fields['versions'] = RFLibraryVersionSerializer(
fields=list(allowed),
many=True,
context={'request': self.context['request']}
)
url = serializers.HyperlinkedIdentityField(
view_name='rflibrary_detail_api',
lookup_field='slug'
)
versions = RFLibraryVersionSerializer(
fields=['name', 'url'],
many=True
)
class Meta:
model = RFLibrary
fields = ('name', 'slug', 'url', 'versions')
lookup_field = 'slug'
class RFTagSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
lookup_field='slug',
view_name='rftag_detail_api',
)
versions = RFLibraryVersionSerializer(fields=('name', 'url'))
class Meta:
model = RFTag
fields = ('name', 'slug', 'url', 'versions')
| mit | -520,364,985,479,683,200 | 35.026667 | 102 | 0.623612 | false | 4.215289 | false | false | false |
edineicolli/daruma-exemplo-python | scripts/fiscal/ui_fiscal_icnfabrir.py | 1 | 5516 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_fiscal_icnfabrir.ui'
#
# Created: Mon Nov 24 22:25:54 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
from pydaruma.pydaruma import iCNFAbrir_ECF_Daruma
from scripts.fiscal.retornofiscal import tratarRetornoFiscal
class Ui_ui_FISCAL_iCNFAbrir(QtGui.QWidget):
def __init__(self):
super(Ui_ui_FISCAL_iCNFAbrir, self).__init__()
self.setupUi(self)
self.pushButton.clicked.connect(self.on_pushButtonEnviar_clicked)
self.pushButton_2.clicked.connect(self.on_pushButtonCancelar_clicked)
def on_pushButtonEnviar_clicked(self):
# Declaraçao das Variaveis que recebem os valores da UI
StrCPF = self.lineEditCPF.text()
StrNome = self.lineEditNome.text()
StrEndereco = self.lineEditEndereco.text()
# Chamada do Método
tratarRetornoFiscal(iCNFAbrir_ECF_Daruma(StrCPF,StrNome,StrEndereco), self)
def on_pushButtonCancelar_clicked(self):
self.close()
def setupUi(self, ui_FISCAL_iCNFAbrir):
ui_FISCAL_iCNFAbrir.setObjectName("ui_FISCAL_iCNFAbrir")
ui_FISCAL_iCNFAbrir.resize(263, 123)
ui_FISCAL_iCNFAbrir.setMinimumSize(QtCore.QSize(263, 123))
ui_FISCAL_iCNFAbrir.setMaximumSize(QtCore.QSize(263, 123))
self.verticalLayout = QtGui.QVBoxLayout(ui_FISCAL_iCNFAbrir)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label_nome = QtGui.QLabel(ui_FISCAL_iCNFAbrir)
self.label_nome.setObjectName("label_nome")
self.gridLayout.addWidget(self.label_nome, 0, 0, 1, 1)
self.lineEditNome = QtGui.QLineEdit(ui_FISCAL_iCNFAbrir)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditNome.sizePolicy().hasHeightForWidth())
self.lineEditNome.setSizePolicy(sizePolicy)
self.lineEditNome.setMaxLength(255)
self.lineEditNome.setCursorPosition(27)
self.lineEditNome.setObjectName("lineEditNome")
self.gridLayout.addWidget(self.lineEditNome, 0, 1, 1, 1)
self.label_endereco = QtGui.QLabel(ui_FISCAL_iCNFAbrir)
self.label_endereco.setObjectName("label_endereco")
self.gridLayout.addWidget(self.label_endereco, 1, 0, 1, 1)
self.lineEditEndereco = QtGui.QLineEdit(ui_FISCAL_iCNFAbrir)
self.lineEditEndereco.setObjectName("lineEditEndereco")
self.gridLayout.addWidget(self.lineEditEndereco, 1, 1, 1, 1)
self.label_cpf = QtGui.QLabel(ui_FISCAL_iCNFAbrir)
self.label_cpf.setObjectName("label_cpf")
self.gridLayout.addWidget(self.label_cpf, 2, 0, 1, 1)
self.lineEditCPF = QtGui.QLineEdit(ui_FISCAL_iCNFAbrir)
self.lineEditCPF.setObjectName("lineEditCPF")
self.gridLayout.addWidget(self.lineEditCPF, 2, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.pushButton = QtGui.QPushButton(ui_FISCAL_iCNFAbrir)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.pushButton_2 = QtGui.QPushButton(ui_FISCAL_iCNFAbrir)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(ui_FISCAL_iCNFAbrir)
QtCore.QMetaObject.connectSlotsByName(ui_FISCAL_iCNFAbrir)
def retranslateUi(self, ui_FISCAL_iCNFAbrir):
ui_FISCAL_iCNFAbrir.setWindowTitle(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Método iCNFAbrir_ECF_Daruma", None, QtGui.QApplication.UnicodeUTF8))
self.label_nome.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Nome:", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditNome.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Daruma Developers Community", None, QtGui.QApplication.UnicodeUTF8))
self.label_endereco.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Endereço:", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditEndereco.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Shishima Hifumi", None, QtGui.QApplication.UnicodeUTF8))
self.label_cpf.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "CPF:", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditCPF.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "111.111.111-11", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Abrir CNF", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_2.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
| gpl-2.0 | -833,727,759,164,012,000 | 55.244898 | 164 | 0.727322 | false | 3.251917 | false | false | false |
Larhard/Maildir-Notifier | mail_notify.py | 1 | 1199 | #!/bin/env python3
import argparse
import imp
import logging
import sys
import os
import watcher
import config
def main(argv):
parser = argparse.ArgumentParser('mail_notify')
parser.add_argument('maildir')
parser.add_argument('--config', '-c', help="configuration file")
parser.add_argument('--verbose', '-v', help="make me verbose", action='store_true')
parser.add_argument('--debug', help="make me very verbose", action='store_true')
args = parser.parse_args(argv)
logger_level = logging.WARNING
logger_format = '%(levelname)s: %(message)s'
if args.verbose:
logger_level = logging.INFO
if args.debug:
logger_level = logging.NOTSET
logger_format = '%(asctime)-15s: %(name)s: %(levelname)s: %(message)s'
logging.basicConfig(format=logger_format, level=logger_level)
basedir = os.path.dirname(__file__)
config.override(imp.load_source('config', os.path.join(basedir, 'default_config')).__dict__)
if args.config:
config.override(imp.load_source('config', args.config).__dict__)
config.override(args.__dict__)
watcher.watch_maildir(args.maildir)
if __name__ == '__main__':
main(sys.argv[1:])
| mit | 2,440,021,261,854,703,600 | 28.243902 | 96 | 0.661384 | false | 3.579104 | true | false | false |
arenadata/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service_check.py | 2 | 3598 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
import re
from resource_management.libraries.functions.version import compare_versions
from resource_management import *
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
CURL_CONNECTION_TIMEOUT = '5'
class ServiceCheck(Script):
def service_check(self, env):
import params
env.set_params(params)
if params.stack_version != "" and compare_versions(params.stack_version, '4.0') >= 0:
path_to_distributed_shell_jar = "/usr/iop/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar"
else:
path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
yarn_distrubuted_shell_check_cmd = format("yarn org.apache.hadoop.yarn.applications.distributedshell.Client "
"-shell_command ls -num_containers {number_of_nm} -jar {path_to_distributed_shell_jar}")
if params.security_enabled:
kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
smoke_cmd = format("{kinit_cmd} {yarn_distrubuted_shell_check_cmd}")
else:
smoke_cmd = yarn_distrubuted_shell_check_cmd
return_code, out = shell.checked_call(smoke_cmd,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
user=params.smokeuser,
)
m = re.search("appTrackingUrl=(.*),\s", out)
app_url = m.group(1)
splitted_app_url = str(app_url).split('/')
for item in splitted_app_url:
if "application" in item:
application_name = item
json_response_received = False
for rm_host in params.rm_hosts:
info_app_url = "http://" + rm_host + ":" + params.rm_port + "/ws/v1/cluster/apps/" + application_name
get_app_info_cmd = "curl --negotiate -u : -sL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
return_code, stdout = shell.checked_call(get_app_info_cmd,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
user=params.smokeuser,
)
try:
json_response = json.loads(stdout)
json_response_received = True
if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
raise Exception("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
except Exception as e:
pass
if not json_response_received:
raise Exception("Could not get json response from YARN API")
if __name__ == "__main__":
ServiceCheck().execute()
| apache-2.0 | 46,482,568,557,292,920 | 39.426966 | 134 | 0.647582 | false | 3.88973 | false | false | false |
dudanogueira/microerp | microerp/almoxarifado/migrations/0004_auto_20141006_1957.py | 1 | 6177 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('rh', '0001_initial'),
('estoque', '0005_auto_20141001_0953'),
('comercial', '0007_auto_20141006_1852'),
('almoxarifado', '0003_auto_20140917_0843'),
]
operations = [
migrations.CreateModel(
name='LinhaListaMaterial',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantidade_requisitada', models.DecimalField(max_digits=10, decimal_places=2)),
('quantidade_ja_atendida', models.DecimalField(max_digits=10, decimal_places=2)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LinhaListaMaterialCompra',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LinhaListaMaterialEntregue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ListaMaterialCompra',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ativa', models.BooleanField(default=True)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ListaMaterialDoContrato',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ativa', models.BooleanField(default=True)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
('contrato', models.OneToOneField(null=True, blank=True, to='comercial.ContratoFechado')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ListaMaterialEntregue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('entregue', models.BooleanField(default=False)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
('entregue_para', models.ForeignKey(related_name=b'entregue_para_set', to='rh.Funcionario')),
('entregue_por', models.ForeignKey(related_name=b'entregue_por_set', to='rh.Funcionario')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='linhalistamaterialentregue',
name='lista',
field=models.ForeignKey(to='almoxarifado.ListaMaterialEntregue'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterialentregue',
name='produto',
field=models.ForeignKey(to='estoque.Produto'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterialcompra',
name='lista',
field=models.ForeignKey(to='almoxarifado.ListaMaterialCompra'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterialcompra',
name='produto',
field=models.ForeignKey(to='estoque.Produto'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterial',
name='lista',
field=models.ForeignKey(to='almoxarifado.ListaMaterialDoContrato'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterial',
name='produto',
field=models.ForeignKey(to='estoque.Produto'),
preserve_default=True,
),
]
| lgpl-3.0 | 3,194,224,929,188,960,000 | 45.795455 | 127 | 0.58216 | false | 4.021484 | false | false | false |
anryko/ansible | lib/ansible/module_utils/network/panos/panos.py | 38 | 16706 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2018 Palo Alto Networks techbizdev, <[email protected]>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
_MIN_VERSION_ERROR = '{0} version ({1}) < minimum version ({2})'
HAS_PANDEVICE = True
try:
import pandevice
from pandevice.base import PanDevice
from pandevice.firewall import Firewall
from pandevice.panorama import DeviceGroup, Template, TemplateStack
from pandevice.policies import PreRulebase, PostRulebase, Rulebase
from pandevice.device import Vsys
from pandevice.errors import PanDeviceError
except ImportError:
HAS_PANDEVICE = False
def _vstr(val):
return '{0}.{1}.{2}'.format(*val)
class ConnectionHelper(object):
def __init__(self, min_pandevice_version, min_panos_version,
panorama_error, firewall_error):
"""Performs connection initialization and determines params."""
# Params for AnsibleModule.
self.argument_spec = {}
self.required_one_of = []
# Params for pandevice tree construction.
self.vsys = None
self.device_group = None
self.vsys_dg = None
self.rulebase = None
self.template = None
self.template_stack = None
self.vsys_importable = None
self.min_pandevice_version = min_pandevice_version
self.min_panos_version = min_panos_version
self.panorama_error = panorama_error
self.firewall_error = firewall_error
# The PAN-OS device.
self.device = None
def get_pandevice_parent(self, module):
"""Builds the pandevice object tree, returning the parent object.
If pandevice is not installed, then module.fail_json() will be
invoked.
Arguments:
* module(AnsibleModule): the ansible module.
Returns:
* The parent pandevice object based on the spec given to
get_connection().
"""
# Sanity check.
if not HAS_PANDEVICE:
module.fail_json(msg='Missing required library "pandevice".')
# Verify pandevice minimum version.
if self.min_pandevice_version is not None:
pdv = tuple(int(x) for x in pandevice.__version__.split('.'))
if pdv < self.min_pandevice_version:
module.fail_json(msg=_MIN_VERSION_ERROR.format(
'pandevice', pandevice.__version__,
_vstr(self.min_pandevice_version)))
pan_device_auth, serial_number = None, None
if module.params['provider'] and module.params['provider']['ip_address']:
pan_device_auth = (
module.params['provider']['ip_address'],
module.params['provider']['username'],
module.params['provider']['password'],
module.params['provider']['api_key'],
module.params['provider']['port'],
)
serial_number = module.params['provider']['serial_number']
elif module.params.get('ip_address', None) is not None:
pan_device_auth = (
module.params['ip_address'],
module.params['username'],
module.params['password'],
module.params['api_key'],
module.params['port'],
)
msg = 'Classic provider params are deprecated; use "provider" instead'
module.deprecate(msg, '2.12')
else:
module.fail_json(msg='Provider params are required.')
# Create the connection object.
try:
self.device = PanDevice.create_from_device(*pan_device_auth)
except PanDeviceError as e:
module.fail_json(msg='Failed connection: {0}'.format(e))
# Verify PAN-OS minimum version.
if self.min_panos_version is not None:
if self.device._version_info < self.min_panos_version:
module.fail_json(msg=_MIN_VERSION_ERROR.format(
'PAN-OS', _vstr(self.device._version_info),
_vstr(self.min_panos_version)))
# Optional: Firewall via Panorama connectivity specified.
if hasattr(self.device, 'refresh_devices') and serial_number:
fw = Firewall(serial=serial_number)
self.device.add(fw)
self.device = fw
parent = self.device
not_found = '{0} "{1}" is not present.'
pano_mia_param = 'Param "{0}" is required for Panorama but not specified.'
ts_error = 'Specify either the template or the template stack{0}.'
if hasattr(self.device, 'refresh_devices'):
# Panorama connection.
# Error if Panorama is not supported.
if self.panorama_error is not None:
module.fail_json(msg=self.panorama_error)
# Spec: template stack.
tmpl_required = False
added_template = False
if self.template_stack is not None:
name = module.params[self.template_stack]
if name is not None:
stacks = TemplateStack.refreshall(parent, name_only=True)
for ts in stacks:
if ts.name == name:
parent = ts
added_template = True
break
else:
module.fail_json(msg=not_found.format(
'Template stack', name,
))
elif self.template is not None:
tmpl_required = True
else:
module.fail_json(msg=pano_mia_param.format(self.template_stack))
# Spec: template.
if self.template is not None:
name = module.params[self.template]
if name is not None:
if added_template:
module.fail_json(msg=ts_error.format(', not both'))
templates = Template.refreshall(parent, name_only=True)
for t in templates:
if t.name == name:
parent = t
break
else:
module.fail_json(msg=not_found.format(
'Template', name,
))
elif tmpl_required:
module.fail_json(msg=ts_error.format(''))
else:
module.fail_json(msg=pano_mia_param.format(self.template))
# Spec: vsys importable.
vsys_name = self.vsys_importable or self.vsys
if vsys_name is not None:
name = module.params[vsys_name]
if name not in (None, 'shared'):
vo = Vsys(name)
parent.add(vo)
parent = vo
# Spec: vsys_dg or device_group.
dg_name = self.vsys_dg or self.device_group
if dg_name is not None:
name = module.params[dg_name]
if name not in (None, 'shared'):
groups = DeviceGroup.refreshall(parent, name_only=True)
for dg in groups:
if dg.name == name:
parent = dg
break
else:
module.fail_json(msg=not_found.format(
'Device group', name,
))
# Spec: rulebase.
if self.rulebase is not None:
if module.params[self.rulebase] in (None, 'pre-rulebase'):
rb = PreRulebase()
parent.add(rb)
parent = rb
elif module.params[self.rulebase] == 'rulebase':
rb = Rulebase()
parent.add(rb)
parent = rb
elif module.params[self.rulebase] == 'post-rulebase':
rb = PostRulebase()
parent.add(rb)
parent = rb
else:
module.fail_json(msg=not_found.format(
'Rulebase', module.params[self.rulebase]))
else:
# Firewall connection.
# Error if firewalls are not supported.
if self.firewall_error is not None:
module.fail_json(msg=self.firewall_error)
# Spec: vsys or vsys_dg or vsys_importable.
vsys_name = self.vsys_dg or self.vsys or self.vsys_importable
if vsys_name is not None:
parent.vsys = module.params[vsys_name]
# Spec: rulebase.
if self.rulebase is not None:
rb = Rulebase()
parent.add(rb)
parent = rb
# Done.
return parent
def get_connection(vsys=None, device_group=None,
vsys_dg=None, vsys_importable=None,
rulebase=None, template=None, template_stack=None,
with_classic_provider_spec=False, with_state=True,
argument_spec=None, required_one_of=None,
min_pandevice_version=None, min_panos_version=None,
panorama_error=None, firewall_error=None):
"""Returns a helper object that handles pandevice object tree init.
The `vsys`, `device_group`, `vsys_dg`, `vsys_importable`, `rulebase`,
`template`, and `template_stack` params can be any of the following types:
* None - do not include this in the spec
* True - use the default param name
* string - use this string for the param name
The `min_pandevice_version` and `min_panos_version` args expect a 3 element
tuple of ints. For example, `(0, 6, 0)` or `(8, 1, 0)`.
If you are including template support (by defining either `template` and/or
`template_stack`), and the thing the module is enabling the management of is
an "importable", you should define either `vsys_importable` (whose default
value is None) or `vsys` (whose default value is 'vsys1').
Arguments:
vsys: The vsys (default: 'vsys1').
device_group: Panorama only - The device group (default: 'shared').
vsys_dg: The param name if vsys and device_group are a shared param.
vsys_importable: Either this or `vsys` should be specified. For:
- Interfaces
- VLANs
- Virtual Wires
- Virtual Routers
rulebase: This is a policy of some sort.
template: Panorama - The template name.
template_stack: Panorama - The template stack name.
with_classic_provider_spec(bool): Include the ip_address, username,
password, api_key, and port params in the base spec, and make the
"provider" param optional.
with_state(bool): Include the standard 'state' param.
argument_spec(dict): The argument spec to mixin with the
generated spec based on the given parameters.
required_one_of(list): List of lists to extend into required_one_of.
min_pandevice_version(tuple): Minimum pandevice version allowed.
min_panos_version(tuple): Minimum PAN-OS version allowed.
panorama_error(str): The error message if the device is Panorama.
firewall_error(str): The error message if the device is a firewall.
Returns:
ConnectionHelper
"""
helper = ConnectionHelper(
min_pandevice_version, min_panos_version,
panorama_error, firewall_error)
req = []
spec = {
'provider': {
'required': True,
'type': 'dict',
'required_one_of': [['password', 'api_key'], ],
'options': {
'ip_address': {'required': True},
'username': {'default': 'admin'},
'password': {'no_log': True},
'api_key': {'no_log': True},
'port': {'default': 443, 'type': 'int'},
'serial_number': {'no_log': True},
},
},
}
if with_classic_provider_spec:
spec['provider']['required'] = False
spec['provider']['options']['ip_address']['required'] = False
del(spec['provider']['required_one_of'])
spec.update({
'ip_address': {'required': False},
'username': {'default': 'admin'},
'password': {'no_log': True},
'api_key': {'no_log': True},
'port': {'default': 443, 'type': 'int'},
})
req.extend([
['provider', 'ip_address'],
['provider', 'password', 'api_key'],
])
if with_state:
spec['state'] = {
'default': 'present',
'choices': ['present', 'absent'],
}
if vsys_dg is not None:
if isinstance(vsys_dg, bool):
param = 'vsys_dg'
else:
param = vsys_dg
spec[param] = {}
helper.vsys_dg = param
else:
if vsys is not None:
if isinstance(vsys, bool):
param = 'vsys'
else:
param = vsys
spec[param] = {'default': 'vsys1'}
helper.vsys = param
if device_group is not None:
if isinstance(device_group, bool):
param = 'device_group'
else:
param = device_group
spec[param] = {'default': 'shared'}
helper.device_group = param
if vsys_importable is not None:
if vsys is not None:
raise KeyError('Define "vsys" or "vsys_importable", not both.')
if isinstance(vsys_importable, bool):
param = 'vsys'
else:
param = vsys_importable
spec[param] = {}
helper.vsys_importable = param
if rulebase is not None:
if isinstance(rulebase, bool):
param = 'rulebase'
else:
param = rulebase
spec[param] = {
'default': None,
'choices': ['pre-rulebase', 'rulebase', 'post-rulebase'],
}
helper.rulebase = param
if template is not None:
if isinstance(template, bool):
param = 'template'
else:
param = template
spec[param] = {}
helper.template = param
if template_stack is not None:
if isinstance(template_stack, bool):
param = 'template_stack'
else:
param = template_stack
spec[param] = {}
helper.template_stack = param
if argument_spec is not None:
for k in argument_spec.keys():
if k in spec:
raise KeyError('{0}: key used by connection helper.'.format(k))
spec[k] = argument_spec[k]
if required_one_of is not None:
req.extend(required_one_of)
# Done.
helper.argument_spec = spec
helper.required_one_of = req
return helper
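# --- Illustrative usage sketch (not part of the original module) ---
# A PAN-OS Ansible module would typically combine this helper with
# AnsibleModule as below; the extra "description" option and whatever is
# built under `parent` are placeholders for illustration only.
#
#     from ansible.module_utils.basic import AnsibleModule
#
#     def main():
#         helper = get_connection(
#             vsys=True,
#             device_group=True,
#             with_classic_provider_spec=True,
#             argument_spec=dict(description=dict(type='str')),
#         )
#         module = AnsibleModule(
#             argument_spec=helper.argument_spec,
#             required_one_of=helper.required_one_of,
#             supports_check_mode=True,
#         )
#         parent = helper.get_pandevice_parent(module)
#         # ... attach pandevice objects to `parent` and apply them ...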
| gpl-3.0 | -3,546,844,968,293,405,700 | 38.966507 | 92 | 0.556507 | false | 4.325738 | false | false | false |
Arlefreak/arte7 | cms/migrations/0016_auto_20170417_0326.py | 1 | 1713 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-17 03:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cms', '0015_operasprimas_video'),
]
operations = [
migrations.AlterModelOptions(
name='cortometrajes',
options={'ordering': ['order'], 'verbose_name': 'Cortometraje', 'verbose_name_plural': 'Cortometrajes'},
),
migrations.AlterModelOptions(
name='operasprimasentries',
options={'ordering': ['order'], 'verbose_name': 'Opera Prima', 'verbose_name_plural': 'Operas Primas'},
),
migrations.RemoveField(
model_name='operasprimas',
name='second_description',
),
migrations.AddField(
model_name='cortometrajes',
name='order',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
),
migrations.AddField(
model_name='cortometrajes',
name='slug',
field=models.CharField(default=django.utils.timezone.now, editable=False, max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='operasprimasentries',
name='order',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
),
migrations.AddField(
model_name='operasprimasentries',
name='slug',
field=models.CharField(default=1, editable=False, max_length=200),
preserve_default=False,
),
]
| gpl-3.0 | 1,586,576,616,075,533,800 | 33.26 | 116 | 0.590776 | false | 4.1477 | false | false | false |
PircK/six | racunalnik.py | 1 | 2002 | import threading # za vzporedno izvajanje
import logging
from alfabeta import *
######################################################################
## Computer player
class Racunalnik():
def __init__(self, gui, algoritem):
self.gui = gui
        self.algoritem = algoritem # Algorithm that computes the move
        self.mislec = None # Thread that does the thinking
def igraj(self):
"""Igraj potezo, ki jo vrne algoritem."""
# Naredimo vlakno, ki mu podamo *kopijo* igre (da ne bo zmedel GUIja):
self.mislec = threading.Thread(
target=lambda: self.algoritem.izracunaj_potezo(self.gui.igra.kopija()))
        # Start the thread:
self.mislec.start()
        # Start checking whether a move has been found:
self.gui.plosca.after(100, self.preveri_potezo)
def preveri_potezo(self):
"""Vsakih 100ms preveri, ali je algoritem že izračunal potezo."""
poteza = self.algoritem.poteza
if poteza != None:
            # self.algoritem.poteza returns a pair (i, j), while povleci_potezo
            # takes i, j separately, so we unpack it with *poteza
self.gui.povleci_potezo(*poteza)
            # The worker thread is no longer active, so we "forget" it
self.mislec = None
else:
            # The algorithm has not found a move yet; check again in 100 ms
self.gui.plosca.after(100, self.preveri_potezo)
def prekini(self):
        '''interrupts the computer's thinking'''
        # The GUI calls this method when thinking has to be interrupted.
if self.mislec:
            # Tell the algorithm that it must stop thinking
self.algoritem.prekini()
            # Wait for the thread to stop
self.mislec.join()
self.mislec = None
def klik(self, i, j):
        '''responds to a user click while it is the computer's turn'''
        # The computer ignores the user's clicks
pass
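# --- Illustrative usage sketch (not part of the original file) ---
# Typical interaction from the GUI side; `gui` and `algoritem` are
# placeholders for the project's real GUI object and alfabeta-based algorithm.
#
#     racunalnik = Racunalnik(gui, algoritem)
#     racunalnik.igraj()     # starts thinking in a background thread
#     racunalnik.prekini()   # aborts thinking, e.g. when the game is closed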
| mit | 821,320,608,498,856,200 | 34.375 | 83 | 0.585563 | false | 2.80198 | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/filters/unsharp_filter.py | 1 | 2355 | from bitmovin.utils import Serializable
from . import AbstractFilter
class UnsharpFilter(AbstractFilter, Serializable):
def __init__(self,
name=None,
luma_matrix_horizontal_size=None,
luma_matrix_vertical_size=None,
luma_effect_strength=None,
chroma_matrix_horizontal_size=None,
chroma_matrix_vertical_size=None,
chroma_effect_strength=None,
id_=None,
custom_data=None,
description=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self.lumaMatrixHorizontalSize = luma_matrix_horizontal_size
self.lumaMatrixVerticalSize = luma_matrix_vertical_size
self.lumaEffectStrength = luma_effect_strength
self.chromaMatrixHorizontalSize = chroma_matrix_horizontal_size
self.chromaMatrixVerticalSize = chroma_matrix_vertical_size
self.chromaEffectStrength = chroma_effect_strength
def serialize(self):
serialized = super().serialize()
return serialized
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
name = json_object.get('name')
description = json_object.get('description')
luma_matrix_horizontal_size = json_object.get('lumaMatrixHorizontalSize')
luma_matrix_vertical_size = json_object.get('lumaMatrixVerticalSize')
luma_effect_strength = json_object.get('lumaEffectStrength')
chroma_matrix_horizontal_size = json_object.get('chromaMatrixHorizontalSize')
chroma_matrix_vertical_size = json_object.get('chromaMatrixVerticalSize')
chroma_effect_strength = json_object.get('chromaEffectStrength')
unsharp_filter = UnsharpFilter(
name=name,
id_=id_,
description=description,
luma_matrix_horizontal_size=luma_matrix_horizontal_size,
luma_matrix_vertical_size=luma_matrix_vertical_size,
luma_effect_strength=luma_effect_strength,
chroma_matrix_horizontal_size=chroma_matrix_horizontal_size,
chroma_matrix_vertical_size=chroma_matrix_vertical_size,
chroma_effect_strength=chroma_effect_strength
)
return unsharp_filter
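# --- Illustrative usage sketch (not part of the original file) ---
# Constructing and serializing the filter directly; the parameter values
# below are arbitrary examples, not recommended defaults.
#
#     unsharp = UnsharpFilter(
#         name='example-unsharp',
#         luma_matrix_horizontal_size=5,
#         luma_matrix_vertical_size=5,
#         luma_effect_strength=1.0
#     )
#     payload = unsharp.serialize()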
| unlicense | 6,443,985,348,660,007,000 | 41.818182 | 94 | 0.651805 | false | 3.873355 | false | false | false |
softwaresaved/softwareindex | softwareindex/handlers/slideshare_handler.py | 1 | 1478 | # This is a software index handler that gives a score based on the
# number of mentions in SlideShare. It uses the SlideShare API:
# http://www.slideshare.net/developers/documentation
#
# Inputs:
# - Identifier (String)
#
# Outputs:
# - score (Number)
# - description (String)
#
# Notes: this handler treats the software identifier as a string,
# even if it is a URL represented as a string. The behaviour of the
# SlideShare API for this has not been tested
import time, urllib, urllib2
from hashlib import sha1
from bs4 import BeautifulSoup
SEARCH_URL = 'https://www.slideshare.net/api/2/search_slideshows'
MATCH_STRING = 'TotalResults'
class slideshare_handler:
def get_score(self, identifier, key, secret, **kwargs):
""" Return the number of mentions in SlideShare and a descriptor
Needs an API key, which can be obtained here:
http://www.slideshare.net/developers/applyforapi """
ts = int(time.time())
strts = str(ts)
params = {
'api_key' : key,
'ts' : strts,
'hash' : sha1(secret+strts).hexdigest(),
'q' : identifier,
}
params.update(kwargs)
response = urllib2.urlopen(SEARCH_URL + '?' + urllib.urlencode(params))
soup = BeautifulSoup(response, 'xml')
return soup.find(MATCH_STRING).string
def get_description(self):
return 'Score based on number of mentions of software identifier in SlideShare'
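# --- Illustrative usage sketch (not part of the original handler) ---
# Example call; the key/secret values are placeholders for credentials
# obtained via http://www.slideshare.net/developers/applyforapi
#
#     handler = slideshare_handler()
#     score = handler.get_score('numpy', key='YOUR_API_KEY', secret='YOUR_SECRET')
#     print(handler.get_description(), score)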
| bsd-3-clause | -3,192,798,027,253,753,000 | 30.446809 | 87 | 0.658322 | false | 3.780051 | false | false | false |
grollins/sardine | examples/ethane/compute_modes.py | 1 | 3878 | """
Computes normal modes for ethane.
"""
from os import mkdir
from os.path import exists, join
from sardine.universe import UniverseFactory
from sardine.energy import BondEnergyFactory, AngleEnergyFactory, VDWEnergyFactory
from sardine.energy import EnergyFunctionFactory, GradientFunctionFactory
from sardine.nma import compute_hessian, compute_force_constant_matrix,\
compute_normal_modes, generate_mode_trajectory
from sardine.trajectory import save_trajectory_to_pdb
from sardine.minimize import BFGSMinimizer
from sardine.util import coords_1d_to_2d
PDB_FILENAME = "C2H6_ideal_trans_min_final.pdb"
SF_FILENAME = "C2H6.sf"
OUTPUT_DIR = "modes"
def main():
if not exists(OUTPUT_DIR):
mkdir(OUTPUT_DIR)
uf = UniverseFactory()
uf.load_atoms_from_file(PDB_FILENAME)
universe = uf.create_universe()
bond_energy_factory = BondEnergyFactory()
bond_energy_factory.load_bonds_from_file(SF_FILENAME)
bond_energy_func = bond_energy_factory.create_energy_func(num_atoms=len(universe))
bond_gradient_func = bond_energy_factory.create_gradient_func(num_atoms=len(universe))
angle_energy_factory = AngleEnergyFactory()
angle_energy_factory.load_angles_from_file(SF_FILENAME)
angle_energy_func = angle_energy_factory.create_energy_func()
angle_gradient_func = angle_energy_factory.create_gradient_func()
vdw_energy_factory = VDWEnergyFactory()
vdw_energy_factory.load_vdw_from_file(SF_FILENAME)
vdw_energy_func = vdw_energy_factory.create_energy_func()
vdw_gradient_func = vdw_energy_factory.create_gradient_func()
eff = EnergyFunctionFactory()
eff.add_energy_term('bonds', bond_energy_func)
eff.add_energy_term('angles', angle_energy_func)
eff.add_energy_term('vdw', vdw_energy_func)
energy_func = eff.create_energy_func(
['bonds', 'angles', 'vdw'], num_atoms=len(universe))
gff = GradientFunctionFactory()
gff.add_gradient_term('bonds', bond_gradient_func)
gff.add_gradient_term('angles', angle_gradient_func)
gff.add_gradient_term('vdw', vdw_gradient_func)
gradient_func = gff.create_gradient_func(
['bonds', 'angles', 'vdw'], num_atoms=len(universe))
# ======================
# = Minimize structure =
# ======================
minimizer = BFGSMinimizer(maxiter=200)
X = universe.get_coords().flatten()
energy_initial = energy_func(X)
X_min, energy_min = minimizer.run_minimization(
energy_func, gradient_func, X,
num_atoms=len(universe),
save_trajectory=True, noisy=True)
print energy_initial, energy_min
trajectory = minimizer.get_trajectory()
save_trajectory_to_pdb('minimization.pdb', trajectory, universe,
bond_energy_factory)
print "Wrote minimization.pdb"
# minimization output is a flat array. convert it to (N,3) array
X_min = coords_1d_to_2d(X_min)
# ========================
# = Compute normal modes =
# ========================
M = universe.get_inv_sqrt_mass_matrix()
H = compute_hessian(energy_func, X_min)
F = compute_force_constant_matrix(H, M)
normal_modes = compute_normal_modes(F, discard_trans_and_rot=True)
mode_freqs = normal_modes.get_frequencies()
with open(join(OUTPUT_DIR, 'eigen_values.txt'), 'w') as f:
f.write("%s" % normal_modes.freq_to_str())
for i in xrange(len(mode_freqs)):
mode_trajectory = generate_mode_trajectory(
normal_modes, initial_coords=X_min,
mode_number=i, peak_scale_factor=0.5)
save_trajectory_to_pdb(
join(OUTPUT_DIR, 'ethane_mode%02d.pdb') % (i+1),
mode_trajectory, universe, bond_energy_factory)
if __name__ == '__main__':
main() | bsd-2-clause | 7,910,962,338,234,839,000 | 38.181818 | 90 | 0.647499 | false | 3.434898 | false | false | false |
Trilarion/imperialism-remake | source/imperialism_remake/client/graphics/mappers/workforce_to_action_cursor_mapper.py | 1 | 2516 | # Imperialism remake
# Copyright (C) 2020 amtyurin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import logging
from PyQt5 import QtGui
from imperialism_remake.base import constants
from imperialism_remake.server.models.workforce_action import WorkforceAction
logger = logging.getLogger(__name__)
class WorkforceToActionCursorMapper:
def __init__(self, server_scenario):
super().__init__()
self._cursors = {}
self._cursor_not_allowed = {}
workforce_action_cursor_settings = server_scenario.get_workforce_action_cursor_settings()
for workforce_type in workforce_action_cursor_settings:
if workforce_type not in self._cursors:
self._cursors[workforce_type] = {}
self._cursors[workforce_type][WorkforceAction.MOVE] = QtGui.QCursor(QtGui.QPixmap(
constants.extend(constants.GRAPHICS_MAP_ICON_FOLDER,
workforce_action_cursor_settings[workforce_type]['workforce_action_cursor_move'])))
self._cursors[workforce_type][WorkforceAction.DUTY_ACTION] = QtGui.QCursor(QtGui.QPixmap(
constants.extend(constants.GRAPHICS_MAP_ICON_FOLDER,
workforce_action_cursor_settings[workforce_type][
'workforce_action_cursor_duty_action'])))
self._cursor_not_allowed[workforce_type] = QtGui.QCursor(QtGui.QPixmap(
constants.extend(constants.GRAPHICS_MAP_ICON_FOLDER,
workforce_action_cursor_settings[workforce_type][
'workforce_action_cursor_not_allowed'])))
def get_cursor_of_type(self, workforce_type: int, action: WorkforceAction):
return self._cursors[workforce_type][action]
def get_cursor_not_allowed_of_type(self, workforce_type: int):
return self._cursor_not_allowed[workforce_type]
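# --- Illustrative sketch (not part of the original module) ---
# The mapper assumes server_scenario.get_workforce_action_cursor_settings()
# returns a mapping shaped roughly like the one below (workforce type ->
# cursor file names); the file names here are placeholders.
#
#     settings = {
#         0: {
#             'workforce_action_cursor_move': 'cursor_move.png',
#             'workforce_action_cursor_duty_action': 'cursor_duty.png',
#             'workforce_action_cursor_not_allowed': 'cursor_not_allowed.png',
#         },
#     }
#     mapper = WorkforceToActionCursorMapper(server_scenario)
#     cursor = mapper.get_cursor_of_type(0, WorkforceAction.MOVE)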
| gpl-3.0 | 5,734,503,904,103,267,000 | 45.592593 | 116 | 0.674086 | false | 4.012759 | false | false | false |
ceache/treadmill | lib/python/treadmill/api/app_group.py | 2 | 2320 | """Implementation of AppGroup API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import fnmatch
from treadmill import context
from treadmill import schema
class API:
"""Treadmill AppGroup REST api."""
def __init__(self):
"""init"""
def _admin_app_group():
"""Lazily return admin object."""
return context.GLOBAL.admin.app_group()
@schema.schema({'$ref': 'app_group.json#/resource_id'})
def get(rsrc_id):
"""Get application configuration."""
result = _admin_app_group().get(rsrc_id)
result['_id'] = rsrc_id
return result
@schema.schema(
{'$ref': 'app_group.json#/resource_id'},
{'allOf': [{'$ref': 'app_group.json#/resource'},
{'$ref': 'app_group.json#/verbs/create'}]}
)
def create(rsrc_id, rsrc):
"""Create (configure) application."""
_admin_app_group().create(rsrc_id, rsrc)
return _admin_app_group().get(rsrc_id, dirty=True)
@schema.schema(
{'$ref': 'app_group.json#/resource_id'},
{'allOf': [{'$ref': 'app_group.json#/resource'},
{'$ref': 'app_group.json#/verbs/update'}]}
)
def update(rsrc_id, rsrc):
"""Update application configuration."""
_admin_app_group().replace(rsrc_id, rsrc)
return _admin_app_group().get(rsrc_id, dirty=True)
@schema.schema({'$ref': 'app_group.json#/resource_id'})
def delete(rsrc_id):
"""Delete configured application."""
_admin_app_group().delete(rsrc_id)
def _list(match=None):
"""List configured applications."""
if match is None:
match = '*'
app_groups = _admin_app_group().list({})
filtered = [
app_group for app_group in app_groups
if fnmatch.fnmatch(app_group['_id'], match)
]
return sorted(filtered, key=lambda item: item['_id'])
self.get = get
self.create = create
self.update = update
self.delete = delete
self.list = _list
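# --- Illustrative usage sketch (not part of the original module) ---
# Rough outline of how the API object is used; it assumes an initialized
# treadmill context (context.GLOBAL), and the attribute dict passed to
# create() is a placeholder that must match the app_group.json schema.
#
#     api = API()
#     api.create('example.app-group', {...})
#     for group in api.list('example.*'):
#         print(group['_id'])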
| apache-2.0 | 3,978,231,618,686,663,700 | 30.780822 | 65 | 0.530172 | false | 3.979417 | false | false | false |
EeOneDown/spbu4u | app/new_functions.py | 1 | 22888 | import hashlib
import json
import logging
import re
from datetime import datetime, timedelta, date
import requests
from telebot.apihelper import ApiException
from telebot.types import Message
from app.constants import (
emoji, subject_short_types, week_day_number, months,
reg_before_30, reg_only_30, reg_only_31, interval_off_answer, urls,
yandex_error_answer, yandex_segment_answer, all_stations,
ask_to_select_types_answer, updated_types_answer
)
from config import Config
def get_term_dates():
"""
:return: Returns current term's start and end dates
:rtype: tuple
"""
today = datetime.today()
if today.month in range(2, 8):
start_year = today.year
end_year = today.year
start_month = 2
end_month = 8
else:
start_year = today.year - 1 if today.month < 2 else today.year
end_year = today.year + 1 if today.month > 7 else today.year
start_month = 8
end_month = 2
return (date(year=start_year, month=start_month, day=1),
date(year=end_year, month=end_month, day=1))
def is_resit(event):
"""
Checks event for resit type
:param event: an elements of `DayStudyEvents`
:type event: dict
:return: is resit
:rtype: bool
"""
return ("пересдача" in event["Subject"]
or "консультация" in event["Subject"]
or "комиссия" in event["Subject"])
def get_resits_events(events):
"""
Deletes all resits from events
:param events: all elements of `DayStudyEvents`
:type events: list
:return: list of available events
:rtype: list
"""
return [event for event in events if is_resit(event)]
def delete_resits_events(events):
"""
Deletes all resits from events
:param events: all elements of `DayStudyEvents`
:type events: list
:return: list of available events
:rtype: list
"""
return [event for event in events if not is_resit(event)]
def delete_cancelled_events(events):
"""
Function to delete all cancelled events.
:param events: all elements of `DayStudyEvents`
:type events: list
:return: list of available events
:rtype: list
"""
return [
event for event in events
if not event["IsCancelled"]
]
def create_events_blocks(events):
"""
Function to create list of events grouped by time.
:param events: all (or available) elements of `DayStudyEvents`
:type events: list
:return: list of events grouped by time
:rtype: list of list
"""
event_blocks = []
for i, event in enumerate(events):
if i and (
event["Start"] == events[i - 1]["Start"]
and event["End"] == events[i - 1]["End"]
):
event_blocks[-1].append(event)
else:
event_blocks.append([event])
return event_blocks
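# Illustrative sketch (not part of the original module): events that share
# identical "Start"/"End" values collapse into a single block, e.g.
#
#     events = [
#         {"Start": "2019-09-02T09:30:00", "End": "2019-09-02T11:05:00"},
#         {"Start": "2019-09-02T09:30:00", "End": "2019-09-02T11:05:00"},
#         {"Start": "2019-09-02T11:15:00", "End": "2019-09-02T12:50:00"},
#     ]
#     create_events_blocks(events)  # -> [[event1, event2], [event3]]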
def datetime_from_string(dt_string):
"""
Converts string to datetime object
:param dt_string: datetime string
:type dt_string: str
:return: datetime object
:rtype: datetime
"""
return datetime.strptime(
dt_string.split("+")[0].split("Z")[0], "%Y-%m-%dT%H:%M:%S"
)
def get_key_by_value(dct, val):
"""
Gets key by value from input dictionary
:param dct: input dictionary
:type dct: dict
:param val: value in input dictionary (MUST BE)
:return: suitable key
"""
for item in dct.items():
if item[1] == val:
return item[0]
def get_work_monday(is_next_week=False):
"""
    Returns date of current or next Monday for Mon-Sat, next Monday for Sunday
:param is_next_week: (Optional) is for next week
:type is_next_week: bool
:return: monday date
:rtype: date
"""
today = date.today()
delta = today.weekday() if today.weekday() != 6 else -1
if is_next_week:
delta -= 7
return today - timedelta(days=delta)
def get_date_by_weekday_title(title, is_next_week=False):
"""
Returns date for current or next week by day short title
:param title: short weekday title (Russian)
:type title: str
:param is_next_week: (Optional) is for next week
:type is_next_week: bool
:return: date
:rtype: date
"""
work_monday = get_work_monday(is_next_week=is_next_week)
delta = week_day_number[title] - 1
return work_monday + timedelta(days=delta)
def datetime_to_string(date_value):
"""
Converts date object to string
:param date_value: date object
:type date_value: date
:return: date string
:rtype: str
"""
return "{day} {month_title} {year}".format(
day=date_value.day,
month_title=get_key_by_value(months, date_value.month),
year=date_value.year)
def text_to_date(text):
"""
Checks if the text is a date then converts it to a date object or
returns False
:param text: some text
:type text: str
:return: date object or False
:rtype: date or False
"""
regs = [reg_before_30, reg_only_30, reg_only_31]
for reg in regs:
res = re.search(reg, text)
if res:
groups = res.groups()
day = int(groups[0])
if groups[3]:
if groups[3].isdecimal():
month = int(groups[3])
else:
month = months[groups[3]]
else:
month = date.today().month
year = int(groups[5]) if groups[5] else date.today().year
try:
return date(day=day, month=month, year=year)
except ValueError:
return False
return False
def text_to_interval(text):
"""
Checks if text is a dates interval and converts it to two date objects or
returns False
:param text: some text
:type text: str
:return: two date objects
:rtype: tuple of date
"""
dates = text.split("-")
if len(dates) == 2:
from_date = text_to_date(dates[0].strip())
to_date = text_to_date(dates[1].strip())
if from_date and to_date and from_date < to_date:
return from_date, to_date
return False
def create_interval_off_answer(from_date, to_date):
"""
Creates interval off answer for dates
:param from_date: first date
:type from_date: date
:param to_date: second date
:type to_date: date
:return: interval off answer
:rtype: str
"""
return interval_off_answer.format(
emoji["sleep"],
datetime_to_string(from_date),
datetime_to_string(to_date)
)
def is_correct_educator_name(text):
"""
Checks if the text is correct
:param text: input text
:type text: str
:return: True or False
:rtype: bool
"""
return text.replace(".", "").replace("-", "").replace(" ", "").isalnum()
def parse_event_time(event):
return "{0} {1:0>2}:{2:0>2}{3}{4:0>2}:{5:0>2}".format(
emoji["clock"],
datetime_from_string(event["Start"]).time().hour,
datetime_from_string(event["Start"]).time().minute,
emoji["en_dash"],
datetime_from_string(event["End"]).time().hour,
datetime_from_string(event["End"]).time().minute
)
def parse_event_subject(event):
answer = ""
subject_name = ", ".join(event["Subject"].split(", ")[:-1])
subject_type = event["Subject"].split(", ")[-1]
    # keep only the first two words
stripped_subject_type = " ".join(subject_type.split()[:2])
if stripped_subject_type in subject_short_types.keys():
answer += subject_short_types[stripped_subject_type] \
+ " " + emoji["en_dash"] + " "
else:
answer += subject_type.upper() \
+ " " + emoji["en_dash"] + " "
answer += subject_name
return answer
def parse_event_location(location, full_place=True, have_chosen_educator=False,
chosen_educator=None):
answer = ""
if location["IsEmpty"]:
return answer
if have_chosen_educator and not chosen_educator.issuperset(
{edu["Item2"].split(", ")[0] for edu in location["EducatorIds"]}
):
return answer
if full_place:
location_name = location["DisplayName"].strip(", ").strip()
else:
location_name = location["DisplayName"].split(", ")[-1].strip()
answer += location_name
if location["HasEducators"]:
educators = [educator["Item2"].split(", ")[0] for educator in
location["EducatorIds"]]
if educators:
answer += " <i>({0})</i>".format("; ".join(educators))
return answer
def parse_event_sub_loc_edu(event, full_place):
answer = "<b>" + parse_event_subject(event) + "</b>\n"
for location in event["EventLocations"]:
loc_answer = parse_event_location(location, full_place)
answer += loc_answer
if loc_answer:
if event["LocationsWereChanged"] or \
event["EducatorsWereReassigned"]:
answer += " " + emoji["warning"]
answer += "\n"
answer += "\n"
return answer
def create_schedule_answer(event, full_place):
answer = ""
if event["IsAssigned"]:
answer += emoji["new"] + " "
answer += parse_event_time(event)
if event["TimeWasChanged"]:
answer += " " + emoji["warning"]
answer += "\n" + parse_event_sub_loc_edu(event, full_place)
return answer
def create_master_schedule_answer(day_info):
answer = "{0} {1}\n\n".format(emoji["calendar"], day_info["DayString"])
for event in day_info["DayStudyEvents"]:
answer += "{0} {1} <i>({2})</i>\n".format(
emoji["clock"], event["TimeIntervalString"],
"; ".join(event["Dates"])
)
answer += "<b>"
subject_type = event["Subject"].split(", ")[-1]
stripped_subject_type = " ".join(subject_type.split()[:2])
if stripped_subject_type in subject_short_types.keys():
answer += subject_short_types[stripped_subject_type] \
+ " " + emoji["en_dash"] + " "
else:
answer += subject_type.upper() \
+ " " + emoji["en_dash"] + " "
answer += ", ".join(
event["Subject"].split(", ")[:-1]
) + "</b>\n"
for location in event["EventLocations"]:
location_name = location["DisplayName"]
answer += location_name + " <i>({0})</i>\n".format(
"; ".join(name["Item1"] for name in
event["ContingentUnitNames"])
)
answer += "\n"
return answer
def get_hours_minutes_by_seconds(seconds):
"""
Gets hours and minutes by input seconds
:param seconds: seconds
:type seconds: int
:return: hours and minutes
:rtype: tuple
"""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return h, m
def get_yandex_raw_data(from_station, to_station, for_date):
"""
Gets yandex raw data and status code
:param from_station: `from` station yandex code
:type from_station: str
:param to_station: `to` station yandex code
:type to_station: str
:param for_date: date for which data should be received
:type for_date: date
:return: status code and raw json data
:rtype: tuple
"""
params = {
"from": from_station,
"to": to_station,
"apikey": Config.YANDEX_API_KEY,
"date": for_date,
"format": "json",
"lang": "ru_RU",
"transport_types": "suburban"
}
url = urls["ya_search"]
req = requests.get(url, params=params)
return req.status_code, req.json()
def parse_yandex_segment(segment):
"""
Parses segments data to `yandex_segment_answer`
:param segment: segment's json data from api.rasp.yandex's search method
:type segment: dict
:return: parsed yandex segment answer
:rtype: str
"""
departure_datetime = datetime_from_string(segment["departure"])
arrival_datetime = datetime_from_string(segment["arrival"])
hours, minutes = get_hours_minutes_by_seconds(
(departure_datetime - datetime.now()).seconds
)
if hours:
time_mark = emoji["blue_diamond"]
lef_time = "{0} ч {1} мин".format(hours, minutes)
elif 15.0 < minutes < 60.0:
time_mark = emoji["orange_diamond"]
lef_time = "{0} мин".format(minutes)
else:
time_mark = emoji["runner"]
lef_time = "{0} мин".format(minutes)
if segment["thread"]["express_type"]:
train_mark = emoji["express"]
else:
train_mark = emoji["train"]
if segment["tickets_info"]:
price = str(
segment["tickets_info"]["places"][0]["price"]["whole"]
)
if segment["tickets_info"]["places"][0]["price"]["cents"]:
price += ",{0}".format(
segment["tickets_info"]["places"][0]["price"]["cents"]
)
else:
price = "?"
return yandex_segment_answer.format(
time_mark=time_mark,
lef_time=lef_time,
train_mark=train_mark,
dep_time=departure_datetime.time().strftime("%H:%M"),
arr_time=arrival_datetime.time().strftime("%H:%M"),
price=price,
ruble_sign=emoji["ruble_sign"]
)
def create_suburbans_answer(from_code, to_code, for_date, limit=3):
"""
Creates yandex suburbans answer for date by stations codes
:param from_code: `from` yandex station code
:type from_code: str
:param to_code: `to` yandex station code
:type to_code: str
:param for_date: date for which data should be received
:type for_date: date
:param limit: limit of segments in answer
:type limit: int
:return: tuple with `answer`, `is_tomorrow` and `is_error` data
:rtype: tuple
"""
code, data = get_yandex_raw_data(from_code, to_code, for_date)
if code != 200:
return yandex_error_answer, False, True
from_title = data["search"]["from"]["title"]
to_title = data["search"]["to"]["title"]
answer = ""
for segment in data["segments"]:
if len(answer.split("\n\n")) > limit:
break
if datetime_from_string(segment["departure"]) >= datetime.now():
answer += parse_yandex_segment(segment)
if answer:
answer = "<b>{0}</b> => <b>{1}</b>\n\n".format(
from_title, to_title
) + answer
is_tomorrow = False
else:
for_date = date.today() + timedelta(days=1)
answer += create_suburbans_answer(
from_code, to_code, for_date, limit=5
)[0]
is_tomorrow = True
return answer, is_tomorrow, False
def get_station_title_from_text(text, is_end=False, is_full=False):
"""
Gets start/end station title from bot's answer text
:param text: bot's answer text
:type text: str
    :param is_end: whether to get the end station title
:type is_end: bool
:param is_full: if the text is full answer created by
`create_suburbans_answer` function
:type is_full: bool
:return: station title
:rtype: str
"""
if is_full:
first_i, last_i, split_by = 0, int(is_end), " => "
else:
first_i, last_i, split_by = int(is_end), -1, ": "
return text.split("\n")[first_i].split(split_by)[last_i]
def get_station_code_from_text(text, is_end=False, is_full=False):
"""
Gets start/end station yandex code from bot's answer text
:param text: bot's answer text
:type text: str
    :param is_end: whether to get the end station title
:type is_end: bool
:param is_full: if the text is full answer created by
`create_suburbans_answer` function
:type is_full: bool
:return: yandex station code
:rtype: str
"""
return all_stations[get_station_title_from_text(text, is_end, is_full)]
def add_end_station(text, end_title):
"""
Changes answer text by adding end station title
:param text: bot's answer text
:type text: str
:param end_title: end station title
:type end_title: str
:return: answer text
:type: str
"""
return "Начальная: <b>{0}</b>\nКончная: <b>{1}</b>\nВыбери день:".format(
get_station_title_from_text(text), end_title
)
def update_suburbans_answer(text, show_more=False, for_tomorrow=False):
"""
Updates suburbans answer created by `create_suburbans_answer` function
:param text: bot's answer text
:type text: str
    :param show_more: whether to show more future trains
    :type show_more: bool
    :param for_tomorrow: whether to show trains for tomorrow
    :type for_tomorrow: bool
    :return: tuple with `answer`, `is_tomorrow` and `is_error` data
"""
return create_suburbans_answer(
from_code=get_station_code_from_text(text, is_full=True),
to_code=get_station_code_from_text(text, is_full=True, is_end=True),
for_date=date.today() + timedelta(days=int(for_tomorrow)),
limit=100 if show_more else (7 if for_tomorrow else 3)
)
def bot_waiting_for(msg, waiting_bot_text):
"""
Checks if the message is a reply to `waiting_bot_text`
:param msg: bot's message
:type msg: Message
:param waiting_bot_text: text which bot sent
:type waiting_bot_text: str
:return: True or False
:rtype: bool
"""
if msg.reply_to_message:
if msg.reply_to_message.from_user.username == Config.BOT_NAME:
if msg.reply_to_message.text == waiting_bot_text:
return True
return False
def get_block_data_from_block_answer(text):
"""
    Gets count of blocks, current block number and schedule's date from
    the block answer created by `User.get_block_answer()`
:param text: block answer
:type text: str
:return: count of blocks, current block number and schedule's date
:rtype: tuple
"""
rows = text.split("\n\n")
current_block_num, blocks_count = list(map(int, rows[0].split()[::2]))
for_date = get_date_by_weekday_title(rows[1].split()[-1][1:-1])
return blocks_count, current_block_num, for_date
def get_event_data_from_block_answer(text, idx):
"""
Gets event's day short title, time, type, name and educators from
    the block answer created by `User.get_block_answer()`
:param text: block answer
:type text: str
:param idx: event index
:type idx: int
:return: event's day short title, time, type, name and educators
:rtype: tuple
"""
rows = text.split("\n\n")
emoji_time_day = rows[1].split()
event_time = emoji_time_day[1]
event_day_short = emoji_time_day[-1][1:-1]
event_data = rows[idx + 2].split("\n")
event_type_name = event_data[0].split(" " + emoji["en_dash"] + " ")
event_name = (" " + emoji["en_dash"] + " ").join(event_type_name[1:])
event_type = event_type_name[0].strip(
str(idx + 1) + ". "
).replace(
emoji["cross_mark"] + " ", ""
)
event_educators = "; ".join(
[place_edu.split("(")[-1].split(")")[0]
for place_edu in event_data[1:]]
)
return event_day_short, event_time, event_type, event_name, event_educators
def update_types_answer(text, new_type):
"""
Updates text by adding/removing type
    :param text: bot's old text
:type text: str
:param new_type: selected short type
:type new_type: str
:return: updated answer
:rtype: str
"""
lesson_data = text.split("\n\n")[1].split("\n")
if lesson_data[-1] == "Типы: Все":
types = [new_type]
else:
types = lesson_data[-1][6:].split("; ")
if new_type in types:
types.remove(new_type)
else:
types.append(new_type)
lesson_data[-1] = "Типы: {0}".format("; ".join(types) if types else "Все")
return "\n\n".join([
updated_types_answer,
"\n".join(lesson_data),
ask_to_select_types_answer
])
def get_lesson_data(data, hide_type):
"""
Creates dict with lesson data for `Lesson.__init__()`
:param data: `selected_lesson_info_answer` with data
:type data: str
:param hide_type: `ЛЛЛ`, `ЛКК`, etc
:type hide_type: str
:return: dict with lesson data
:rtype: dict
"""
return dict(
name=data[2][10:],
types=[get_key_by_value(dct=subject_short_types, val=t) for t in
data[-1][6:].split("; ")] if "Все" not in data[-1] else [],
days=[data[0][6:]] if hide_type[0] == "К" else [],
times=[data[1][7:]] if hide_type[1] == "К" else [],
educators=data[3][15:].split("; ") if hide_type[2] == "К" else [],
locations=[]
)
def tgbot_send_long_message(bot, text, user_id, split="\n\n"):
try:
bot.send_message(user_id, text, parse_mode="HTML")
except ApiException as ApiExcept:
json_err = json.loads(ApiExcept.result.text)
if json_err["description"] == "Bad Request: message is too long":
event_count = len(text.split(split))
first_part = split.join(text.split(split)[:event_count // 2])
second_part = split.join(text.split(split)[event_count // 2:])
tgbot_send_long_message(bot, first_part, user_id, split)
tgbot_send_long_message(bot, second_part, user_id, split)
def tgbot_edit_first_and_send_messages(bot, texts, bot_msg, split="\n\n"):
try:
bot.edit_message_text(
text=texts[0],
chat_id=bot_msg.chat.id,
message_id=bot_msg.message_id,
parse_mode="HTML"
)
except ApiException:
tgbot_send_long_message(bot, texts[0], bot_msg.chat.id, split)
for text in texts[1:]:
tgbot_send_long_message(bot, text, bot_msg.chat.id, split)
def write_log(update, work_time, was_error=False):
if update.message:
chat_id = update.message.chat.id
user_text = update.message.text or "NO TEXT"
elif update.callback_query:
chat_id = update.callback_query.message.chat.id
user_text = update.callback_query.data
else:
chat_id = "ERROR"
user_text = str(update)
log = "CHAT: {0} ===== TEXT: {1} ===== TIME: {2}".format(
chat_id, user_text, work_time)
if was_error:
log += "\t\t\tERROR"
logging.info(log)
def generate_hash(content: bytes):
"""
    Create a SHA3-512 hash from bytes
:param content: some data in bytes
:type content: bytes
:return: hash string
:rtype: str
"""
return hashlib.sha3_512(content).hexdigest()
def check_hash(cur_hash: str, content: bytes):
"""
Checks current hash and content
:param cur_hash: current hash
:type cur_hash: str
:param content: some data in bytes
:type content: bytes
:return: is same
:rtype: bool
"""
return cur_hash == generate_hash(content)
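# --- Illustrative sketch (not part of the original module) ---
# Quick sanity check of the hashing helpers above (SHA3-512 under the hood):
#
#     digest = generate_hash(b"schedule")
#     assert check_hash(digest, b"schedule")
#     assert not check_hash(digest, b"changed")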
| apache-2.0 | -8,741,936,217,095,794,000 | 27.638994 | 79 | 0.589468 | false | 3.440314 | false | false | false |
ahwillia/tensortools | tensortools/ensemble.py | 1 | 6151 | from tensortools import optimize
from tensortools.diagnostics import kruskal_align
from tqdm import trange
from collections.abc import Iterable
import numpy as np
class Ensemble(object):
"""
Represents an ensemble of fitted tensor decompositions.
"""
def __init__(self, nonneg=False, fit_method=None, fit_options=dict()):
"""Initializes Ensemble.
Parameters
----------
nonneg : bool
If True, constrains low-rank factor matrices to be nonnegative.
fit_method : None, str, callable, optional (default: None)
Method for fitting a tensor decomposition. If input is callable,
it is used directly. If input is a string then method is taken
from tensortools.optimize using ``getattr``. If None, a reasonable
default method is chosen.
fit_options : dict
Holds optional arguments for fitting method.
"""
# Model parameters
self._nonneg = nonneg
# Determinine optimization method. If user input is None, try to use a
# reasonable default. Otherwise check that it is callable.
if fit_method is None:
self._fit_method = optimize.ncp_bcd if nonneg else optimize.cp_als
elif isinstance(fit_method, str):
try:
self._fit_method = getattr(optimize, fit_method)
except AttributeError:
raise ValueError("Did not recognize method 'fit_method' "
"{}".format(fit_method))
elif callable(fit_method):
self._fit_method = fit_method
else:
raise ValueError("Expected 'fit_method' to be a string or "
"callable.")
# Try to pick reasonable defaults for optimization options.
fit_options.setdefault('tol', 1e-5)
fit_options.setdefault('max_iter', 500)
fit_options.setdefault('verbose', False)
self._fit_options = fit_options
# TODO - better way to hold all results...
self.results = dict()
def fit(self, X, ranks, replicates=1, verbose=True):
"""
Fits CP tensor decompositions for different choices of rank.
Parameters
----------
X : array_like
Real tensor
ranks : int, or iterable
iterable specifying number of components in each model
replicates: int
number of models to fit at each rank
verbose : bool
If True, prints summaries and optimization progress.
"""
# Make ranks iterable if necessary.
if not isinstance(ranks, Iterable):
ranks = (ranks,)
# Iterate over model ranks, optimize multiple replicates at each rank.
for r in ranks:
# Initialize storage
if r not in self.results:
self.results[r] = []
# Display fitting progress.
if verbose:
itr = trange(replicates,
desc='Fitting rank-{} models'.format(r),
leave=False)
else:
itr = range(replicates)
# Fit replicates.
for i in itr:
model_fit = self._fit_method(X, r, **self._fit_options)
self.results[r].append(model_fit)
# Print summary of results.
if verbose:
itr.close()
itr.refresh()
min_obj = min([res.obj for res in self.results[r]])
max_obj = max([res.obj for res in self.results[r]])
elapsed = sum([res.total_time for res in self.results[r]])
print('Rank-{} models: min obj, {:.2f}; '
'max obj, {:.2f}; time to fit, '
'{:.1f}s'.format(r, min_obj, max_obj, elapsed), flush=True)
# Sort results from lowest to largest loss.
for r in ranks:
idx = np.argsort([result.obj for result in self.results[r]])
self.results[r] = [self.results[r][i] for i in idx]
# Align best model within each rank to best model of next larger rank.
# Here r0 is the rank of the lower-dimensional model and r1 is the rank
# of the high-dimensional model.
for i in reversed(range(1, len(ranks))):
r0, r1 = ranks[i-1], ranks[i]
U = self.results[r0][0].factors
V = self.results[r1][0].factors
kruskal_align(U, V, permute_U=True)
# For each rank, align everything to the best model
for r in ranks:
# store best factors
U = self.results[r][0].factors # best model factors
self.results[r][0].similarity = 1.0 # similarity to itself
# align lesser fit models to best models
for res in self.results[r][1:]:
res.similarity = kruskal_align(U, res.factors, permute_V=True)
def objectives(self, rank):
"""Returns objective values of models with specified rank.
"""
self._check_rank(rank)
return [result.obj for result in self.results[rank]]
def similarities(self, rank):
"""Returns similarity scores for models with specified rank.
"""
self._check_rank(rank)
return [result.similarity for result in self.results[rank]]
def factors(self, rank):
"""Returns KTensor factors for models with specified rank.
"""
self._check_rank(rank)
return [result.factors for result in self.results[rank]]
def _check_rank(self, rank):
"""Checks if specified rank has been fit.
Parameters
----------
rank : int
Rank of the models that were queried.
Raises
------
ValueError: If no models of rank ``rank`` have been fit yet.
"""
if rank not in self.results:
raise ValueError('No models of rank-{} have been fit.'
'Call Ensemble.fit(tensor, rank={}, ...) '
'to fit these models.'.format(rank))
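# --- Illustrative usage sketch (not part of the original module) ---
# Typical workflow, assuming `data` is a real-valued numpy tensor:
#
#     import numpy as np
#     data = np.random.rand(50, 40, 30)
#     ensemble = Ensemble(nonneg=False, fit_options=dict(tol=1e-4))
#     ensemble.fit(data, ranks=range(1, 6), replicates=3)
#     best_rank3 = ensemble.factors(3)[0]      # factors of the best rank-3 model
#     print(ensemble.objectives(3), ensemble.similarities(3))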
| mit | 7,220,950,852,834,618,000 | 36.278788 | 81 | 0.558934 | false | 4.396712 | false | false | false |
kingctan/Misago | misago/forums/migrations/0002_default_forums.py | 8 | 1785 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.translation import ugettext as _
from misago.core.utils import slugify
def create_default_forums_tree(apps, schema_editor):
Forum = apps.get_model('misago_forums', 'Forum')
Forum.objects.create(
special_role='private_threads',
role='forum',
name='Private',
slug='private',
lft=1,
rght=2,
tree_id=0,
level=0,
)
root = Forum.objects.create(
special_role='root_category',
role='category',
name='Root',
slug='root',
lft=3,
rght=10,
tree_id=1,
level=0,
)
category_name = _("First category")
forum_name = _("First forum")
redirect_name = _("Misago support forums")
redirect_link = _("http://misago-project.org")
category = Forum.objects.create(
parent=root,
lft=4,
rght=9,
tree_id=1,
level=1,
role='category',
name=category_name,
slug=slugify(category_name),
css_class='accent')
Forum.objects.create(
parent=category,
lft=5,
rght=6,
tree_id=1,
level=2,
role='forum',
name=forum_name,
slug=slugify(forum_name))
Forum.objects.create(
parent=category,
lft=7,
rght=8,
tree_id=1,
level=2,
role='redirect',
name=redirect_name,
slug=slugify(redirect_name),
redirect_url=redirect_link)
class Migration(migrations.Migration):
dependencies = [
('misago_forums', '0001_initial'),
]
operations = [
migrations.RunPython(create_default_forums_tree),
]
| gpl-2.0 | -3,845,954,754,693,966,000 | 21.037037 | 57 | 0.555182 | false | 3.606061 | false | false | false |
subdownloader/subdownloader | subdownloader/webutil.py | 3 | 3398 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 SubDownloader Developers - See COPYING - GPLv3
import logging
import socket
from ssl import SSLError
from urllib.error import HTTPError, URLError
from urllib.request import urlopen, urlretrieve
log = logging.getLogger('subdownloader.http')
DEFAULT_TIMEOUT = 300
# FIXME: allow download and unzip in one step?
def test_connection(url, timeout=DEFAULT_TIMEOUT):
"""
Open a connection to the url.
:param url: Url to test
:param timeout: Timeout
:return: True if connection could be made.
"""
log.debug('testConnection: url={}, timeout={}'.format(url, timeout))
# FIXME: For Python3 ==> use urlopen(timeout=...) and get rid of socket
defTimeOut = socket.getdefaulttimeout()
try:
timeout = float(timeout)
except ValueError:
log.debug('Illegal timeout argument. {} ({})'.format(
timeout, type(timeout)))
socket.setdefaulttimeout(timeout)
connectable = False
log.debug('Test connection "{}", timeout={}'.format(url, timeout))
try:
urlopen(url)
log.debug('urlopen succeeded')
connectable = True
except (HTTPError, URLError, SSLError, socket.error):
log.exception('url failed')
socket.setdefaulttimeout(defTimeOut)
return connectable
def download_raw(url, local_path, callback):
"""
Download an url to a local file.
:param url: url of the file to download
:param local_path: path where the downloaded file should be saved
:param callback: instance of ProgressCallback
:return: True is succeeded
"""
log.debug('download_raw(url={url}, local_path={local_path})'.format(url=url, local_path=local_path))
raw_progress = RawDownloadProgress(callback)
reporthook = raw_progress.get_report_hook()
try:
log.debug('urlretrieve(url={url}, local_path={local_path}) ...'.format(url=url, local_path=local_path))
urlretrieve(url=url, filename=local_path, reporthook=reporthook)
log.debug('... SUCCEEDED')
callback.finish(True)
return True
except URLError:
log.exception('... FAILED')
callback.finish(False)
return False
class RawDownloadProgress(object):
"""
    Wraps a ProgressCallback to report back download progress.
"""
def __init__(self, callback):
"""
Create a new RawDownloadProgress that encapsulates a ProgressCallback to record download progress.
:param callback: ProgressCallback to encapsulate
"""
self._callback = callback
self._chunkNumber = 0
self._total = 0
def get_report_hook(self):
"""
        Return a callback function suitable for use as the reporthook argument of urllib(.request).urlretrieve
:return: function object
"""
def report_hook(chunkNumber, chunkSize, totalSize):
if totalSize != -1 and not self._callback.range_initialized():
log.debug('Initializing range: [{},{}]'.format(0, totalSize))
self._callback.set_range(0, totalSize)
self._chunkNumber = chunkNumber
self._total += chunkSize
if self._total > totalSize:
# The chunk size can be bigger than the file
self._total = totalSize
self._callback.update(self._total)
return report_hook
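# --- Usage sketch (illustrative, not part of the original module) ---
# Drives download_raw() with a stub callback object. The ProgressCallback
# interface assumed here (range_initialized/set_range/update/finish) is
# inferred from how RawDownloadProgress calls it; the URL and target path
# are placeholders.
if __name__ == '__main__':
    class _PrintCallback(object):
        def __init__(self):
            self._has_range = False
        def range_initialized(self):
            return self._has_range
        def set_range(self, low, high):
            self._has_range = True
        def update(self, value):
            print('downloaded {} bytes'.format(value))
        def finish(self, ok):
            print('finished, success={}'.format(ok))
    download_raw('https://example.org/subtitles.zip', '/tmp/subtitles.zip',
                 _PrintCallback())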
| gpl-3.0 | 7,107,083,423,462,436,000 | 33.323232 | 111 | 0.64744 | false | 4.258145 | false | false | false |
ankitsejwal/Lyndor | run.py | 1 | 6128 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Lyndor runs from here - contains the main functions '''
import sys, time, os
import module.message as message
import module.save as save
import module.cookies as cookies
import module.read as read
import install
import module.move as move
import module.draw as draw
import module.rename as rename
import module.exercise_file as exercise_file
from colorama import Fore, init
def main():
''' Main function '''
init()
message.animate_characters(Fore.LIGHTYELLOW_EX, draw.ROCKET, 0.02)
message.spinning_cursor()
message.print_line('\r1. Paste course url or\n' +
'2. Press enter for Bulk Download')
url = input()
print('')
start_time = time.time() #start time counter begins
if url == "":
        # If the user presses Enter (i.e. url is empty), get urls from Bulk Download.txt
urls = read.bulk_download()
if not urls:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, 'Please paste urls in Bulk Download.txt\n'))
for url in urls:
schedule_download(url)
else:
# begin regular download
schedule_download(url)
try:
end_time = time.time()
message.animate_characters(Fore.LIGHTGREEN_EX, draw.COW, 0.02)
message.colored_message(Fore.LIGHTGREEN_EX, "\nThe whole process took {}\n".format(move.hms_string(end_time - start_time)))
except KeyboardInterrupt:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
def schedule_download(url):
''' Look for the scheduled time in settings.json '''
if not read.aria2_installed:
tip = '☝🏻 Tip: Install aria2c for faster downloads, read README.md to learn more.'
message.carriage_return_animate(tip)
if read.download_time == '':
# If download time not set, begin download
download_course(url)
return
else:
counter = True
        message.colored_message(Fore.LIGHTGREEN_EX, 'Download time set to: ' + read.download_time +
                                ' in settings.json; you can change or remove this time there.\n')
try:
while counter:
if time.strftime("%H:%M") == read.download_time:
download_course(url)
return
print(f'Download will start at: {read.download_time} leave this window open.')
time.sleep(60)
except KeyboardInterrupt:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
def download_course(url):
''' download course '''
# Check for a valid url
if url.find('.html') == -1:
sys.exit(message.animate_characters(Fore.LIGHTRED_EX, draw.ANONYMOUS, 0.02))
url = url[:url.find(".html")+5] #strip any extra text after .html in the url
# Folder/File paths
lynda_folder_path = read.location + '/'
course_folder_path = save.course_path(url, lynda_folder_path)
desktop_folder_path = install.get_path("Desktop")
download_folder_path = install.get_path("Downloads")
# Read preferences
use_cookie_for_download = read.course_download_pref
if use_cookie_for_download in ['cookies', 'cookie'] or read.exfile_download_method == 'aria2':
cookie_path = cookies.find_cookie(desktop_folder_path, download_folder_path)
downloading_from_cookie = message.return_colored_message(Fore.LIGHTBLUE_EX, '🍪 Downloading videos using cookies.txt')
message.carriage_return_animate(downloading_from_cookie)
else:
cookie_path = ''
usr_pass_message = message.return_colored_message(Fore.LIGHTGREEN_EX, '⛺ Using username and password combination for download\n')
message.carriage_return_animate(usr_pass_message)
try:
# main operations ->
save.course(url, lynda_folder_path) # Create course folder
save.info_file(url, course_folder_path) # Gather information
save.chapters(url, course_folder_path) # Create chapter folders
save.contentmd(url) # Create content.md
save.videos(url, cookie_path, course_folder_path) # Download videos
rename.videos(course_folder_path) # rename videos
rename.subtitles(course_folder_path) # rename subtitles
move.vid_srt_to_chapter(url, course_folder_path) # Move videos and subtitles to chapter folders
# Download exercise files
if save.check_exercise_file(url):
print('\nExercise file is available to download')
if not read.download_exercise_file:
# if user do not want to download ex-file
print("settings.json says you do not want to download ex-file -> 'download_exercise_file': false")
else:
# if user wants to download ex-file
if read.course_download_pref == 'regular-login':
exercise_file.download(url, course_folder_path, cookie_path)
elif read.exfile_download_pref == 'library-login':
if read.card_number == '':
print('\nTo download ex-file via library login -> Please save library card details in settings.json')
else:
exercise_file.download(url, course_folder_path, cookie_path)
else:
print('\nThe exercise file can only be downloaded through one of the below combinations:')
print('~ Regular login: username + password or')
print('~ Library login: card number, pin and org. url\n')
else: # if exercise file not present
print('This course does not include Exercise files.')
except KeyboardInterrupt:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
| mit | 5,770,026,001,771,497,000 | 42.390071 | 138 | 0.623079 | false | 3.983073 | false | false | false |
gkadillak/rockstor-core | src/rockstor/storageadmin/models/update_subscription.py | 4 | 1351 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db import models
from storageadmin.models import Appliance
class UpdateSubscription(models.Model):
"""name of the channel. eg: stable"""
name = models.CharField(max_length=64, unique=True)
"""detailed description or a longer name"""
description = models.CharField(max_length=128)
"""url of the repo"""
url = models.CharField(max_length=512)
appliance = models.ForeignKey(Appliance)
password = models.CharField(max_length=64, null=True)
"""status of subscription: active, inactive, expired etc.."""
status = models.CharField(max_length=64)
class Meta:
app_label = 'storageadmin'
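# --- Usage sketch (illustrative, assumes a configured Django environment) ---
# appliance = Appliance.objects.first()
# UpdateSubscription.objects.create(
#     name='stable', description='Stable update channel',
#     url='http://updates.example.com/rockstor-stable', appliance=appliance,
#     password=None, status='inactive')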
| gpl-3.0 | -2,032,936,560,911,440,100 | 35.513514 | 68 | 0.740192 | false | 3.938776 | false | false | false |
paris-ci/CloudBot | plugins/vdm.py | 1 | 1646 | import urllib.request
from cloudbot import hook
class vdm:
def __init__(self):
try:
self.page = urllib.request.urlopen("http://feeds.feedburner.com/viedemerde").read().decode('utf-8')
except IOError:
self.page = ''
def new_story(self):
"""The fastest and the recomended option"""
start_quote = self.page.find("Aujourd'hui, ")
end_quote = self.page.find(". VDM") + 5
vdm = self.page[start_quote:end_quote]
self.page = self.page[end_quote:]
if len(vdm) >= 310:
return self.new_story()
return vdm
def random_story(self):
"""Get a story from vdm"""
chars_to_delete = ['</a><a href="', 'class="fmllink">', "/sante/'", "/sexe/", "/travail/", "/animaux/",
"</a>", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "/inclassable/", "/amour/",
"/enfants/", "/argent/", '"', "?quot;"]
page = urllib.request.urlopen("http://www.viedemerde.fr/aleatoire").read().decode('utf-8')
story = (page[page.find('class="fmllink">') + 16:page.find('" class="fmllink"> VDM</a>') + 26])
del page
for x in chars_to_delete:
story = story.replace(x, "")
if 310 >= len(story):
return story
return self.random_story()
@hook.command("vdm", "viedemerde")
def main(reply):
x = vdm()
try:
s = x.random_story()
if s != '':
reply("%s\n" % s)
else:
reply("%s\n" % x.new_story())
except IOError:
reply("Erreur de connection :(")
return None
| gpl-3.0 | -7,389,962,136,722,546,000 | 31.27451 | 112 | 0.503645 | false | 3.285429 | false | false | false |
molobrakos/home-assistant | homeassistant/components/sisyphus/__init__.py | 6 | 2276 | """Support for controlling Sisyphus Kinetic Art Tables."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
_LOGGER = logging.getLogger(__name__)
DATA_SISYPHUS = 'sisyphus'
DOMAIN = 'sisyphus'
AUTODETECT_SCHEMA = vol.Schema({})
TABLE_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
})
TABLES_SCHEMA = vol.Schema([TABLE_SCHEMA])
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Any(AUTODETECT_SCHEMA, TABLES_SCHEMA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the sisyphus component."""
from sisyphus_control import Table
tables = hass.data.setdefault(DATA_SISYPHUS, {})
table_configs = config.get(DOMAIN)
session = async_get_clientsession(hass)
async def add_table(host, name=None):
"""Add platforms for a single table with the given hostname."""
table = await Table.connect(host, session)
if name is None:
name = table.name
tables[name] = table
_LOGGER.debug("Connected to %s at %s", name, host)
hass.async_create_task(async_load_platform(
hass, 'light', DOMAIN, {
CONF_NAME: name,
}, config
))
hass.async_create_task(async_load_platform(
hass, 'media_player', DOMAIN, {
CONF_NAME: name,
CONF_HOST: host,
}, config
))
if isinstance(table_configs, dict): # AUTODETECT_SCHEMA
for ip_address in await Table.find_table_ips(session):
await add_table(ip_address)
else: # TABLES_SCHEMA
for conf in table_configs:
await add_table(conf[CONF_HOST], conf[CONF_NAME])
async def close_tables(*args):
"""Close all table objects."""
tasks = [table.close() for table in tables.values()]
if tasks:
await asyncio.wait(tasks)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_tables)
return True
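# --- Example configuration.yaml entries (illustrative) ---
# Matching CONFIG_SCHEMA above, the component either autodetects tables or
# takes an explicit list; the names and hosts below are made up.
#
# sisyphus: {}
#
# sisyphus:
#   - name: Living room table
#     host: 192.168.0.50
#   - name: Office table
#     host: 192.168.0.51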
| apache-2.0 | 3,408,656,939,845,342,700 | 28.558442 | 72 | 0.644112 | false | 3.700813 | true | false | false |
svisser/bdgt | bdgt/application.py | 1 | 6945 | import argparse
import logging
import os
from decimal import Decimal
import colorama
from bdgt import get_data_dir, get_version
from bdgt.commands.factory import CommandFactory
from bdgt.storage.database import open_database
_log = logging.getLogger(__name__)
def process_cmd(args):
try:
command = CommandFactory.create(args)
output = command()
except Exception as e:
print "Error: {}".format(str(e))
else:
print output
def main():
colorama.init()
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--database',
help="The absolute path to the bdgt database. " +
"If not specified, ~/.bdgt/bdgt.db is used.")
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(get_version()))
subparsers = parser.add_subparsers(dest='command')
# Account
account_parser = subparsers.add_parser(
'account',
help="Manage accounts"
)
account_subparsers = account_parser.add_subparsers(dest='sub_command')
account_add_parser = account_subparsers.add_parser(
'add',
help='Add an account'
)
account_add_parser.add_argument(
'name', type=unicode,
help="The name of the account, e.g: personal, savings."
)
account_add_parser.add_argument(
'number', type=unicode,
help="The account number for the account."
)
account_subparsers.add_parser(
'list',
help="List accounts"
)
account_delete_parser = account_subparsers.add_parser(
'delete',
help="Delete an account"
)
account_delete_parser.add_argument(
'name', type=unicode,
help="The name of the account, e.g: personal, savings."
)
# Import
import_parser = subparsers.add_parser(
'import',
help="Import transactions"
)
import_subparsers = import_parser.add_subparsers(dest='sub_command')
import_file_parser = import_subparsers.add_parser(
'file',
help="Import transactions from a file"
)
import_file_parser.add_argument(
'type_', type=unicode, choices=["mt940", "ofx"],
help="The type of the file being imported."
)
import_file_parser.add_argument(
'file_',
help="The path of the file to import."
)
import_subparsers.add_parser(
'status',
help="View the status of an import that's in progress"
)
import_add_parser = import_subparsers.add_parser(
'add',
help="Add parsed transactions to the staging area"
)
import_add_parser.add_argument(
'transaction_ids', type=unicode,
help="A comma-separated list of transaction id's. A range of id's " +
"can be specified using '-'; e.g: 1,4,6-10,12"
)
import_remove_parser = import_subparsers.add_parser(
'remove',
help="Remove parsed transactions from the staging area"
)
import_remove_parser.add_argument(
'transaction_ids', type=unicode,
help="A comma-separated list of transaction id's. A range of id's " +
"can be specified using '-'; e.g: 1,4,6-10,12"
)
import_subparsers.add_parser(
'reset',
help="Resets the import process."
)
import_subparsers.add_parser(
'commit',
help="Commit parsed transactions to the database."
)
import_set_parser = import_subparsers.add_parser(
'set',
help="Set the value of a field in a parsed transaction"
)
import_set_parser.add_argument(
'field', type=unicode, choices=["account", "category"],
help="The field of which the value is to be set."
)
import_set_parser.add_argument(
'value', type=unicode,
help="The value to set the field to."
)
import_set_parser.add_argument(
'transaction_ids', type=unicode,
help="A comma-separated list of transaction id's. A range of id's " +
"can be specified using '-'; e.g: 1,4,6-10,12"
)
# TX
tx_parser = subparsers.add_parser(
'tx',
help="Manage transactions"
)
tx_subparsers = tx_parser.add_subparsers(dest='sub_command')
tx_list_parser = tx_subparsers.add_parser(
'list',
help="List transactions"
)
tx_list_parser.add_argument(
'account_name', type=unicode,
help="The name of the account, e.g: personal, savings."
)
tx_assign_parser = tx_subparsers.add_parser(
'assign',
help="Assign transactions to a category."
)
tx_assign_parser.add_argument(
'category_name', type=unicode,
help="The name of the category"
)
tx_assign_parser.add_argument(
'transaction_ids', type=unicode,
help="A comma-separated list of transaction id's. A range of id's " +
"can be specified using '-'; e.g: 1,4,6-10,12"
)
tx_unassign_parser = tx_subparsers.add_parser(
'unassign',
help="Unassign a transaction from a category."
)
tx_unassign_parser.add_argument(
'transaction_ids', type=unicode,
help="A comma-separated list of transaction id's. A range of id's " +
"can be specified using '-'; e.g: 1,4,6-10,12"
)
tx_reconcile_parser = tx_subparsers.add_parser(
'reconcile',
help="Mark transactions as reconciled."
)
tx_reconcile_parser.add_argument(
'transaction_ids', type=unicode,
help="A comma-separated list of transaction id's. A range of id's " +
"can be specified using '-'; e.g: 1,4,6-10,12"
)
# Set
set_parser = subparsers.add_parser(
'set',
help="Set a budget for a category."
)
set_parser.add_argument(
'category_name', type=unicode,
help="The name of the category"
)
set_parser.add_argument(
'period', type=unicode, choices=["week", "month", "quarter", "year"],
help="The period the spending limit applies to."
)
set_parser.add_argument(
'amount', type=Decimal,
help="The spending limit amount."
)
# TODO: Month must be between 1 and 12
# TODO: Year must be 4 digits
status_parser = subparsers.add_parser(
'status',
help="View the status of a budget for the given month and year."
)
status_parser.add_argument(
'month', type=int,
)
status_parser.add_argument(
'year', type=int
)
args = parser.parse_args()
# Open database
if args.database:
open_database(args.database)
else:
bdgt_dir = get_data_dir()
if not os.path.exists(bdgt_dir):
os.makedirs(bdgt_dir)
bdgt_db = os.path.join(bdgt_dir, "bdgt.db")
open_database("sqlite:///{}".format(bdgt_db))
# Process command
process_cmd(args)
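# --- Example invocations (illustrative) ---
# Assuming the entry point is installed as "bdgt"; the names, numbers and
# paths below are placeholders.
#
#   bdgt account add personal NL00BANK0123456789
#   bdgt import file mt940 ~/Downloads/statement.sta
#   bdgt import add 1,4,6-10
#   bdgt import commit
#   bdgt tx assign groceries 12-20
#   bdgt set groceries month 250.00
#   bdgt status 5 2015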
| gpl-3.0 | -340,558,074,410,671,550 | 29.866667 | 77 | 0.599568 | false | 3.760152 | false | false | false |
inkenbrandt/ArcPy | prism/PRISM (2).py | 2 | 4456 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 01 13:52:49 2014
@author: paulinkenbrandt
http://gis.stackexchange.com/questions/108113/loop-with-arcpy-listfiles
"""
import arcpy
import os
from arcpy import env
from arcpy.sa import *
import arcpy_metadata as md
import datetime
#r'C:\GIS\PRISM\S\MAY_OCT_14'
env.workspace = arcpy.GetParameterAsText(0)
outplace = arcpy.GetParameterAsText(1)
# Uncomment the following if you are using asc files
ischecked1 = arcpy.GetParameterAsText(2)
ischecked2 = arcpy.GetParameterAsText(3)
ascFileList = arcpy.ListFiles("*.asc")
if str(ischecked1) == 'true':
for ascFile in ascFileList:
if int(ascFile[1:-1])>= (int(arcpy.GetParameterAsText(8))*100+int(arcpy.GetParameterAsText(9))):
            # get the file name without extension (replaces the %Name% variable from ModelBuilder)
ascFileName = os.path.splitext(ascFile)[0]
# define the output file
rastFile = env.workspace + '/' + ascFileName + 'o'
ascinFile = env.workspace + '/' + ascFile
arcpy.ASCIIToRaster_conversion(ascinFile, rastFile, 'INTEGER')
if str(ischecked2) == 'true':
# the following defines projections and clips the PRISM raster file
imgFileList = arcpy.ListRasters()
for imgFile in imgFileList:
if int(imgFile[1:-1])>= (int(arcpy.GetParameterAsText(8))*100+int(arcpy.GetParameterAsText(9))):
imgFileName = os.path.splitext(imgFile)[0]
imgFile1 = env.workspace + '/' + imgFileName + 'p'
incoords = arcpy.GetParameterAsText(4)
            # Process: Define projection
arcpy.DefineProjection_management(imgFile, incoords)
outExtractByMask = ExtractByMask(imgFile, arcpy.GetParameterAsText(5))
outExtractByMask.save(imgFile1)
arcpy.AddMessage("Clipped " +imgFileName)
arcpy.AddMessage("Finished Clipping Data!")
outcoord = arcpy.GetParameterAsText(6)
ischecked3 = arcpy.GetParameterAsText(7)
# the following projects the rasters and downsamples them
if str(ischecked3) == 'true':
prjFileList = arcpy.ListRasters()
for prjFile in prjFileList:
if int(prjFile[1:-1])>= (int(arcpy.GetParameterAsText(8))*100+int(arcpy.GetParameterAsText(9))):
prjFileName = os.path.splitext(prjFile)[0]
prjFile1 = outplace + '/' + prjFileName
arcpy.ProjectRaster_management(prjFile, prjFile1 ,outcoord, "CUBIC", arcpy.GetParameterAsText(10))
arcpy.AddMessage("Projected and downsampled " +prjFileName)
arcpy.AddMessage("Finished Downsampling Data!")
# convert from mm to inches of ppt
ischecked4 = arcpy.GetParameterAsText(11)
if str(ischecked4) == 'true':
env.workspace = outplace
calcFileList = arcpy.ListRasters()
for calcFile in calcFileList:
if int(calcFile[1:-1])>= (int(arcpy.GetParameterAsText(8))*100+int(arcpy.GetParameterAsText(9))):
# Overwrite pre-existing files
arcpy.env.overwriteOutput = True
calcFileName = os.path.splitext(calcFile)[0]
calcFile1 = outplace + '/' + 'a' + calcFileName[1:-1]
arcpy.Times_3d(calcFile,0.0393701,calcFile1)
arcpy.AddMessage("Converted " + calcFileName + ' to inches')
# Add Metadata Input
ischecked5 = arcpy.GetParameterAsText(12)
if str(ischecked5) == 'true':
env.workspace = outplace
metaFileList = arcpy.ListRasters('a*')
for metafile in metaFileList:
if int(metafile[1:-1])>= (int(arcpy.GetParameterAsText(8))*100+int(arcpy.GetParameterAsText(9))):
metaplace = outplace + '/' + metafile
metadata = md.MetadataEditor(metaplace)
metadata.title.set('PRISM precipitation data (inches) ' + metafile[-3:-1] + ' ' + metafile[1:-3] ) #
metadata.purpose.set('PRISM Raster File in Inches ' + metafile[-3:-1] + ' ' + metafile[1:-3])
metadata.abstract.append('PRISM Raster File in Inches ' + metafile[-3:-1] + ' ' + metafile[1:-3])
metadata.tags.add(["PRISM", "Precipitation", "Inches",metafile[-3:-1],metafile[1:-3] ]) # tags.extend is equivalent to maintain list semantics
metadata.finish() # save the metadata back to the original source feature class and cleanup. Without calling finish(), your edits are NOT saved!
arcpy.AddMessage("Added Metadata to " + metafile + ' to inches')
| gpl-2.0 | -830,894,340,026,066,000 | 42.262136 | 157 | 0.659336 | false | 3.347859 | false | false | false |
mcdeaton13/dynamic | Python/dynamic/income.py | 2 | 8945 | '''
------------------------------------------------------------------------
Last updated 7/17/2015
Functions for created the matrix of ability levels, e. This can
only be used for looking at the 25, 50, 70, 80, 90, 99, and 100th
percentiles, as it uses fitted polynomials to those percentiles.
For a more generic version, see income_nopoly.py.
This file calls the following files:
utils.py
This py-file creates the following other file(s):
(make sure that an OUTPUT folder exists)
OUTPUT/Demographics/ability_log
OUTPUT/Demographics/ability
------------------------------------------------------------------------
'''
'''
------------------------------------------------------------------------
Packages
------------------------------------------------------------------------
'''
import numpy as np
import scipy.optimize as opt
import utils
'''
------------------------------------------------------------------------
Generate Polynomials
------------------------------------------------------------------------
The following coefficients are for polynomials which fit ability data
for the 25, 50, 70, 80, 90, 99, and 100 percentiles. The data comes from
the following file:
data/ability/FR_wage_profile_tables.xlsx
the polynomials are of the form
log(ability) = constant + (one)(age) + (two)(age)^2 + (three)(age)^3
------------------------------------------------------------------------
'''
# Vals for: .25 .25 .2 .1 .1 .09 .01
one = np.array([-0.09720122, 0.05995294, 0.17654618, 0.21168263, 0.21638731, 0.04500235, 0.09229392])
two = np.array([0.00247639, -0.00004086, -0.00240656, -0.00306555, -0.00321041, 0.00094253, 0.00012902])
three = np.array([-0.00001842, -0.00000521, 0.00001039, 0.00001438, 0.00001579, -0.00001470, -0.00001169])
constant = np.array([3.41e+00, 0.69689692, -0.78761958, -1.11e+00, -0.93939272, 1.60e+00, 1.89e+00])
ages = np.linspace(21, 80, 60)
ages = np.tile(ages.reshape(60, 1), (1, 7))
income_profiles = constant + one * ages + two * ages ** 2 + three * ages ** 3
income_profiles = np.exp(income_profiles)
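# --- Worked example (illustrative) ---
# Evaluating the fitted polynomial for one ability group and age, e.g. the
# group with index 1 at age 40; the same value sits in
# income_profiles[40 - 21, 1] because ages runs over the integers 21..80.
if __name__ == '__main__':
    age, j = 40, 1
    log_e = constant[j] + one[j] * age + two[j] * age ** 2 + three[j] * age ** 3
    assert np.isclose(np.exp(log_e), income_profiles[age - 21, j])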
'''
------------------------------------------------------------------------
Generate ability type matrix
------------------------------------------------------------------------
Given desired starting and stopping ages, as well as the values for S
and J, the ability matrix is created. An arctan function is used
to extrapolate ability for ages 80-100.
------------------------------------------------------------------------
'''
def graph_income(S, J, e, starting_age, ending_age, bin_weights):
'''
    Graphs the ability matrix (and its log)
Inputs:
S = number of age groups (scalar)
J = number of ability types (scalar)
e = ability matrix (SxJ array)
starting_age = initial age (scalar)
ending_age = end age (scalar)
bin_weights = ability weights (Jx1 array)
Outputs:
OUTPUT/Demographics/ability_log.png
OUTPUT/Demographics/ability.png
'''
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
domain = np.linspace(starting_age, ending_age, S)
Jgrid = np.zeros(J)
for j in xrange(J):
Jgrid[j:] += bin_weights[j]
X, Y = np.meshgrid(domain, Jgrid)
cmap2 = matplotlib.cm.get_cmap('winter')
if J == 1:
plt.figure()
plt.plot(domain, np.log(e))
plt.savefig('OUTPUT/Demographics/ability_log')
else:
fig10 = plt.figure()
ax10 = fig10.gca(projection='3d')
ax10.plot_surface(X, Y, np.log(e).T, rstride=1, cstride=2, cmap=cmap2)
ax10.set_xlabel(r'age-$s$')
ax10.set_ylabel(r'ability type -$j$')
ax10.set_zlabel(r'log ability $log(e_j(s))$')
# plt.show()
plt.savefig('OUTPUT/Demographics/ability_log')
# 2D Version
fig112 = plt.figure()
ax = plt.subplot(111)
ax.plot(domain, np.log(e[:, 0]), label='0 - 24%', linestyle='-', color='black')
ax.plot(domain, np.log(e[:, 1]), label='25 - 49%', linestyle='--', color='black')
ax.plot(domain, np.log(e[:, 2]), label='50 - 69%', linestyle='-.', color='black')
ax.plot(domain, np.log(e[:, 3]), label='70 - 79%', linestyle=':', color='black')
ax.plot(domain, np.log(e[:, 4]), label='80 - 89%', marker='x', color='black')
ax.plot(domain, np.log(e[:, 5]), label='90 - 99%', marker='v', color='black')
ax.plot(domain, np.log(e[:, 6]), label='99 - 100%', marker='1', color='black')
ax.axvline(x=80, color='black', linestyle='--')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_xlabel(r'age-$s$')
ax.set_ylabel(r'log ability $log(e_j(s))$')
plt.savefig('OUTPUT/Demographics/ability_log_2D')
if J == 1:
plt.figure()
plt.plot(domain, e)
plt.savefig('OUTPUT/Demographics/ability')
else:
fig10 = plt.figure()
ax10 = fig10.gca(projection='3d')
ax10.plot_surface(X, Y, e.T, rstride=1, cstride=2, cmap=cmap2)
ax10.set_xlabel(r'age-$s$')
ax10.set_ylabel(r'ability type -$j$')
ax10.set_zlabel(r'ability $e_j(s)$')
plt.savefig('OUTPUT/Demographics/ability')
def arc_tan_func(points, a, b, c):
'''
Functional form for a generic arctan function
'''
y = (-a / np.pi) * np.arctan(b*points + c) + a / 2
return y
def arc_tan_deriv_func(points, a, b, c):
'''
Functional form for the derivative of a generic arctan function
'''
y = -a * b / (np.pi * (1+(b*points+c)**2))
return y
def arc_error(guesses, params):
'''
    Returns the error between the arctan fit and three targets: the ability level at age 80, the slope at age 80, and the level at age 100 (a fixed fraction of the age-80 level)
'''
a, b, c = guesses
first_point, coef1, coef2, coef3, ability_depreciation = params
error1 = first_point - arc_tan_func(80, a, b, c)
if (3 * coef3 * 80 ** 2 + 2 * coef2 * 80 + coef1) < 0:
error2 = (3 * coef3 * 80 ** 2 + 2 * coef2 * 80 + coef1)*first_point - arc_tan_deriv_func(80, a, b, c)
else:
error2 = -.02 * first_point - arc_tan_deriv_func(80, a, b, c)
error3 = ability_depreciation * first_point - arc_tan_func(100, a, b, c)
error = [np.abs(error1)] + [np.abs(error2)] + [np.abs(error3)]
return error
def arc_tan_fit(first_point, coef1, coef2, coef3, ability_depreciation, init_guesses):
'''
Fits an arctan function to the last 20 years of the ability levels
'''
guesses = init_guesses
params = [first_point, coef1, coef2, coef3, ability_depreciation]
a, b, c = opt.fsolve(arc_error, guesses, params)
old_ages = np.linspace(81, 100, 20)
return arc_tan_func(old_ages, a, b, c)
def get_e(S, J, starting_age, ending_age, bin_weights, omega_SS, flag_graphs):
'''
Inputs:
S = Number of age cohorts (scalar)
J = Number of ability levels by age (scalar)
starting_age = age of first age cohort (scalar)
ending_age = age of last age cohort (scalar)
bin_weights = ability weights (Jx1 array)
omega_SS = population weights (Sx1 array)
flag_graphs = Graph flags or not (bool)
Output:
e = ability levels for each age cohort, normalized so
the weighted sum is one (SxJ array)
'''
e_short = income_profiles
e_final = np.ones((S, J))
e_final[:60, :] = e_short
e_final[60:, :] = 0.0
# This following variable is what percentage of ability at age 80 ability falls to at age 100.
# In general, we wanted people to lose half of their ability over a 20 year period. The first
# entry is .47, though, because nothing higher would converge. The second to last is .7 because this group
    # actually has a slightly higher ability at age 80 than the last group, so this makes it decrease more so it
# ends monotonic.
ability_depreciation = np.array([.47, .5, .5, .5, .5, .7, .5])
# Initial guesses for the arctan. They're pretty sensitive.
init_guesses = np.array([[58, 0.0756438545595, -5.6940142786],
[27, 0.069, -5],
[35, .06, -5],
[37, 0.339936555352, -33.5987329144],
[70.5229181668, 0.0701993896947, -6.37746859905],
[35, .06, -5],
[35, .06, -5]])
for j in xrange(J):
e_final[60:, j] = arc_tan_fit(e_final[59, j], one[j], two[j], three[j], ability_depreciation[j], init_guesses[j])
if flag_graphs:
graph_income(S, J, e_final, starting_age, ending_age, bin_weights)
e_final /= (e_final * omega_SS.reshape(S, 1) * bin_weights.reshape(1, J)).sum()
return e_final
| mit | 6,937,819,019,961,098,000 | 40.221198 | 138 | 0.5545 | false | 3.341427 | false | false | false |
ctralie/S3DGLPy | LaplacianMesh.py | 1 | 10246 | from PolyMesh import *
from Primitives3D import *
from OpenGL.GL import *
import sys
import re
import math
import time
import numpy as np
from scipy import sparse
import scipy.io as sio
from scipy.sparse.linalg import lsqr, cg, eigsh
import matplotlib.pyplot as plt
import os
this_path = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(this_path + '/ext/lib') and os.path.exists(this_path + '/ext/libigl/python'):
sys.path.insert(0, this_path + '/ext/libigl/python/')
sys.path.insert(0, this_path + '/ext/lib/')
print "Importing IGL"
import igl
#Quickly compute sparse Laplacian matrix with cotangent weights and Voronoi areas
#by doing many operations in parallel using NumPy
#VPos: N x 3 array of vertex positions
#ITris: M x 3 array of triangle indices
#anchorsIdx: List of anchor positions
def makeLaplacianMatrixCotWeights(VPos, ITris, anchorsIdx, anchorWeights = 1):
N = VPos.shape[0]
M = ITris.shape[0]
#Allocate space for the sparse array storage, with 2 entries for every
#edge for every triangle (6 entries per triangle); one entry for directed
#edge ij and ji. Note that this means that edges with two incident triangles
#will have two entries per directed edge, but sparse array will sum them
I = np.zeros(M*6)
J = np.zeros(M*6)
V = np.zeros(M*6)
#Keep track of areas of incident triangles and the number of incident triangles
IA = np.zeros(M*3)
VA = np.zeros(M*3) #Incident areas
VC = 1.0*np.ones(M*3) #Number of incident triangles
#Step 1: Compute cotangent weights
for shift in range(3):
#For all 3 shifts of the roles of triangle vertices
#to compute different cotangent weights
[i, j, k] = [shift, (shift+1)%3, (shift+2)%3]
dV1 = VPos[ITris[:, i], :] - VPos[ITris[:, k], :]
dV2 = VPos[ITris[:, j], :] - VPos[ITris[:, k], :]
Normal = np.cross(dV1, dV2)
#Cotangent is dot product / mag cross product
NMag = np.sqrt(np.sum(Normal**2, 1))
cotAlpha = np.sum(dV1*dV2, 1)/NMag
I[shift*M*2:shift*M*2+M] = ITris[:, i]
J[shift*M*2:shift*M*2+M] = ITris[:, j]
V[shift*M*2:shift*M*2+M] = cotAlpha
I[shift*M*2+M:shift*M*2+2*M] = ITris[:, j]
J[shift*M*2+M:shift*M*2+2*M] = ITris[:, i]
V[shift*M*2+M:shift*M*2+2*M] = cotAlpha
if shift == 0:
#Compute contribution of this triangle to each of the vertices
for k in range(3):
IA[k*M:(k+1)*M] = ITris[:, k]
VA[k*M:(k+1)*M] = 0.5*NMag
#Step 2: Create laplacian matrix
L = sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
#Create the diagonal by summing the rows and subtracting off the nondiagonal entries
L = sparse.dia_matrix((L.sum(1).flatten(), 0), L.shape) - L
#Scale each row by the incident areas
Areas = sparse.coo_matrix((VA, (IA, IA)), shape=(N, N)).tocsr()
Areas = Areas.todia().data.flatten()
Counts = sparse.coo_matrix((VC, (IA, IA)), shape=(N, N)).tocsr()
Counts = Counts.todia().data.flatten()
RowScale = sparse.dia_matrix((3*Counts/Areas, 0), L.shape)
L = L.T.dot(RowScale).T
#Step 3: Add anchors
L = L.tocoo()
I = L.row.tolist()
J = L.col.tolist()
V = L.data.tolist()
I = I + range(N, N+len(anchorsIdx))
J = J + anchorsIdx
V = V + [anchorWeights]*len(anchorsIdx)
L = sparse.coo_matrix((V, (I, J)), shape=(N+len(anchorsIdx), N)).tocsr()
return L
#Use simple umbrella weights instead of cotangent weights
#VPos: N x 3 array of vertex positions
#ITris: M x 3 array of triangle indices
#anchorsIdx: List of anchor positions
def makeLaplacianMatrixUmbrellaWeights(VPos, ITris, anchorsIdx, anchorWeights = 1):
N = VPos.shape[0]
M = ITris.shape[0]
I = np.zeros(M*6)
J = np.zeros(M*6)
V = np.ones(M*6)
#Step 1: Set up umbrella entries
for shift in range(3):
#For all 3 shifts of the roles of triangle vertices
#to compute different cotangent weights
[i, j, k] = [shift, (shift+1)%3, (shift+2)%3]
I[shift*M*2:shift*M*2+M] = ITris[:, i]
J[shift*M*2:shift*M*2+M] = ITris[:, j]
I[shift*M*2+M:shift*M*2+2*M] = ITris[:, j]
J[shift*M*2+M:shift*M*2+2*M] = ITris[:, i]
#Step 2: Create laplacian matrix
L = sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
L[L > 0] = 1
#Create the diagonal by summing the rows and subtracting off the nondiagonal entries
L = sparse.dia_matrix((L.sum(1).flatten(), 0), L.shape) - L
#Step 3: Add anchors
L = L.tocoo()
I = L.row.tolist()
J = L.col.tolist()
V = L.data.tolist()
I = I + range(N, N+len(anchorsIdx))
J = J + anchorsIdx
V = V + [anchorWeights]*len(anchorsIdx)
L = sparse.coo_matrix((V, (I, J)), shape=(N+len(anchorsIdx), N)).tocsr()
return L
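# --- Minimal usage sketch (illustrative) ---
# Builds the umbrella-weight Laplacian for a single triangle with one anchor
# vertex; the extra row at the bottom is the anchor constraint.
if __name__ == '__main__':
    _VPos = np.array([[0.0, 0.0, 0.0],
                      [1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]])
    _ITris = np.array([[0, 1, 2]])
    _L = makeLaplacianMatrixUmbrellaWeights(_VPos, _ITris, [0], anchorWeights=1)
    print _L.todense()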
def solveLaplacianMatrix(L, deltaCoords, anchors, anchorWeights = 1):
y = np.concatenate((deltaCoords, anchorWeights*anchors), 0)
y = np.array(y, np.float64)
coo = L.tocoo()
coo = np.vstack((coo.row, coo.col, coo.data)).T
coo = igl.eigen.MatrixXd(np.array(coo, dtype=np.float64))
LE = igl.eigen.SparseMatrixd()
LE.fromCOO(coo)
Q = LE.transpose()*LE
start_time = time.time()
#solver = igl.eigen.SimplicialLLTsparse(Q)
solver = igl.eigen.CholmodSupernodalLLT(Q)
ret = solver.solve(igl.eigen.MatrixXd(y))
end_time = time.time()
print 'factorization elapsed time:',end_time-start_time,'seconds'
return np.array(ret)
#Make a QP solver with hard constraints
def makeLaplacianMatrixSolverIGLHard(VPos, ITris, anchorsIdx):
VPosE = igl.eigen.MatrixXd(VPos)
ITrisE = igl.eigen.MatrixXi(ITris)
L = igl.eigen.SparseMatrixd()
M = igl.eigen.SparseMatrixd()
M_inv = igl.eigen.SparseMatrixd()
igl.cotmatrix(VPosE,ITrisE,L)
igl.massmatrix(VPosE,ITrisE,igl.MASSMATRIX_TYPE_VORONOI,M)
igl.invert_diag(M,M_inv)
L = M_inv*L
deltaCoords = L*VPosE
deltaCoords = np.array(deltaCoords)
#Bi-laplacian
Q = L.transpose()*L
#Linear term with delta coordinates
#TODO: Finish this
#return (L, solver, deltaCoords)
def makeLaplacianMatrixSolverIGLSoft(VPos, ITris, anchorsIdx, anchorWeights, makeSolver = True):
VPosE = igl.eigen.MatrixXd(VPos)
ITrisE = igl.eigen.MatrixXi(ITris)
'''
#Doing this check slows things down by more than a factor of 2 (convert to numpy to make faster?)
for f in range(ITrisE.rows()):
v_list = ITrisE.row(f)
v1 = VPosE.row(v_list[0])
v2 = VPosE.row(v_list[1])
v3 = VPosE.row(v_list[2])
if (v1-v2).norm() < 1e-10 and (v1-v3).norm() < 1e-10 and (v2-v3).norm() < 1e-10:
print 'zero area triangle!',f
'''
L = igl.eigen.SparseMatrixd()
M = igl.eigen.SparseMatrixd()
M_inv = igl.eigen.SparseMatrixd()
igl.cotmatrix(VPosE,ITrisE,L)
igl.massmatrix(VPosE,ITrisE,igl.MASSMATRIX_TYPE_VORONOI,M)
#np.set_printoptions(threshold='nan')
#print 'what is M?',M.diagonal()
igl.invert_diag(M,M_inv)
#L = M_inv*L
deltaCoords = (M_inv*L)*VPosE
#TODO: What to do with decaying_anchor_weights?
'''
anchor_dists = []
for i in range(VPosE.rows()):
anchor_dists.append(min([ (VPosE.row(i)-VPosE.row(j)).norm() for j in anchorsIdx ]))
max_anchor_dist = max(anchor_dists)
# assume linear weighting for anchor weights -> we are 0 at the anchors, anchorWeights at max_anchor_dist
decaying_anchor_weights = []
for anchor_dist in anchor_dists:
decaying_anchor_weights.append(anchorWeights*(anchor_dist/max_anchor_dist))
'''
solver = None
if makeSolver:
Q = L*(M_inv*M_inv)*L
#Now add in sparse constraints
diagTerms = igl.eigen.SparseMatrixd(VPos.shape[0], VPos.shape[0])
# anchor points
for a in anchorsIdx:
diagTerms.insert(a, a, anchorWeights)
# off points
'''
for adx,decay_weight in enumerate(decaying_anchor_weights):
if decay_weight == 0:
diagTerms.insert(adx, adx, anchorWeights)
else:
diagTerms.insert(adx, adx, decay_weight)
'''
Q = Q + diagTerms
Q.makeCompressed()
start_time = time.time()
solver = igl.eigen.SimplicialLLTsparse(Q)
#solver = igl.eigen.CholmodSupernodalLLT(Q)
end_time = time.time()
print 'factorization elapsed time:',end_time-start_time,'seconds'
return (L, M_inv, solver, np.array(deltaCoords))
#solver: Eigen simplicialLLT solver that has Laplace Beltrami + anchors
#deltaCoords: numpy array of delta coordinates
#anchors: numpy array of anchor positions
#anchorWeights: weight of anchors
def solveLaplacianMatrixIGLSoft(solver, L, M_inv, deltaCoords, anchorsIdx, anchors, anchorWeights):
print "solveLaplacianMatrixIGLSoft: anchorWeights = %g"%anchorWeights
y = np.array(L*M_inv*igl.eigen.MatrixXd(np.array(deltaCoords, dtype=np.float64)))
y[anchorsIdx] += anchorWeights*anchors
y = igl.eigen.MatrixXd(y)
ret = solver.solve(y)
return np.array(ret)
if __name__ == '__main__2':
anchorWeights = 10000
m = PolyMesh()
m.loadOffFile("cow.off")
m.performDisplayUpdate()
X = sio.loadmat("anchors.mat")
anchors = X['anchors']
anchorsIdx = X['anchorsIdx'].flatten().tolist()
deltaCoords = X['deltaCoords']
L = makeLaplacianMatrixCotWeights(m.VPos, m.ITris, anchorsIdx, anchorWeights)
m.VPos = solveLaplacianMatrix(L, deltaCoords, anchors, anchorWeights)
m.saveOffFile("LapCow.off")
if __name__ == '__main__3':
anchorWeights = 100
m = getSphereMesh(1, 2)
print "BBox Before: ", m.getBBox()
m.performDisplayUpdate()
anchorsIdx = np.random.randint(0, len(m.vertices), 30).tolist()
L = makeLaplacianMatrixCotWeights(m.VPos, m.ITris, anchorsIdx, anchorWeights)
sio.savemat("L.mat", {"L":L})
deltaCoords = L.dot(m.VPos)[0:len(m.vertices), :]
anchors = m.VPos[anchorsIdx, :]
anchors = anchors*5
m.VPos = solveLaplacianMatrix(L, deltaCoords, anchors, anchorWeights)
print "BBox After:", m.getBBox()
m.saveOffFile("LapSphere.off")
| apache-2.0 | 6,239,854,272,775,887,000 | 37.374532 | 109 | 0.635565 | false | 2.948489 | false | false | false |
wooga/airflow | airflow/kubernetes/volume_mount.py | 1 | 2328 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Classes for interacting with Kubernetes API
"""
import copy
import kubernetes.client.models as k8s
from airflow.kubernetes.k8s_model import K8SModel
class VolumeMount(K8SModel):
"""
Initialize a Kubernetes Volume Mount. Used to mount pod level volumes to
running container.
:param name: the name of the volume mount
:type name: str
    :param mount_path: the path inside the container where the volume is mounted
:type mount_path: str
:param sub_path: subpath within the volume mount
:type sub_path: Optional[str]
:param read_only: whether to access pod with read-only mode
:type read_only: bool
"""
def __init__(self, name, mount_path, sub_path, read_only):
self.name = name
self.mount_path = mount_path
self.sub_path = sub_path
self.read_only = read_only
def to_k8s_client_obj(self) -> k8s.V1VolumeMount:
"""
Converts to k8s object.
:return Volume Mount k8s object
"""
return k8s.V1VolumeMount(
name=self.name,
mount_path=self.mount_path,
sub_path=self.sub_path,
read_only=self.read_only
)
def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod:
"""
Attaches to pod
:return Copy of the Pod object
"""
cp_pod = copy.deepcopy(pod)
volume_mount = self.to_k8s_client_obj()
cp_pod.spec.containers[0].volume_mounts = pod.spec.containers[0].volume_mounts or []
cp_pod.spec.containers[0].volume_mounts.append(volume_mount)
return cp_pod
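# --- Usage sketch (illustrative) ---
# Attaches a read-only mount to a minimal pod; the names and paths are
# placeholders and the referenced volume would have to be defined on the pod.
if __name__ == '__main__':
    pod = k8s.V1Pod(spec=k8s.V1PodSpec(
        containers=[k8s.V1Container(name='base', image='busybox')]))
    mount = VolumeMount(name='data', mount_path='/opt/data',
                        sub_path=None, read_only=True)
    pod = mount.attach_to_pod(pod)
    print(pod.spec.containers[0].volume_mounts)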
| apache-2.0 | 6,549,943,243,606,669,000 | 30.890411 | 92 | 0.670962 | false | 3.736758 | false | false | false |
cfarquhar/rpc-openstack | hacking/setup.py | 9 | 1326 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
version = '1.0.0'
setuptools.setup(
name='rpco-hacking-checks',
author='Rackspace Private Cloud',
description='Hacking/Flake8 checks for rpc-openstack',
version=version,
install_requires=['hacking'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
],
py_modules=['rpco_checks'],
provides=['rpco_checks'],
entry_points={
'flake8.extension': [
'rpco.git_title_bug = rpco_checks:OnceGitCheckCommitTitleBug',
('rpco.git_title_length = '
'rpco_checks:OnceGitCheckCommitTitleLength'),
('rpco.git_title_period = '
'rpco_checks:OnceGitCheckCommitTitlePeriodEnding'),
]
},
)
| apache-2.0 | -5,472,203,642,330,690,000 | 34.837838 | 76 | 0.671946 | false | 3.83237 | false | false | false |
jffifa/python-mysql-eventprocessor | mysqlevp/handler/base.py | 1 | 3795 | from __future__ import absolute_import, unicode_literals
from ..event.row_wrapper import InsertEventRow, UpdateEventRow, DeleteEventRow
class IEventHandler(object):
def on_insert_raw(self, ev_id, ev):
ev_timestamp = ev.packet.timestamp
schema = ev.schema
table = ev.table
affected_rows = []
for row in ev.rows:
affected_rows.append(InsertEventRow(
ev_id=ev_id,
ev=ev,
new_values=row['values'],
))
self.on_insert(ev_id, ev_timestamp, schema, table, affected_rows)
def on_update_raw(self, ev_id, ev):
ev_timestamp = ev.packet.timestamp
schema = ev.schema
table = ev.table
affected_rows = []
for row in ev.rows:
affected_rows.append(UpdateEventRow(
ev_id=ev_id,
ev=ev,
old_values=row['before_values'],
new_values=row['after_values'],
))
self.on_update(ev_id, ev_timestamp, schema, table, affected_rows)
def on_delete_raw(self, ev_id, ev):
ev_timestamp = ev.packet.timestamp
schema = ev.schema
table = ev.table
affected_rows = []
for row in ev.rows:
affected_rows.append(DeleteEventRow(
ev_id=ev_id,
ev=ev,
old_values=row['values'],
))
self.on_delete(ev_id, ev_timestamp, schema, table, affected_rows)
def on_insert(self, ev_id, ev_timestamp, schema, table, affected_rows):
"""
:param ev_id: unique string id for each event
:param ev_timestamp: unix epoch for the time event happens
:param schema: affected database name
:param table: affected table name
        :param affected_rows: list of instances of mysqlevp.event.row_wrapper.InsertEventRow
            each instance has an attr named "new_values",
            which is a dict (keyed by MySQL column name) of the newly inserted row
for row in affected_rows:
do_something(row.new_values)
"""
pass
def on_update(self, ev_id, ev_timestamp, schema, table, affected_rows):
"""
:param ev_id: unique string id for each event
:param ev_timestamp: unix epoch for the time event happens
:param schema: affected database name
:param table: affected table name
        :param affected_rows: list of instances of mysqlevp.event.row_wrapper.UpdateEventRow
            each instance has two attrs named "new_values" and "old_values",
            which are dicts (keyed by MySQL column name) of the newly inserted row and the old replaced row
for row in affected_rows:
do_something(row.new_values, row.old_values)
"""
pass
def on_delete(self, ev_id, ev_timestamp, schema, table, affected_rows):
"""
:param ev_id: unique string id for each event
:param ev_timestamp: unix epoch for the time event happens
:param schema: affected database name
:param table: affected table name
        :param affected_rows: list of instances of mysqlevp.event.row_wrapper.DeleteEventRow
            each instance has an attr named "old_values",
            which is a dict (keyed by MySQL column name) of the deleted row
for row in affected_rows:
do_something(row.old_values)
"""
pass
def close(self):
"""allow user to release some resource
"""
pass
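# --- Example handler (illustrative, not part of the original module) ---
# A minimal subclass that just prints the affected rows; wiring it up to a
# binlog stream is outside the scope of this file.
class PrintEventHandler(IEventHandler):
    def on_insert(self, ev_id, ev_timestamp, schema, table, affected_rows):
        for row in affected_rows:
            print('[%s] INSERT %s.%s %r' % (ev_id, schema, table, row.new_values))
    def on_update(self, ev_id, ev_timestamp, schema, table, affected_rows):
        for row in affected_rows:
            print('[%s] UPDATE %s.%s %r -> %r' % (ev_id, schema, table,
                                                  row.old_values, row.new_values))
    def on_delete(self, ev_id, ev_timestamp, schema, table, affected_rows):
        for row in affected_rows:
            print('[%s] DELETE %s.%s %r' % (ev_id, schema, table, row.old_values))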
| mit | -8,347,637,732,043,205,000 | 38.53125 | 126 | 0.557312 | false | 4.327252 | false | false | false |
akabos/NearPy | nearpy/experiments/recallprecisionexperiment.py | 3 | 8530 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import numpy
import scipy
import time
from scipy.spatial.distance import cdist
from nearpy.utils import numpy_array_from_list_or_numpy_array
from nearpy.utils.utils import unitvec
class RecallPrecisionExperiment(object):
"""
Performs nearest neighbour recall experiments with custom vector data
for all engines in the specified list.
    perform_experiment() returns a list of (recall, precision, search_time)
    tuples. These are the averaged values over all request vectors. search_time
is the average retrieval/search time compared to the average exact search
time.
coverage_ratio determines how many of the vectors are used as query
    vectors for exact and approximated search. Because the search comparison
    overhead is quite large, it is best with large data sets (>10000) to
    use a low coverage_ratio (like 0.1) to make the experiment fast. A
    coverage_ratio of 0.1 makes the experiment use 10% of all the vectors
    for querying, that is, it searches for the nearest neighbours of 10%
    of all the vectors.
"""
def __init__(self, N, vectors, coverage_ratio=0.2):
"""
Performs exact nearest neighbour search on the data set.
vectors can either be a numpy matrix with all the vectors
as columns OR a python array containing the individual
numpy vectors.
"""
# We need a dict from vector string representation to index
self.vector_dict = {}
self.N = N
self.coverage_ratio = coverage_ratio
numpy_vectors = numpy_array_from_list_or_numpy_array(vectors)
# Get numpy array representation of input
self.vectors = numpy.vstack([unitvec(v) for v in numpy_vectors.T])
# Build map from vector string representation to vector
for index, v in enumerate(self.vectors):
self.vector_dict[self.__vector_to_string(v)] = index
# Determine the indices of query vectors used for comparance
# with approximated search.
query_count = numpy.floor(self.coverage_ratio *
len(self.vectors))
self.query_indices = []
for k in range(int(query_count)):
index = numpy.floor(k * (float(len(self.vectors)) / query_count))
index = min(index, len(self.vectors) - 1)
self.query_indices.append(int(index))
print('\nStarting exact search (query set size=%d)...\n' % query_count)
# For each query vector get the closest N neighbours
self.closest = {}
self.exact_search_time_per_vector = 0.0
for index in self.query_indices:
v = self.vectors[index, numpy.newaxis]
exact_search_start_time = time.time()
D = cdist(v, self.vectors, 'euclidean')
self.closest[index] = scipy.argsort(D)[0, 1:N+1]
# Save time needed for exact search
exact_search_time = time.time() - exact_search_start_time
self.exact_search_time_per_vector += exact_search_time
print('Done with exact search...\n')
# Normalize search time
self.exact_search_time_per_vector /= float(len(self.query_indices))
def perform_experiment(self, engine_list):
"""
Performs nearest neighbour recall experiments with custom vector data
for all engines in the specified list.
        Returns a list of (recall, precision, search_time)
        tuples. All are the averaged values over all request vectors.
search_time is the average retrieval/search time compared to the
average exact search time.
"""
# We will fill this array with measures for all the engines.
result = []
# For each engine, first index vectors and then retrieve neighbours
        for engine_idx, engine in enumerate(engine_list):
            print('Engine %d / %d' % (engine_idx, len(engine_list)))
# Clean storage
engine.clean_all_buckets()
# Use this to compute average recall
avg_recall = 0.0
# Use this to compute average precision
avg_precision = 0.0
# Use this to compute average search time
avg_search_time = 0.0
# Index all vectors and store them
for index, v in enumerate(self.vectors):
engine.store_vector(v, 'data_%d' % index)
# Look for N nearest neighbours for query vectors
for index in self.query_indices:
# Get indices of the real nearest as set
real_nearest = set(self.closest[index])
# We have to time the search
search_time_start = time.time()
# Get nearest N according to engine
nearest = engine.neighbours(self.vectors[index])
# Get search time
search_time = time.time() - search_time_start
                # For comparison we need their indices (as a set)
nearest = set([self.__index_of_vector(x[0]) for x in nearest])
# Remove query index from search result to make sure that
# recall and precision make sense in terms of "neighbours".
# If ONLY the query vector is retrieved, we want recall to be
# zero!
nearest.remove(index)
# If the result list is empty, recall and precision are 0.0
if len(nearest) == 0:
recall = 0.0
precision = 0.0
else:
# Get intersection count
inter_count = float(len(real_nearest & nearest))
# Normalize recall for this vector
recall = inter_count/float(len(real_nearest))
# Normalize precision for this vector
precision = inter_count/float(len(nearest))
# Add to accumulator
avg_recall += recall
# Add to accumulator
avg_precision += precision
# Add to accumulator
avg_search_time += search_time
# Normalize recall over query set
avg_recall /= float(len(self.query_indices))
# Normalize precision over query set
avg_precision /= float(len(self.query_indices))
# Normalize search time over query set
avg_search_time = avg_search_time / float(len(self.query_indices))
# Normalize search time with respect to exact search
avg_search_time /= self.exact_search_time_per_vector
print(' recall=%f, precision=%f, time=%f' % (avg_recall,
avg_precision,
avg_search_time))
result.append((avg_recall, avg_precision, avg_search_time))
# Return (recall, precision, search_time) tuple
return result
def __vector_to_string(self, vector):
""" Returns string representation of vector. """
return numpy.array_str(numpy.round(unitvec(vector), decimals=3))
def __index_of_vector(self, vector):
""" Returns index of specified vector from test data set. """
return self.vector_dict[self.__vector_to_string(vector)]
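# --- Usage sketch (illustrative) ---
# Runs the experiment on random data with a single default Engine; the
# dimension and vector count are arbitrary.
if __name__ == '__main__':
    from nearpy import Engine
    dim = 16
    vectors = numpy.random.randn(dim, 200)  # one vector per column
    experiment = RecallPrecisionExperiment(10, vectors, coverage_ratio=0.1)
    engine = Engine(dim)
    print(experiment.perform_experiment([engine]))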
| mit | -2,381,465,748,599,920,000 | 40.009615 | 79 | 0.61993 | false | 4.5228 | false | false | false |
Eksmo/calibre | src/calibre/ebooks/markdown/extensions/rss.py | 5 | 3753 | import calibre.ebooks.markdown.markdown as markdown
from calibre.ebooks.markdown.markdown import etree
DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/"
DEFAULT_CREATOR = "Yuri Takhteyev"
DEFAULT_TITLE = "Markdown in Python"
GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss"
month_map = { "Jan" : "01",
"Feb" : "02",
"March" : "03",
"April" : "04",
"May" : "05",
"June" : "06",
"July" : "07",
"August" : "08",
"September" : "09",
"October" : "10",
"November" : "11",
"December" : "12" }
def get_time(heading):
heading = heading.split("-")[0]
heading = heading.strip().replace(",", " ").replace(".", " ")
month, date, year = heading.split()
month = month_map[month]
return rdftime(" ".join((month, date, year, "12:00:00 AM")))
def rdftime(time):
time = time.replace(":", " ")
time = time.replace("/", " ")
time = time.split()
return "%s-%s-%sT%s:%s:%s-08:00" % (time[0], time[1], time[2],
time[3], time[4], time[5])
def get_date(text):
return "date"
class RssExtension (markdown.Extension):
def extendMarkdown(self, md, md_globals):
self.config = { 'URL' : [DEFAULT_URL, "Main URL"],
'CREATOR' : [DEFAULT_CREATOR, "Feed creator's name"],
'TITLE' : [DEFAULT_TITLE, "Feed title"] }
md.xml_mode = True
# Insert a tree-processor that would actually add the title tag
treeprocessor = RssTreeProcessor(md)
treeprocessor.ext = self
md.treeprocessors['rss'] = treeprocessor
md.stripTopLevelTags = 0
md.docType = '<?xml version="1.0" encoding="utf-8"?>\n'
class RssTreeProcessor(markdown.treeprocessors.Treeprocessor):
def run (self, root):
rss = etree.Element("rss")
rss.set("version", "2.0")
channel = etree.SubElement(rss, "channel")
for tag, text in (("title", self.ext.getConfig("TITLE")),
("link", self.ext.getConfig("URL")),
("description", None)):
element = etree.SubElement(channel, tag)
element.text = text
for child in root:
if child.tag in ["h1", "h2", "h3", "h4", "h5"]:
heading = child.text.strip()
item = etree.SubElement(channel, "item")
link = etree.SubElement(item, "link")
link.text = self.ext.getConfig("URL")
title = etree.SubElement(item, "title")
title.text = heading
guid = ''.join([x for x in heading if x.isalnum()])
guidElem = etree.SubElement(item, "guid")
guidElem.text = guid
guidElem.set("isPermaLink", "false")
elif child.tag in ["p"]:
try:
description = etree.SubElement(item, "description")
except UnboundLocalError:
# Item not defined - moving on
pass
else:
if len(child):
content = "\n".join([etree.tostring(node)
for node in child])
else:
content = child.text
pholder = self.markdown.htmlStash.store(
"<![CDATA[ %s]]>" % content)
description.text = pholder
return rss
def makeExtension(configs):
return RssExtension(configs)
| gpl-3.0 | 1,401,059,289,872,084,500 | 31.921053 | 77 | 0.494271 | false | 4.018201 | true | false | false |
cgwalters/pykickstart | pykickstart/commands/firewall.py | 8 | 9004 | #
# Chris Lumens <[email protected]>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import KickstartCommand
from pykickstart.errors import KickstartValueError, formatErrorMsg
from pykickstart.options import KSOptionParser
from pykickstart.i18n import _
class FC3_Firewall(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.enabled = kwargs.get("enabled", None)
self.ports = kwargs.get("ports", [])
self.trusts = kwargs.get("trusts", [])
def __str__(self):
extra = []
filteredPorts = []
retval = KickstartCommand.__str__(self)
if self.enabled is None:
return retval
if self.enabled:
# It's possible we have words in the ports list instead of
# port:proto (s-c-kickstart may do this). So, filter those
# out into their own list leaving what we expect.
for port in self.ports:
if port == "ssh":
extra.append(" --ssh")
elif port == "telnet":
extra.append(" --telnet")
elif port == "smtp":
extra.append(" --smtp")
elif port == "http":
extra.append(" --http")
elif port == "ftp":
extra.append(" --ftp")
else:
filteredPorts.append(port)
# All the port:proto strings go into a comma-separated list.
portstr = ",".join(filteredPorts)
if len(portstr) > 0:
portstr = " --port=" + portstr
else:
portstr = ""
extrastr = "".join(extra)
truststr = ",".join(self.trusts)
if len(truststr) > 0:
truststr = " --trust=" + truststr
# The output port list consists only of port:proto for
# everything that we don't recognize, and special options for
# those that we do.
retval += "# Firewall configuration\nfirewall --enabled%s%s%s\n" % (extrastr, portstr, truststr)
else:
retval += "# Firewall configuration\nfirewall --disabled\n"
return retval
def _getParser(self):
def firewall_port_cb (option, opt_str, value, parser):
for p in value.split(","):
p = p.strip()
if p.find(":") == -1:
p = "%s:tcp" % p
parser.values.ensure_value(option.dest, []).append(p)
op = KSOptionParser(mapping={"ssh":["22:tcp"], "telnet":["23:tcp"],
"smtp":["25:tcp"], "http":["80:tcp", "443:tcp"],
"ftp":["21:tcp"]})
op.add_option("--disable", "--disabled", dest="enabled",
action="store_false")
op.add_option("--enable", "--enabled", dest="enabled",
action="store_true", default=True)
op.add_option("--ftp", "--http", "--smtp", "--ssh", "--telnet",
dest="ports", action="map_extend")
op.add_option("--high", deprecated=1)
op.add_option("--medium", deprecated=1)
op.add_option("--port", dest="ports", action="callback",
callback=firewall_port_cb, nargs=1, type="string")
op.add_option("--trust", dest="trusts", action="append")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 0:
mapping = {"command": "firewall", "options": extra}
raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping))
self._setToSelf(self.op, opts)
return self
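# Illustrative (assumed) round trip for the FC3 syntax: parsing the kickstart line
#   firewall --enabled --ssh --port=8080,1234:udp --trust=eth0
# should leave enabled == True, ports == ["22:tcp", "8080:tcp", "1234:udp"] and
# trusts == ["eth0"]; __str__() then re-emits the unrecognized ports as a
# comma-separated --port= list.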
class F9_Firewall(FC3_Firewall):
removedKeywords = FC3_Firewall.removedKeywords
removedAttrs = FC3_Firewall.removedAttrs
def _getParser(self):
op = FC3_Firewall._getParser(self)
op.remove_option("--high")
op.remove_option("--medium")
return op
class F10_Firewall(F9_Firewall):
removedKeywords = F9_Firewall.removedKeywords
removedAttrs = F9_Firewall.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
F9_Firewall.__init__(self, writePriority, *args, **kwargs)
self.services = kwargs.get("services", [])
def __str__(self):
if self.enabled is None:
return ""
retval = F9_Firewall.__str__(self)
if self.enabled:
retval = retval.strip()
svcstr = ",".join(self.services)
if len(svcstr) > 0:
svcstr = " --service=" + svcstr
else:
svcstr = ""
return retval + "%s\n" % svcstr
else:
return retval
def _getParser(self):
def service_cb (option, opt_str, value, parser):
# python2.4 does not support action="append_const" that we were
# using for these options. Instead, we have to fake it by
# appending whatever the option string is to the service list.
if not value:
parser.values.ensure_value(option.dest, []).append(opt_str[2:])
return
for p in value.split(","):
p = p.strip()
parser.values.ensure_value(option.dest, []).append(p)
op = F9_Firewall._getParser(self)
op.add_option("--service", dest="services", action="callback",
callback=service_cb, nargs=1, type="string")
op.add_option("--ftp", dest="services", action="callback",
callback=service_cb)
op.add_option("--http", dest="services", action="callback",
callback=service_cb)
op.add_option("--smtp", dest="services", action="callback",
callback=service_cb)
op.add_option("--ssh", dest="services", action="callback",
callback=service_cb)
op.add_option("--telnet", deprecated=1)
return op
class F14_Firewall(F10_Firewall):
removedKeywords = F10_Firewall.removedKeywords + ["telnet"]
removedAttrs = F10_Firewall.removedAttrs + ["telnet"]
def _getParser(self):
op = F10_Firewall._getParser(self)
op.remove_option("--telnet")
return op
class F20_Firewall(F14_Firewall):
def __init__(self, writePriority=0, *args, **kwargs):
F14_Firewall.__init__(self, writePriority, *args, **kwargs)
self.remove_services = kwargs.get("remove_services", [])
def _getParser(self):
def remove_service_cb(option, opt_str, value, parser):
# python2.4 does not support action="append_const" that we were
# using for these options. Instead, we have to fake it by
# appending whatever the option string is to the service list.
if not value:
parser.values.ensure_value(option.dest, []).append(opt_str[2:])
return
for p in value.split(","):
p = p.strip()
parser.values.ensure_value(option.dest, []).append(p)
op = F14_Firewall._getParser(self)
op.add_option("--remove-service", dest="remove_services",
action="callback", callback=remove_service_cb,
nargs=1, type="string")
return op
def __str__(self):
if self.enabled is None:
return ""
retval = F10_Firewall.__str__(self)
if self.enabled:
retval = retval.strip()
svcstr = ",".join(self.remove_services)
if len(svcstr) > 0:
svcstr = " --remove-service=" + svcstr
else:
svcstr = ""
return retval + "%s\n" % svcstr
else:
return retval
| gpl-2.0 | -2,215,309,625,346,987,800 | 37.314894 | 143 | 0.564416 | false | 4.16659 | false | false | false |
sascha-m-b/diffuse | install.py | 1 | 9113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2010 Derrick Moser <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the licence, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. You may also obtain a copy of the GNU General Public License
# from the Free Software Foundation by visiting their web site
# (http://www.fsf.org/) or by writing to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import glob
import os
import stat
import subprocess
import sys
app_path = sys.argv[0]
# print a message to stderr
def logError(s):
sys.stderr.write('%s: %s\n' % (app_path, s))
# this install script should not be used on Windows
if os.name == 'nt':
logError('Wrong platform. Use scripts from the "windows-installer" directory instead.')
sys.exit(1)
# reset the umask so files we create will have the expected permissions
os.umask(stat.S_IWGRP | stat.S_IWOTH)
# option defaults
options = { 'destdir': '/',
'prefix': '/usr/local/',
'sysconfdir': '/etc/',
'examplesdir': '${sysconfdir}',
'mandir': '${prefix}/share/man/',
'pythonbin': '/usr/bin/env python' }
install = True
files_only = False
# process --help option
if len(sys.argv) == 2 and sys.argv[1] == '--help':
print """Usage: %s [OPTION...]
Install or remove Diffuse.
Options:
--help
print this help text and quit
--remove
remove the program
--destdir=PATH
path to the installation's root directory
default: %s
--prefix=PATH
common installation prefix for files
default: %s
--sysconfdir=PATH
directory for installing read-only single-machine data
default: %s
--examplesdir=PATH
directory for example configuration files
default: %s
--mandir=PATH
directory for man pages
default: %s
--pythonbin=PATH
command for python interpreter
default: %s
--files-only
only install/remove files; skip the post install/removal tasks""" % (app_path, options['destdir'], options['prefix'], options['sysconfdir'], options['examplesdir'], options['mandir'], options['pythonbin'])
sys.exit(0)
# returns the list of components used in a path
def components(s):
return [ p for p in s.split(os.sep) if p != '' ]
# returns a relative path from 'src' to 'dst'
def relpath(src, dst):
s1, s2, i = components(src), components(dst), 0
while i < len(s1) and i < len(s2) and s1[i] == s2[i]:
i += 1
s = [ os.pardir ] * (len(s1) - i)
s.extend(s2[i:])
return os.sep.join(s)
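# Example (with the default prefix/sysconfdir above):
#   relpath('/usr/local/bin', '/etc/diffuserc') -> '../../../etc/diffuserc'
# which is the value used to rewrite the '../../etc/diffuserc' placeholder in bin/diffuse.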
# apply a set of text substitution rules on a string
def replace(s, rules, i=0):
if i < len(rules):
k, v = rules[i]
a = s.split(k)
for j in range(len(a)):
a[j] = replace(a[j], rules, i + 1)
s = v.join(a)
return s
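# Example: replace('/usr/share/man/', [('/usr/', '/usr/local/')]) -> '/usr/local/share/man/'
# Rules are applied in order; each later rule is applied inside the split pieces
# before they are re-joined with the replacement text.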
# create directories
def createDirs(d):
p = os.sep
for c in components(d):
p = os.path.join(p, c)
if not os.path.isdir(p):
os.mkdir(p)
# remove a file
def removeFile(f):
try:
os.unlink(f)
except OSError:
logError('Error removing "%s".' % (f, ))
# install/remove sets of files
def processFiles(install, dst, src, template):
for k, v in template.items():
for s in glob.glob(os.path.join(src, k)):
d = s.replace(src, dst, 1)
if install:
createDirs(os.path.dirname(d))
# install file
f = open(s, 'rb')
c = f.read()
f.close()
if v is not None:
c = replace(c, v)
print 'Installing %s' % (d, )
f = open(d, 'wb')
f.write(c)
f.close()
if k == 'bin/diffuse':
# turn on the execute bits
os.chmod(d, 0755)
else:
# remove file
removeFile(d)
# compile .po files and install
def processTranslations(install, dst):
for s in glob.glob('translations/*.po'):
lang = s[13:-3]
d = os.path.join(dst, 'share/locale/%s/LC_MESSAGES/diffuse.mo' % (lang, ))
if install:
# install file
try:
print 'Installing %s' % (d, )
createDirs(os.path.dirname(d))
if subprocess.Popen(['msgfmt', '-o', d, s]).wait() != 0:
raise OSError()
except OSError:
logError('WARNING: Failed to compile "%s" localisation.' % (lang, ))
else:
# remove file
removeFile(d)
# parse command line arguments
for arg in sys.argv[1:]:
if arg == '--remove':
install = False
elif arg == '--files-only':
files_only = True
else:
for opt in options.keys():
key = '--%s=' % (opt, )
if arg.startswith(key):
options[opt] = arg[len(key):]
break
else:
logError('Unknown option "%s".' % (arg, ))
sys.exit(1)
# expand variables
for s in 'sysconfdir', 'examplesdir', 'mandir':
for k in 'prefix', 'sysconfdir':
if s != k:
options[s] = options[s].replace('${%s}' % (k, ), options[k])
# validate inputs
if options['destdir'] == '':
options['destdir'] = '/'
for opt in 'prefix', 'sysconfdir', 'examplesdir', 'mandir':
p = options[opt]
c = components(p)
if os.pardir in c or os.curdir in c:
logError('Bad value for option "%s".' % (opt, ))
sys.exit(1)
c.insert(0, '')
c.append('')
options[opt] = os.sep.join(c)
destdir = options['destdir']
prefix = options['prefix']
sysconfdir = options['sysconfdir']
examplesdir = options['examplesdir']
mandir = options['mandir']
pythonbin = options['pythonbin']
# tell the user what we are about to do
if install:
stage = 'install'
else:
stage = 'removal'
print '''Performing %s with:
destdir=%s
prefix=%s
sysconfdir=%s
examplesdir=%s
mandir=%s
pythonbin=%s''' % (stage, destdir, prefix, sysconfdir, examplesdir, mandir, pythonbin)
# install files to prefix
processFiles(install, os.path.join(destdir, prefix[1:]), 'src/usr/', {
'bin/diffuse': [ ("'../../etc/diffuserc'", repr(relpath(os.path.join(prefix, 'bin'), os.path.join(sysconfdir, 'diffuserc')))), ('/usr/bin/env python', pythonbin) ],
'share/applications/diffuse.desktop': None,
'share/diffuse/syntax/*.syntax': None,
'share/gnome/help/diffuse/*/diffuse.xml': [ ('/usr/', prefix), ('/etc/', sysconfdir) ],
'share/omf/diffuse/diffuse-*.omf': [ ('/usr/', prefix) ],
'share/icons/hicolor/*/apps/diffuse.png': None
})
# install manual
processFiles(install, os.path.join(destdir, mandir[1:]), 'src/usr/share/man/', {
'man1/diffuse.1': [ ('/usr/', prefix), ('/etc/', sysconfdir) ],
'*/man1/diffuse.1': [ ('/usr/', prefix), ('/etc/', sysconfdir) ]
})
# install files to sysconfdir
processFiles(install, os.path.join(destdir, examplesdir[1:]), 'src/etc/', { 'diffuserc': [ ('/etc/', sysconfdir), ('../usr', relpath(sysconfdir, prefix)) ] })
# install translations
processTranslations(install, os.path.join(destdir, prefix[1:]))
if not install:
# remove directories we own
for s in 'share/omf/diffuse', 'share/gnome/help/diffuse/C', 'share/gnome/help/diffuse/ru', 'share/gnome/help/diffuse', 'share/diffuse/syntax', 'share/diffuse':
d = os.path.join(destdir, os.path.join(prefix, s)[1:])
try:
os.rmdir(d)
except OSError:
logError('Error removing "%s".' % (d, ))
# do post install/removal tasks
if not files_only:
print 'Performing post %s tasks.' % (stage, )
cmds = [ [ 'update-desktop-database' ],
[ 'gtk-update-icon-cache', os.path.join(destdir, os.path.join(prefix, 'icons/hicolor')[1:]) ] ]
if install:
cmds.append([ 'scrollkeeper-update', '-q', '-o', os.path.join(destdir, os.path.join(prefix, 'share/omf/diffuse')[1:]) ])
else:
cmds.append([ 'scrollkeeper-update', '-q' ])
for c in cmds:
for p in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(p, c[0])):
print ' '.join(c)
try:
if subprocess.Popen(c).wait() != 0:
raise OSError()
except OSError:
logError('WARNING: Failed to update documentation database with %s.' % (c[0], ))
break
else:
print 'WARNING: %s is not installed' % (c[0], )
| gpl-2.0 | 8,869,225,218,684,423,000 | 31.546429 | 210 | 0.581587 | false | 3.492909 | false | false | false |
janeczku/calibre-web | cps/render_template.py | 2 | 7747 | # -*- coding: utf-8 -*-
# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2018-2020 OzzieIsaacs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flask import render_template
from flask_babel import gettext as _
from flask import g
from werkzeug.local import LocalProxy
from flask_login import current_user
from . import config, constants, ub, logger, db, calibre_db
from .ub import User
log = logger.create()
def get_sidebar_config(kwargs=None):
kwargs = kwargs or []
if 'content' in kwargs:
content = kwargs['content']
content = isinstance(content, (User, LocalProxy)) and not content.role_anonymous()
else:
content = 'conf' in kwargs
sidebar = list()
sidebar.append({"glyph": "glyphicon-book", "text": _('Books'), "link": 'web.index', "id": "new",
"visibility": constants.SIDEBAR_RECENT, 'public': True, "page": "root",
"show_text": _('Show recent books'), "config_show":False})
sidebar.append({"glyph": "glyphicon-fire", "text": _('Hot Books'), "link": 'web.books_list', "id": "hot",
"visibility": constants.SIDEBAR_HOT, 'public': True, "page": "hot",
"show_text": _('Show Hot Books'), "config_show": True})
if current_user.role_admin():
sidebar.append({"glyph": "glyphicon-download", "text": _('Downloaded Books'), "link": 'web.download_list',
"id": "download", "visibility": constants.SIDEBAR_DOWNLOAD, 'public': (not g.user.is_anonymous),
"page": "download", "show_text": _('Show Downloaded Books'),
"config_show": content})
else:
sidebar.append({"glyph": "glyphicon-download", "text": _('Downloaded Books'), "link": 'web.books_list',
"id": "download", "visibility": constants.SIDEBAR_DOWNLOAD, 'public': (not g.user.is_anonymous),
"page": "download", "show_text": _('Show Downloaded Books'),
"config_show": content})
sidebar.append(
{"glyph": "glyphicon-star", "text": _('Top Rated Books'), "link": 'web.books_list', "id": "rated",
"visibility": constants.SIDEBAR_BEST_RATED, 'public': True, "page": "rated",
"show_text": _('Show Top Rated Books'), "config_show": True})
sidebar.append({"glyph": "glyphicon-eye-open", "text": _('Read Books'), "link": 'web.books_list', "id": "read",
"visibility": constants.SIDEBAR_READ_AND_UNREAD, 'public': (not g.user.is_anonymous),
"page": "read", "show_text": _('Show read and unread'), "config_show": content})
sidebar.append(
{"glyph": "glyphicon-eye-close", "text": _('Unread Books'), "link": 'web.books_list', "id": "unread",
"visibility": constants.SIDEBAR_READ_AND_UNREAD, 'public': (not g.user.is_anonymous), "page": "unread",
"show_text": _('Show unread'), "config_show": False})
sidebar.append({"glyph": "glyphicon-random", "text": _('Discover'), "link": 'web.books_list', "id": "rand",
"visibility": constants.SIDEBAR_RANDOM, 'public': True, "page": "discover",
"show_text": _('Show Random Books'), "config_show": True})
sidebar.append({"glyph": "glyphicon-inbox", "text": _('Categories'), "link": 'web.category_list', "id": "cat",
"visibility": constants.SIDEBAR_CATEGORY, 'public': True, "page": "category",
"show_text": _('Show category selection'), "config_show": True})
sidebar.append({"glyph": "glyphicon-bookmark", "text": _('Series'), "link": 'web.series_list', "id": "serie",
"visibility": constants.SIDEBAR_SERIES, 'public': True, "page": "series",
"show_text": _('Show series selection'), "config_show": True})
sidebar.append({"glyph": "glyphicon-user", "text": _('Authors'), "link": 'web.author_list', "id": "author",
"visibility": constants.SIDEBAR_AUTHOR, 'public': True, "page": "author",
"show_text": _('Show author selection'), "config_show": True})
sidebar.append(
{"glyph": "glyphicon-text-size", "text": _('Publishers'), "link": 'web.publisher_list', "id": "publisher",
"visibility": constants.SIDEBAR_PUBLISHER, 'public': True, "page": "publisher",
"show_text": _('Show publisher selection'), "config_show":True})
sidebar.append({"glyph": "glyphicon-flag", "text": _('Languages'), "link": 'web.language_overview', "id": "lang",
"visibility": constants.SIDEBAR_LANGUAGE, 'public': (g.user.filter_language() == 'all'),
"page": "language",
"show_text": _('Show language selection'), "config_show": True})
sidebar.append({"glyph": "glyphicon-star-empty", "text": _('Ratings'), "link": 'web.ratings_list', "id": "rate",
"visibility": constants.SIDEBAR_RATING, 'public': True,
"page": "rating", "show_text": _('Show ratings selection'), "config_show": True})
sidebar.append({"glyph": "glyphicon-file", "text": _('File formats'), "link": 'web.formats_list', "id": "format",
"visibility": constants.SIDEBAR_FORMAT, 'public': True,
"page": "format", "show_text": _('Show file formats selection'), "config_show": True})
sidebar.append(
{"glyph": "glyphicon-trash", "text": _('Archived Books'), "link": 'web.books_list', "id": "archived",
"visibility": constants.SIDEBAR_ARCHIVED, 'public': (not g.user.is_anonymous), "page": "archived",
"show_text": _('Show archived books'), "config_show": content})
sidebar.append(
{"glyph": "glyphicon-th-list", "text": _('Books List'), "link": 'web.books_table', "id": "list",
"visibility": constants.SIDEBAR_LIST, 'public': (not g.user.is_anonymous), "page": "list",
"show_text": _('Show Books List'), "config_show": content})
return sidebar
def get_readbooks_ids():
if not config.config_read_column:
readBooks = ub.session.query(ub.ReadBook).filter(ub.ReadBook.user_id == int(current_user.id))\
.filter(ub.ReadBook.read_status == ub.ReadBook.STATUS_FINISHED).all()
return frozenset([x.book_id for x in readBooks])
else:
try:
readBooks = calibre_db.session.query(db.cc_classes[config.config_read_column])\
.filter(db.cc_classes[config.config_read_column].value == True).all()
return frozenset([x.book for x in readBooks])
except (KeyError, AttributeError):
log.error("Custom Column No.%d is not existing in calibre database", config.config_read_column)
return []
# Returns the template for rendering and includes the instance name
def render_title_template(*args, **kwargs):
sidebar = get_sidebar_config(kwargs)
return render_template(instance=config.config_calibre_web_title, sidebar=sidebar,
accept=constants.EXTENSIONS_UPLOAD, read_book_ids=get_readbooks_ids(),
*args, **kwargs)
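# A typical call from a view (hypothetical arguments) might look like:
#   render_title_template('index.html', random=random_books, entries=entries,
#                         title=_(u"Books"), page="root")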
| gpl-3.0 | -2,720,415,120,231,646,700 | 62.5 | 120 | 0.602039 | false | 3.697852 | true | false | false |
wilsonfreitas/scraps | examples/igpm.py | 1 | 2163 |
# /html/body/div[1]/div[3]/table/tbody/tr/td/div/center/table/tbody/tr[1]
import scraps
import textparser
import itertools
class NumberParser(textparser.TextParser):
def parse_int(self, text, match):
r'^\d+$'
return eval(text)
def parse_number_ptBR_with_percent(self, text, match):
r'^-?\s*((\d+[\.])+)?\d+[,]\d+%$'
text = text.replace('%', '')
text = text.replace('.', '')
text = text.replace(',', '.')
return eval(text)*100
def parse_number_ptBR_with_thousands(self, text, match):
r'^-?\s*((\d+[\.])+)?\d+[,]\d+?$'
text = text.replace('.', '')
text = text.replace(',', '.')
return eval(text)
def parseText(self, text):
return 'NA'
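# Assuming textparser dispatches on the regex docstrings above, e.g.:
#   NumberParser().parse('1.234,56') -> 1234.56
#   NumberParser().parse('-3,2%')    -> -320.0  (percent values are scaled by 100)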
def month_pt2en(mes):
mes = mes.lower()
return {'fev':'feb', 'abr':'apr', 'mai':'may', 'ago':'aug', 'set':'sep', 'out':'oct'}.get(mes, mes)
def month_pt2number(mes):
mes = mes.lower()
return {
'jan':1, 'fev':2, 'mar':3, 'abr':4, 'mai':5, 'jun':6, 'jul':7, 'ago':8, 'set':9, 'out':10, 'nov':11,'dez':12
}.get(mes)
number_parser = NumberParser()
class IGPMScrap(scraps.Scrap):
colnames = scraps.Attribute(xpath='//table[3]/tr[1]/td', apply=[month_pt2en])
rownames = scraps.Attribute(xpath='//table[3]/tr[position()>1]/td[1]')
data = scraps.Attribute(xpath='//table[3]/tr[position()>1]/td[position()>1]', apply=[number_parser.parse])
# ---------------- (this second IGPMScrap definition overrides the one above)
class IGPMScrap(scraps.Scrap):
colnames = scraps.Attribute(xpath='//table[4]/*/tr[1]/td[position()>1]', apply=[month_pt2number])
rownames = scraps.Attribute(xpath='//table[4]/*/tr[position()>1]/td[1]', apply=[number_parser.parse])
data = scraps.Attribute(xpath='//table[4]/*/tr[position()>1]/td[position()>1]')#, apply=[number_parser.parse])
class IGPMFetcher(scraps.Fetcher):
scrapclass = IGPMScrap
url = 'http://www.portalbrasil.net/igpm.htm'
fetcher = IGPMFetcher()
res = fetcher.fetch()
# print(res.colnames)
# print(res.rownames)
# print(res.data)
# for month, rate in zip(list(itertools.product(res.rownames, res.colnames)), res.data):
# print(month + (rate,)) | mit | 3,307,421,219,416,186,400 | 27.853333 | 116 | 0.588072 | false | 2.816406 | false | false | false |
facebookresearch/fastText | python/doc/examples/FastTextEmbeddingBag.py | 1 | 2816 | #!/usr/bin/env python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# NOTE: This requires PyTorch! We do not provide installation scripts to install PyTorch.
# It is up to you to install this dependency if you want to execute this example.
# PyTorch's website should give you clear instructions on this: http://pytorch.org/
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from torch.nn.modules.sparse import EmbeddingBag
import numpy as np
import torch
import random
import string
import time
from fasttext import load_model
from torch.autograd import Variable
class FastTextEmbeddingBag(EmbeddingBag):
def __init__(self, model_path):
self.model = load_model(model_path)
input_matrix = self.model.get_input_matrix()
input_matrix_shape = input_matrix.shape
super().__init__(input_matrix_shape[0], input_matrix_shape[1])
self.weight.data.copy_(torch.FloatTensor(input_matrix))
def forward(self, words):
word_subinds = np.empty([0], dtype=np.int64)
word_offsets = [0]
for word in words:
_, subinds = self.model.get_subwords(word)
word_subinds = np.concatenate((word_subinds, subinds))
word_offsets.append(word_offsets[-1] + len(subinds))
word_offsets = word_offsets[:-1]
ind = Variable(torch.LongTensor(word_subinds))
offsets = Variable(torch.LongTensor(word_offsets))
return super().forward(ind, offsets)
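# Minimal usage sketch (assumes a trained binary such as "fil9.bin", as in the demo below):
#   emb = FastTextEmbeddingBag("fil9.bin")
#   vectors = emb(["hello", "world"])  # FloatTensor of shape (2, embedding_dim)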
def random_word(N):
return ''.join(
random.choices(
string.ascii_uppercase + string.ascii_lowercase + string.digits,
k=N
)
)
if __name__ == "__main__":
ft_emb = FastTextEmbeddingBag("fil9.bin")
model = load_model("fil9.bin")
num_lines = 200
total_seconds = 0.0
total_words = 0
for _ in range(num_lines):
words = [
random_word(random.randint(1, 10))
for _ in range(random.randint(15, 25))
]
total_words += len(words)
words_average_length = sum([len(word) for word in words]) / len(words)
start = time.clock()
words_emb = ft_emb(words)
total_seconds += (time.clock() - start)
for i in range(len(words)):
word = words[i]
ft_word_emb = model.get_word_vector(word)
py_emb = np.array(words_emb[i].data)
assert (np.isclose(ft_word_emb, py_emb).all())
print(
"Avg. {:2.5f} seconds to build embeddings for {} lines with a total of {} words.".
format(total_seconds, num_lines, total_words)
)
| mit | 9,063,231,506,715,134,000 | 33.765432 | 90 | 0.637429 | false | 3.657143 | false | false | false |
bcarroll/authmgr | authmgr/authmgr/user_directory/views.py | 1 | 4151 | import datetime as dt
import logging
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask import Markup
from flask_login import fresh_login_required, login_user, logout_user, current_user
from authmgr.extensions import login_manager
from authmgr.utils import flash_errors
from authmgr.extensions import db
from authmgr.user_directory.forms import UserDirectoryForm
from authmgr.user_directory.models import UserDirectory
blueprint = Blueprint('user_directory', __name__, url_prefix='/user_directory', static_folder='../static')
#User Directory views
@blueprint.route('/')
@fresh_login_required
def list():
"""
List all User Directories
"""
user_directories = UserDirectory.query.order_by('name').all()
return render_template('user_directory/user_directories.html', user_directories=user_directories, title='User Directories')
@blueprint.route('/add', methods=['GET', 'POST'])
@fresh_login_required
def add():
"""
Add a User Directory
"""
add_user_directory = True
form = UserDirectoryForm()
if form.validate_on_submit():
user_directory = UserDirectory(
directory_type=form.directory_type.data,
name=form.name.data,
description=form.description.data,
hostname=form.hostname.data,
port=form.port.data,
use_ssl=form.use_ssl.data,
basedn=form.basedn.data,
username=form.username.data,
password=form.password.data,
user_id=current_user.username,
updated_at=dt.datetime.utcnow()
)
try:
# add user_directory to the database
db.session.add(user_directory)
db.session.commit()
flash('User Directory added.', 'success')
except:
#logger.error(str(traceback.print_exc()))
# in case User Directory name already exists
flash('Error: User Directory already exists.', 'warning')
# redirect to the User Directory list page
return redirect(url_for('user_directory.list'))
# load setting template
return render_template('user_directory/user_directory.html', add_user_directory=add_user_directory, form=form, title='Add User Directory')
@blueprint.route('/edit/<int:id>', methods=['GET', 'POST'])
@fresh_login_required
def edit(id):
"""
Edit a User Directory
"""
add_user_directory = False
user_directory = UserDirectory.query.get_or_404(id)
form = UserDirectoryForm(obj=user_directory)
if form.validate_on_submit():
        user_directory.directory_type=form.directory_type.data
        user_directory.name=form.name.data
        user_directory.description=form.description.data
        user_directory.hostname=form.hostname.data
        user_directory.port=form.port.data
        user_directory.use_ssl=form.use_ssl.data
        user_directory.basedn=form.basedn.data
        user_directory.username=form.username.data
        user_directory.password=form.password.data
user_directory.user_id=current_user.username
user_directory.updated_at=dt.datetime.utcnow()
db.session.commit()
flash('User Directory updated.', 'success')
# redirect to the user_directories page
return redirect(url_for('user_directory.list'))
    return render_template('user_directory/user_directory.html', add_user_directory=add_user_directory, form=form, title='Edit User Directory')
@blueprint.route('/user_directories/delete/<int:id>', methods=['GET', 'POST'])
@fresh_login_required
def delete(id):
"""
Delete a User Directory from the database
"""
user_directory = UserDirectory.query.get_or_404(id)
db.session.delete(user_directory)
db.session.commit()
flash('User Directory deleted.', 'success')
# redirect to the roles page
return redirect(url_for('user_directory.list'))
| bsd-3-clause | -3,099,100,461,342,910,000 | 36.736364 | 142 | 0.63575 | false | 4.197169 | false | false | false |
DFO-Ocean-Navigator/Ocean-Data-Map-Project | plotting/colormap.py | 1 | 11585 | import os
import re
from io import BytesIO
import cmocean
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
from flask_babel import gettext
import plotting
def make_colormap(seq):
"""
Return a LinearSegmentedColormap: http://stackoverflow.com/a/16836182
Args:
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
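# Example: the two-colour ramp used for 'silicate' below is
#   make_colormap([_c('#ffffff'), _c('#57a6bd')])
# and a breakpoint at 0.5 can be written as
#   make_colormap([_c('#0000ff'), _c('#ffffff'), 0.5, _c('#ffffff'), _c('#ff0000')])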
def find_colormap(name):
try:
return colormaps[name.lower()]
except KeyError:
for key in colormaps:
if re.search(key, name, re.I):
return colormaps[key]
return colormaps['mercator']
_c = mcolors.ColorConverter().to_rgb
data_dir = os.path.join(os.path.dirname(plotting.__file__), 'data')
colormaps = {
'water velocity bearing': cmocean.cm.rain,
'depth excess': cmocean.cm.deep,
'ammonium concentration': cmocean.cm.matter,
'nitrogen': cmocean.cm.balance,
'dissolved organic nitrogen concentration': cmocean.cm.amp,
'particulate organic nitrogen concentration': cmocean.cm.amp,
'depth': cmocean.cm.deep,
'deep': cmocean.cm.deep,
'partial pressure': cmocean.cm.matter,
'primary production': cmocean.cm.algae,
'temp gradient': cmocean.cm.thermal,
'heat': cmocean.cm.thermal,
'density': cmocean.cm.dense,
'curl': cmocean.cm.curl,
'vorticity': cmocean.cm.curl,
'divergence': cmocean.cm.curl,
'bathymetry': cmocean.cm.deep,
'salinity': cmocean.cm.haline,
'speed': cmocean.cm.speed,
'speed of current': cmocean.cm.speed,
'freesurface': cmocean.cm.balance,
'free surface': cmocean.cm.balance,
'surface height': cmocean.cm.balance,
'surface elevation': cmocean.cm.balance,
'velocity': cmocean.cm.delta,
'eastward current': cmocean.cm.delta,
'northward current': cmocean.cm.delta,
'waveheight': cmocean.cm.amp,
'waveperiod': cmocean.cm.tempo,
'chlorophyll': cmocean.cm.algae,
'iron': cmocean.cm.amp,
'oxygen': cmocean.cm.oxy,
'phosphate': mcolors.ListedColormap(
np.loadtxt(os.path.join(data_dir, 'phosphate.txt'))),
'nitrate': mcolors.ListedColormap(
np.loadtxt(os.path.join(data_dir, 'nitrate.txt'))),
'nitrate concentration': cmocean.cm.tempo,
'ice': cmocean.cm.ice,
'phytoplankton': cmocean.cm.deep_r,
'diatoms concentration as nitrogen': cmocean.cm.algae,
'flagellates concentration as nitrogen': cmocean.cm.algae,
'mesodinium rubrum concentration as nitrogen': cmocean.cm.algae,
'mesozooplankton concentration as nitrogen': cmocean.cm.algae,
'microzooplankton concentration as nitrogen': cmocean.cm.algae,
'silicate': make_colormap([
_c('#ffffff'),
_c('#57a6bd'),
]),
'silicon concentration': cmocean.cm.turbid,
'biogenic silicon concentration': cmocean.cm.turbid,
'ph': make_colormap([
_c('#ED1B26'),
_c('#F46432'), 0.1, _c('#F46432'),
_c('#FFC324'), 0.2, _c('#FFC324'),
_c('#84C341'), 0.3, _c('#84C341'),
_c('#33A94B'), 0.4, _c('#33A94B'),
_c('#0AB8B6'), 0.5, _c('#0AB8B6'),
_c('#4591CD'), 0.6, _c('#4591CD'),
_c('#5A51A2'), 0.7, _c('#5A51A2'),
_c('#63459D'), 0.8, _c('#63459D'),
_c('#6C2180'), 0.9, _c('#6C2180'),
_c('#49176E')
]),
'mercator_current': make_colormap([
_c('#e1f3fc'),
_c('#7ebce5'), 0.17, _c('#7ebce5'),
_c('#4990bd'), 0.25, _c('#4990bd'),
_c('#4eb547'), 0.42, _c('#4eb547'),
_c('#f3e65b'), 0.55, _c('#f3e65b'),
_c('#f58a35'), 0.67, _c('#f58a35'),
_c('#d72928'), 0.83, _c('#d72928'),
_c('#901418')
]),
'mercator': make_colormap([
_c('#1d3b7a'),
_c('#134aaa'), 0.05, _c('#134aaa'),
_c('#075ce4'), 0.10, _c('#075ce4'),
_c('#1976fa'), 0.15, _c('#1976fa'),
_c('#4b9bf1'), 0.20, _c('#4b9bf1'),
_c('#80c0e7'), 0.25, _c('#80c0e7'),
_c('#4dd9f0'), 0.30, _c('#4dd9f0'),
_c('#1df1f9'), 0.35, _c('#1df1f9'),
_c('#00efcf'), 0.40, _c('#00efcf'),
_c('#04d273'), 0.45, _c('#04d273'),
_c('#0cb20f'), 0.50, _c('#0cb20f'),
_c('#66cf09'), 0.55, _c('#66cf09'),
_c('#c8ed03'), 0.60, _c('#c8ed03'),
_c('#fef000'), 0.65, _c('#fef000'),
_c('#fed100'), 0.70, _c('#fed100'),
_c('#feaf00'), 0.75, _c('#feaf00'),
_c('#fe6a00'), 0.80, _c('#fe6a00'),
_c('#fe2800'), 0.85, _c('#fe2800'),
_c('#d80100'), 0.90, _c('#d80100'),
_c('#a00000'), 0.95, _c('#a00000'),
_c('#610000')
]),
'anomaly': make_colormap([
_c('#000064'),
_c('#0000b2'), 0.090909, _c('#0000b2'),
_c('#0000ff'), 0.181818, _c('#0000ff'),
_c('#0748ff'), 0.272727, _c('#0748ff'),
_c('#9291ff'), 0.363636, _c('#9291ff'),
_c('#dbd9ff'), 0.454545, _c('#dbd9ff'),
_c('#ffffff'), 0.500000, _c('#ffffff'),
_c('#ffd9dd'), 0.545455, _c('#ffd9dd'),
_c('#ff9193'), 0.636364, _c('#ff9193'),
_c('#ff484a'), 0.727273, _c('#ff484a'),
_c('#ff0000'), 0.818182, _c('#ff0000'),
_c('#b20000'), 0.909091, _c('#b20000'),
_c('#640000')
]),
'temperature-old': make_colormap([
_c('#0000ff'),
_c('#0748ff'), 0.125, _c('#0748ff'),
_c('#9291ff'), 0.250, _c('#9291ff'),
_c('#dbd9ff'), 0.375, _c('#dbd9ff'),
_c('#ffffff'), 0.500, _c('#ffffff'),
_c('#ffd9dd'), 0.625, _c('#ffd9dd'),
_c('#ff9193'), 0.750, _c('#ff9193'),
_c('#ff484a'), 0.875, _c('#ff484a'),
_c('#ff0000')
]),
'grey': make_colormap([
_c('#ffffff'),
_c('#000000')
]),
'potential sub surface channel': mcolors.ListedColormap(
['#ecf0f1','#f57732']
),
'thermal': cmocean.cm.thermal,
'neo_sst': mcolors.ListedColormap(
np.loadtxt(os.path.join(data_dir, 'neo_sst.txt'))),
'BuYlRd': mcolors.ListedColormap(
np.loadtxt(os.path.join(data_dir, 'BuYlRd.txt'))),
'temperature': mcolors.ListedColormap(
np.loadtxt(os.path.join(data_dir, 'temperature.txt'))),
}
colormaps['wind'] = colormaps['velocity']
# This is a little odd, but it has a purpose.
# These gettext calls don't really do anything, but it registers the keys with
# Babel so that they'll end up in the translation list.
# If the gettext calls were in the definition of colormap_names, they'd get
# executed before the user's locale is known and would always be in English.
gettext('Ammonium Concentration')
gettext('Anomaly')
gettext('Bathymetry')
gettext('Biogenic Silicon Concentration')
gettext('Chlorophyll')
gettext('Diatoms Concentration as Nitrogen')
gettext('Dissolved Organic Nitrogen Concentration')
gettext('Eastward Current')
gettext('Flagellates Concentration as Nitrogen')
gettext('Greyscale')
gettext('Ice')
gettext('Iron')
gettext('Mercator Ocean Current')
gettext('Mercator')
gettext('Mesodinium rubrum Concentration as Nitrogen')
gettext('Mesozooplankton Concentration as Nitrogen')
gettext('Microzooplankton Concentration as Nitrogen')
gettext('Nitrate')
gettext('Nitrate Concentration')
gettext('Northward Current')
gettext('Oxygen')
gettext('Particulate Organic Nitrogen Concentration')
gettext('Phosphate')
gettext('Phytoplankton')
gettext('Salinity')
gettext('Sea Surface Height (Free Surface)')
gettext('Silicate')
gettext('Silicon Concentration')
gettext('Speed')
gettext('Speed of Current')
gettext('Temperature')
gettext('Velocity')
gettext('Wave Height')
gettext('Wave Period')
gettext('Thermal')
gettext('NEO SST')
gettext('Color Brewer Blue-Yellow-Red')
gettext('Temperature (old)')
gettext('Vorticity')
gettext('Density')
gettext('Deep')
gettext('Balance')
gettext('Potential Sub Surface Channel')
colormap_names = {
'ammonium concentration': 'Ammonium Concentration',
'balance': 'Balance',
'anomaly': 'Anomaly',
'bathymetry': 'Bathymetry',
'chlorophyll': 'Chlorophyll',
'dissolved organic nitrogen concentration': 'Dissolved Organic Nitrogen Concentration',
'diatoms concentration as nitrogen': 'Diatoms Concentration as Nitrogen',
'flagellates concentration as nitrogen': 'Flagellates Concentration as Nitrogen',
'freesurface': 'Sea Surface Height (Free Surface)',
'grey': 'Greyscale',
'ice': 'Ice',
'iron': 'Iron',
'mercator_current': 'Mercator Ocean Current',
'mercator': 'Mercator',
'mesodinium rubrum concentration as nitrogen': 'Mesodinium rubrum Concentration as Nitrogen',
'mesozooplankton concentration as nitrogen': 'Mesozooplankton Concentration as Nitrogen',
'microzooplankton concentration as nitrogen': 'Microzooplankton Concentration as Nitrogen',
'nitrate': 'Nitrate',
'nitrate concentration': 'Nitrate Concentration',
'oxygen': 'Oxygen',
'particulate organic nitrogen concentration': 'Particulate Organic Nitrogen Concentration',
'phosphate': 'Phosphate',
'phytoplankton': 'Phytoplankton',
'potential sub surface channel':'Potential Sub Surface Channel',
'salinity': 'Salinity',
'silicate': 'Silicate',
'silicon concentration': 'Silicon Concentration',
'biogenic silicon concentration': 'Biogenic Silicon Concentration',
'speed': 'Speed',
'speed of current': 'Speed of Current',
'temperature': 'Temperature',
'velocity': 'Velocity',
'eastward current': 'Eastward Current',
'northward current': 'Northward Current',
'waveheight': 'Wave Height',
'waveperiod': 'Wave Period',
'thermal': 'Thermal',
'neo_sst': 'NEO SST',
'BuYlRd': 'Color Brewer Blue-Yellow-Red',
'temperature-old': 'Temperature (old)',
'vorticity': 'Vorticity',
'density': 'Density',
'deep': 'Deep'
}
def plot_colormaps():
fig, axes = plt.subplots(
nrows=len(colormap_names),
figsize=(11, 0.3 * len(colormap_names))
)
fig.subplots_adjust(top=0.925, bottom=0.01, left=0.01, right=0.6)
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
fig.suptitle(gettext("Ocean Navigator Colourmaps"), fontsize=14)
for ax, cmap in zip(axes, sorted(colormap_names, key=colormap_names.get)):
ax.imshow(gradient, aspect='auto', cmap=colormaps.get(cmap))
pos = list(ax.get_position().bounds)
x_text = pos[2] + 0.025
y_text = pos[1] + pos[3] / 2.
fig.text(
x_text, y_text, colormap_names[cmap], va='center', ha='left', fontsize=12
)
for ax in axes:
ax.set_axis_off()
buf = BytesIO()
try:
plt.savefig(buf, format="png", dpi='figure')
plt.close(fig)
return buf.getvalue()
finally:
buf.close()
if __name__ == '__main__':
import viscm
import matplotlib.cm
import sys
for k, v in colormaps.items():
matplotlib.cm.register_cmap(name=k, cmap=v)
maps = [i for i in colormaps]
if len(sys.argv) > 1:
maps = sys.argv[1:]
for m in maps:
v = viscm.viscm(m, uniform_space="CAM02-UCS")
v.fig.set_size_inches(20, 12)
v.fig.savefig(m + ".png")
| gpl-3.0 | 4,106,671,356,486,534,700 | 34.320122 | 97 | 0.603107 | false | 2.914465 | false | false | false |
GGiecold/PySCUBA | src/PySCUBA/__main__.py | 1 | 12824 | #!/usr/bin/env python
# PySCUBA/src/PySCUBA/__main__.py
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: [email protected]; [email protected]
from os import getcwd, path, remove
import Queue
import sys
try:
import igraph
except ImportError, e:
pass
import numpy as np
from PIL import Image, ImageQt
from PyQt4 import QtCore, QtGui
from sklearn.preprocessing import StandardScaler
import wand.image
from .Gap_stats import gap_stats
from .Preprocessing import cytometry_preprocess, PCR_preprocess, RNASeq_preprocess
from . import PySCUBA_design, SCUBA_core
def plot_tree(cluster_indices, parent_clusters, output_directory = None):
"""Display a bifurcation tree.
"""
    if 'igraph' not in sys.modules:
return
if output_directory is None:
output_directory = getcwd()
vertex_sizes = np.bincount(cluster_indices)
N_vertices = vertex_sizes.size
vertex_sizes = np.divide(vertex_sizes, float(np.sum(vertex_sizes)))
vertex_sizes *= 100 * N_vertices
vertex_sizes += 40 + (N_vertices / 3)
tree = igraph.Graph()
tree.add_vertices(N_vertices)
cluster_tally = 0
for k, v in parent_clusters.items():
if k > 0:
tree.add_edges(zip(v, xrange(cluster_tally, cluster_tally + len(v))))
cluster_tally += len(v)
tree.vs['label'] = xrange(N_vertices)
layout = tree.layout('fr')
name = path.join(output_directory, 'SCUBA_tree.pdf')
igraph.plot(tree, name, bbox = (200 * N_vertices, 200 * N_vertices), margin = 250,
layout = layout, edge_width = [7] * (N_vertices - 1),
vertex_label_dist = 0, vertex_label_size = 30,
vertex_size = vertex_sizes.tolist())
def one_to_max(array_in):
"""Alter a vector of cluster labels to a dense mapping.
Given that this function is herein always called after passing
a vector to the function checkcl, one_to_max relies on the assumption
that cluster_run does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities.
"""
x = np.asanyarray(array_in)
N_in = x.size
array_in = x.reshape(N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in xrange(N_in):
if last != sorted_array[i] or np.isnan(last):
last = sorted_array[i]
current_index += 1
sorted_array[i] = current_index
result = np.empty(N_in, dtype = int)
result[sorting_indices] = sorted_array
return result
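# For instance, one_to_max([3, 7, 7, 3, 9]) returns array([0, 1, 1, 0, 2]).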
class WorkerThread(QtCore.QThread):
def __init__(self, result_queue, data_type, data_path, cluster_mode, log_mode,
pseudotime_mode, pcv_method, anchor_gene,
exclude_marker_names):
super(WorkerThread, self).__init__()
self.result_queue = result_queue
self.data_type = str(data_type)
self.data_path = data_path
cluster_mode = str(cluster_mode).lower()
self.cluster_mode = None if (cluster_mode == 'none') else cluster_mode
self.log_mode = log_mode
self.pseudotime_mode = pseudotime_mode
self.pcv_method = pcv_method
self.anchor_gene = anchor_gene
self.exclude_marker_names = exclude_marker_names
def __del__(self):
self.wait()
def run(self):
preprocessing_fcts = [cytometry_preprocess, PCR_preprocess,
RNASeq_preprocess]
data_type_dict = {'cytometry': 0, 'PCR': 1, 'RNASeq': 2}
cell_IDs, data, markers, cell_stages, data_tag, \
output_directory = preprocessing_fcts[data_type_dict[self.data_type]](
self.data_path, self.log_mode, self.pseudotime_mode,
self.pcv_method, self.anchor_gene, self.exclude_marker_names)
cell_stages = 1 + one_to_max(cell_stages)
data = StandardScaler(with_std = False).fit_transform(data)
if self.cluster_mode in {'pca', 'pca2'}:
PCA_components, data = SCUBA_core.PCA_analysis(data, self.cluster_mode,
cell_stages if (self.cluster_mode == 'pca2') else None)
centroid_coords, cluster_indices, \
parent_clusters = SCUBA_core.initialize_tree(data, cell_stages)
centroid_coords, cluster_indices, \
parent_clusters = SCUBA_core.refine_tree(data, centroid_coords,
cluster_indices, parent_clusters, cell_stages, output_directory)
plot_tree(cluster_indices, parent_clusters, output_directory)
if self.cluster_mode in {'pca', 'pca2'}:
weights = PCA_components
else:
weights = None
bifurcation_info, bifurcation_axes, \
bifurcation_projections = SCUBA_core.bifurcation_direction(data, cell_IDs,
markers, parent_clusters, centroid_coords, output_directory,
weights)
if bifurcation_info:
data_per_split, parameters_per_split = SCUBA_core.bifurcation_analysis(
cluster_indices, bifurcation_info, bifurcation_axes,
bifurcation_projections, output_directory)
self.result_queue.put(output_directory)
self.result_queue.task_done()
return
class LoadImageThread(QtCore.QThread):
def __init__(self, source_file):
super(LoadImageThread, self).__init__()
self.source_file = source_file
def __del__(self):
self.wait()
def run(self):
self.emit(QtCore.SIGNAL("showImage(QString)"), self.source_file)
class PySCUBApp(QtGui.QMainWindow, PySCUBA_design.Ui_MainWindow):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent)
self.setupUi(self)
self.cancelButton.setEnabled(False)
self.okButton.setEnabled(False)
self.data_path = './'
self.selectDatasetButton.clicked.connect(self.selectDataset)
self.log_mode = True
self.logCheckBox.stateChanged.connect(self.logStateChanged)
self.pseudotime_mode = True
self.pseudotimeCheckBox.stateChanged.connect(
self.pseudotimeStateChanged)
self.pcv_method = 'Rprincurve'
self.anchor_gene = None
self.exclude_marker_names = None
self.result_queue = Queue.Queue()
self.okButton.clicked.connect(self.buttonClicked)
self.okButton.clicked.connect(self.OK)
self.cancelButton.clicked.connect(self.buttonClicked)
self.zoom = 0
self.pixMap = QtGui.QPixmap()
self.displayFileButton.setEnabled(False)
self.displayFileButton.clicked.connect(self.selectDisplay)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Message',
"Are you sure to quit?", QtGui.QMessageBox.Yes
| QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Escape:
self.close()
def selectDataset(self):
dataFileDialog = QtGui.QFileDialog(self)
self.data_path = str(dataFileDialog.getOpenFileName())
self.statusbar.showMessage("{0} ready to be "
"analyzed".format(path.basename(self.data_path)))
self.cancelButton.setEnabled(True)
self.okButton.setEnabled(True)
def logStateChanged(self, int):
if self.logCheckBox.isChecked():
self.log_mode = True
else:
self.log_mode = False
def pseudotimeStateChanged(self, int):
if self.pseudotimeCheckBox.isChecked():
self.pseudotime_mode = True
else:
self.pseudotime_mode = False
def buttonClicked(self):
sender = self.sender()
self.statusbar.showMessage(sender.text() + " was pressed.")
self.button_clicked = sender.text()
def OK(self):
self.statusbar.showMessage('Work in progress...')
self.worker_thread = WorkerThread(self.dataTypeComboBox.currentText(),
self.data_path, self.clusterModeComboBox.currentText(),
self.log_mode, self.pseudotime_mode, self.pcv_method,
self.anchor_gene, self.exclude_marker_names)
self.connect(self.worker_thread, QtCore.SIGNAL("update(QString)"),
self.worker_thread.run)
self.connect(self.worker_thread, QtCore.SIGNAL("finished()"), self.doneRunning)
self.worker_thread.start()
self.cancelButton.setEnabled(True)
self.okButton.setEnabled(False)
def doneRunning(self):
if self.button_clicked == 'Cancel':
self.cancelRunning()
else:
self.cancelButton.setEnabled(False)
self.okButton.setEnabled(False)
self.displayFileButton.setEnabled(True)
self.directory = self.result_queue.get()
self.statusbar.showMessage("PySCUBA has completed the "
"analysis of your data.")
QtGui.QMessageBox.information(self, "Status Message",
"Mission accomplished!")
def cancelRunning(self):
self.cancelButton.setEnabled(False)
self.okButton.setEnabled(False)
self.worker_thread.terminate()
self.statusbar.showMessage("PySCUBA was interrupted!")
QtGui.QMessageBox.information(self, "Status Message",
"PySCUBA was interrupted!")
def selectDisplay(self):
filters = 'Images (*.jpg *.pdf *.png)'
select_filters = 'Images (*.jpg *.pdf *.png)'
source_file = QtGui.QFileDialog.getOpenFileName(self,
'Select file to display', self.directory, filters, select_filters)
self.load_image_thread = LoadImageThread(source_file)
self.connect(self.load_image_thread, QtCore.SIGNAL("showImage(QString)"),
self.showImage)
self.load_image_thread.start()
def zoomFactor(self):
return self.zoom
def wheelEvent(self, event):
if not self.pixMap.isNull():
if event.delta() < 0:
factor = 0.8
self.zoom -= 1
else:
factor = 1.25
self.zoom += 1
if self.zoom < 0:
self.zoom = 0
elif self.zoom == 0:
self.fitInView()
else:
self.graphicsView.scale(factor, factor)
else:
pass
def fitInView(self):
rect = QtCore.QRectF(self.pixMap.rect())
if not rect.isNull():
unity = self.graphicsView.transform().mapRect(
QtCore.QRectF(0, 0, 1, 1))
self.graphicsView.scale(1.0 / unity.width(), 1.0 / unity.height())
view_rect = self.graphicsView.viewport().rect()
scene_rect = self.graphicsView.transform().mapRect(rect)
factor = min(view_rect.width() / scene_rect.width(),
view_rect.height() / scene_rect.height())
self.graphicsView.scale(factor, factor)
self.graphicsView.centerOn(rect.center())
self.zoom = 0
def showImage(self, source_file):
source_file = str(source_file)
target_file = source_file.split('.')[0] + '.jpg'
with wand.image.Image(filename=source_file) as img:
img.format = 'jpeg'
img.save(filename=target_file)
img = Image.open(target_file, 'r')
width, height = img.size
self.scene.clear()
self.zoom = 0
self.imgQ = ImageQt.ImageQt(img)
self.pixMap = QtGui.QPixmap.fromImage(self.imgQ)
if self.pixMap and not self.pixMap.isNull():
self.graphicsView.setDragMode(
QtGui.QGraphicsView.ScrollHandDrag)
self.scene.addPixmap(self.pixMap)
self.fitInView()
else:
self.graphicsView.setDragMode(QtGui.QGraphicsView.NoDrag)
self.scene.addPixmap(QtGui.QPixmap())
self.scene.update()
remove(target_file)
def main():
app = QtGui.QApplication(sys.argv)
form = PySCUBApp()
form.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| mit | 6,625,462,015,521,234,000 | 33.106383 | 87 | 0.59724 | false | 3.880182 | false | false | false |
splunk/eventgen | splunk_eventgen/lib/plugins/output/s2s.py | 1 | 6794 | import socket
import struct
from splunk_eventgen.lib.outputplugin import OutputPlugin
class S2S:
"""
Encode and send events to Splunk over the S2S V2 wire protocol.
It should be noted V2 is a much older protocol and is no longer utilized by any Splunk Forwarder.
It should still work, but its a very simple protocol and we've advanced pretty far since then.
However, if you have fully cooked events, its very lightweight and very easy to implement
which is why I elected to implement this version.
"""
s = None
signature_sent = None
useOutputQueue = True
def __init__(self, host="localhost", port=9997):
"""
Initialize object. Need to know Splunk host and port for the TCP Receiver
"""
self._open_connection(host, port)
self.signature_sent = False
def _open_connection(self, host="localhost", port=9997):
"""
Open a connection to Splunk and return a socket
"""
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((host, int(port)))
def _encode_sig(
self, serverName="s2s-api".encode("utf-8"), mgmtPort="9997".encode("utf-8")
):
"""
Create Signature element of the S2S Message. Signature is C struct:
struct S2S_Signature
{
char _signature[128];
char _serverName[256];
char _mgmtPort[16];
};
"""
if not self.signature_sent:
self.signature_sent = True
return struct.pack(
"!128s256s16s",
"--splunk-cooked-mode-v2--".encode("utf-8"),
serverName,
mgmtPort,
).decode("utf-8")
else:
return ""
def _encode_string(self, tosend=""):
"""
Encode a string to be sent across the wire to splunk
Wire protocol has an unsigned integer of the length of the string followed
by a null terminated string.
"""
tosend = str(tosend).encode("utf-8")
return struct.pack("!I%ds" % (len(tosend) + 1), len(tosend) + 1, tosend).decode(
"utf-8"
)
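    # Framing example (before the trailing .decode): _encode_string("abc") packs the
    # length 4 (payload plus NUL terminator) followed by the NUL-padded payload,
    # i.e. the bytes 00 00 00 04 61 62 63 00.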
def _encode_key_value(self, key="", value=""):
"""
Encode a key/value pair to send across the wire to splunk
A key value pair is merely a concatenated set of encoded strings.
"""
return "%s%s" % (self._encode_string(key), self._encode_string(value))
def _encode_event(
self, index="main", host="", source="", sourcetype="", _raw="_done", _time=None
):
# Create signature
sig = self._encode_sig()
msg_size = len(
struct.pack("!I", 0)
) # size of unsigned 32 bit integer, which is the count of map entries
maps = 1
        # May not have these, so set them first
        encoded_source = False
        encoded_sourcetype = False
        encoded_host = False
        encoded_time = False
# Encode source
if len(source) > 0:
encoded_source = self._encode_key_value(
"MetaData:Source", "source::" + source
)
maps += 1
msg_size += len(encoded_source)
# Encode sourcetype
if len(sourcetype) > 0:
encoded_sourcetype = self._encode_key_value(
"MetaData:Sourcetype", "sourcetype::" + sourcetype
)
maps += 1
msg_size += len(encoded_sourcetype)
# Encode host
if len(host) > 0:
encoded_host = self._encode_key_value("MetaData:Host", "host::" + host)
maps += 1
msg_size += len(encoded_host)
# Encode index
encoded_index = self._encode_key_value("_MetaData:Index", index)
maps += 1
msg_size += len(encoded_index)
# Encode _raw
encoded_raw = self._encode_key_value("_raw", _raw)
msg_size += len(encoded_raw)
# Will include a 32 bit integer 0 between the end of raw and the _raw trailer
msg_size += len(struct.pack("!I", 0))
# Encode "_raw" trailer... seems to just the string '_raw' repeated again at the end of the _raw field
encoded_raw_trailer = self._encode_string("_raw")
msg_size += len(encoded_raw_trailer)
# Add _done... Not sure if there's a penalty to setting this for every event
# but otherwise we don't flush immediately
encoded_done = self._encode_key_value("_done", "_done")
maps += 1
msg_size += len(encoded_done)
# Encode _time
if _time is not None:
encoded_time = self._encode_key_value("_time", _time)
msg_size += len(encoded_time)
maps += 1
# Create buffer, starting with the signature
buf = sig
# Add 32 bit integer with the size of the msg, calculated earlier
buf += struct.pack("!I", msg_size).decode("utf-8")
# Add number of map entries, which is 5, index, host, source, sourcetype, raw
buf += struct.pack("!I", maps).decode("utf-8")
# Add the map entries, index, source, sourcetype, host, raw
buf += encoded_index
buf += encoded_host if encoded_host else ""
buf += encoded_source if encoded_source else ""
buf += encoded_sourcetype if encoded_sourcetype else ""
buf += encoded_time if encoded_time else ""
buf += encoded_done
buf += encoded_raw
# Add dummy zero
buf += struct.pack("!I", 0).decode("utf-8")
# Add trailer raw
buf += encoded_raw_trailer
return buf
def send_event(
self, index="main", host="", source="", sourcetype="", _raw="", _time=None
):
"""
Encode and send an event to Splunk
"""
if len(_raw) > 0:
e = self._encode_event(index, host, source, sourcetype, _raw, _time)
self.s.sendall(e.encode("utf-8"))
def close(self):
"""
        Close the TCP connection to Splunk (a _done flag is already attached to every event sent)
"""
self.s.close()
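# Minimal usage sketch (hypothetical host/port; assumes a Splunk TCP input is listening):
#   s2s = S2S("localhost", 9997)
#   s2s.send_event(index="main", host="myhost", source="eventgen", sourcetype="syslog",
#                  _raw="hello world", _time=1609459200)
#   s2s.close()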
class S2SOutputPlugin(OutputPlugin):
name = "s2s"
MAXQUEUELENGTH = 10
s2s = None
useOutputQueue = True
def __init__(self, sample, output_counter=None):
OutputPlugin.__init__(self, sample, output_counter)
def flush(self, q):
if len(q) < 1:
return
if self.s2s is None:
self.s2s = S2S(self._sample.splunkHost, self._sample.splunkPort)
for m in q:
self.s2s.send_event(
m["index"],
m["host"],
m["source"],
m["sourcetype"],
m["_raw"],
m["_time"],
)
def load():
"""Returns an instance of the plugin"""
return S2SOutputPlugin
| apache-2.0 | -7,765,024,850,134,000,000 | 31.352381 | 110 | 0.553429 | false | 4.003536 | false | false | false |
p-l-/ivre | ivre/tools/scan2db.py | 1 | 5209 | #! /usr/bin/env python
# This file is part of IVRE.
# Copyright 2011 - 2020 Pierre LALET <[email protected]>
#
# IVRE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IVRE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IVRE. If not, see <http://www.gnu.org/licenses/>.
"""Parse NMAP scan results and add them in DB."""
from argparse import ArgumentParser
import os
import sys
import ivre.db
import ivre.utils
from ivre.view import nmap_record_to_view
import ivre.xmlnmap
def recursive_filelisting(base_directories, error):
"""Iterator on filenames in base_directories. Ugly hack: error is a
one-element list that will be set to True if one of the directories in
base_directories does not exist.
"""
for base_directory in base_directories:
if not os.path.exists(base_directory):
ivre.utils.LOGGER.warning("directory %r does not exist", base_directory)
error[0] = True
continue
if not os.path.isdir(base_directory):
yield base_directory
continue
for root, _, files in os.walk(base_directory):
for leaffile in files:
yield os.path.join(root, leaffile)
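# Typical invocation (assuming the standard IVRE command-line wrapper) would be something like:
#   ivre scan2db -c mycampaign -s nmap-2020 -r /data/scans/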
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument("scan", nargs="*", metavar="SCAN", help="Scan results")
parser.add_argument("-c", "--categories", default="", help="Scan categories.")
parser.add_argument("-s", "--source", default=None, help="Scan source.")
parser.add_argument(
"-t", "--test", action="store_true", help="Test mode (JSON output)."
)
parser.add_argument(
"--test-normal", action="store_true", help='Test mode ("normal" Nmap output).'
)
parser.add_argument(
"--ports",
"--port",
action="store_true",
help='Store only hosts with a "ports" element.',
)
parser.add_argument(
"--open-ports", action="store_true", help="Store only hosts with open ports."
)
parser.add_argument(
"--masscan-probes",
nargs="+",
metavar="PROBE",
help="Additional Nmap probes to use when trying to "
"match Masscan results against Nmap service "
"fingerprints.",
)
parser.add_argument(
"--zgrab-port",
metavar="PORT",
help="Port used for the zgrab scan. This might be "
"needed since the port number does not appear in the"
"result.",
)
parser.add_argument(
"--force-info",
action="store_true",
help="Force information (AS, country, city, etc.)"
" renewal (only useful with JSON format)",
)
parser.add_argument(
"-r",
"--recursive",
action="store_true",
help="Import all files from given directories.",
)
parser.add_argument(
"--update-view", action="store_true", help="Merge hosts in current view"
)
parser.add_argument(
"--no-update-view",
action="store_true",
help="Do not merge hosts in current view (default)",
)
args = parser.parse_args()
database = ivre.db.db.nmap
categories = args.categories.split(",") if args.categories else []
if args.test:
args.update_view = False
args.no_update_view = True
database = ivre.db.DBNmap()
if args.test_normal:
args.update_view = False
args.no_update_view = True
database = ivre.db.DBNmap(output_mode="normal")
# Ugly hack: we use a one-element list so that
# recursive_filelisting can modify its value
error = [False]
if args.recursive:
scans = recursive_filelisting(args.scan, error)
else:
scans = args.scan
if not args.update_view or args.no_update_view:
callback = None
else:
def callback(x):
return ivre.db.db.view.store_or_merge_host(nmap_record_to_view(x))
count = 0
for scan in scans:
if not os.path.exists(scan):
ivre.utils.LOGGER.warning("file %r does not exist", scan)
error[0] = True
continue
try:
if database.store_scan(
scan,
categories=categories,
source=args.source,
needports=args.ports,
needopenports=args.open_ports,
force_info=args.force_info,
masscan_probes=args.masscan_probes,
callback=callback,
zgrab_port=args.zgrab_port,
):
count += 1
except Exception:
ivre.utils.LOGGER.warning("Exception (file %r)", scan, exc_info=True)
error[0] = True
ivre.utils.LOGGER.info("%d results imported.", count)
sys.exit(error[0])
| gpl-3.0 | -717,743,129,055,448,400 | 32.178344 | 86 | 0.609906 | false | 3.916541 | true | false | false |
jamslevy/gsoc | app/soc/views/helper/surveys.py | 1 | 7932 | #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom widgets used for form fields.
"""
__authors__ = [
'JamesLevy" <[email protected]>',
]
from django import forms
from django.forms import util
from django.forms import widgets
from django.utils import html
from django.utils import simplejson
from django.utils import safestring
from soc.models.survey import SurveyContent, Survey, SurveyRecord
from soc.logic import dicts
import cgi
import wsgiref.handlers
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.db import djangoforms
class SurveyForm(djangoforms.ModelForm):
def __init__(self, *args, **kwargs):
""" This class is used to produce survey forms for several
circumstances:
- Admin creating survey from scratch
- Admin updating existing survey
- User taking survey
- User updating already taken survey
Using dynamic properties of the this_survey model (if passed
as an arg) the survey form is dynamically formed.
TODO: Form now scrambles the order of fields. If it's important
that fields are listed in a certain order, an alternative to
the schema dictionary will have to be used.
"""
kwargs['initial']= {}
this_survey = kwargs.get('this_survey', None)
survey_record = kwargs.get('survey_record', None)
del kwargs['this_survey']
del kwargs['survey_record']
if this_survey:
fields = {}
survey_order = {}
schema = this_survey.get_schema()
for property in this_survey.dynamic_properties():
if survey_record: # use previously entered value
value = getattr(survey_record, property)
else: # use prompts set by survey creator
value = getattr(this_survey, property)
# map out the order of the survey fields
survey_order[schema[property]["index"]] = property
# correct answers? Necessary for grading
if schema[property]["type"] == "long_answer":
fields[property] = forms.fields.CharField(widget=widgets.Textarea()) #custom rows
kwargs['initial'][property] = value
if schema[property]["type"] == "short_answer":
fields[property] = forms.fields.CharField(max_length=40)
kwargs['initial'][property] = value
if schema[property]["type"] == "selection":
these_choices = []
# add all properties, but select chosen one
options = eval( getattr(this_survey, property) )
if survey_record:
these_choices.append( (value, value) )
options.remove(value)
for option in options: these_choices.append( (option, option) )
fields[property] = forms.ChoiceField( choices=tuple(these_choices), widget=forms.Select())
for position, property in survey_order.items():
SurveyForm.base_fields.insert(position, property, fields[property] )
super(SurveyForm, self).__init__(*args, **kwargs)
class Meta:
model = SurveyContent
exclude = ['schema']
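# Illustrative sketch only (not part of the original module): how SurveyForm is
# expected to be instantiated for the two cases handled in __init__ above.
# `survey_content` and `existing_record` are hypothetical stand-ins for a
# SurveyContent entity and a previously saved SurveyRecord.
def _example_survey_forms(survey_content, existing_record):
    blank_form = SurveyForm(this_survey=survey_content, survey_record=None)
    prefilled_form = SurveyForm(this_survey=survey_content,
                                survey_record=existing_record)
    return blank_form, prefilled_form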
class EditSurvey(widgets.Widget):
"""
Edit Survey, or Create Survey if not this_survey arg given.
"""
WIDGET_HTML = """
<div id="survey_widget"><table> %(survey)s </table> %(options_html)s </div>
<script type="text/javascript" src="/soc/content/js/edit_survey.js"></script>
"""
QUESTION_TYPES = {"short_answer": "Short Answer", "long_answer": "Long Answer", "selection": "Selection" }
BUTTON_TEMPLATE = """
<button id="%(type_id)s" onClick="return false;">Add %(type_name)s Question</button>
"""
OPTIONS_HTML = """
<div id="survey_options"> %(options)s </div>
"""
SURVEY_TEMPLATE = """
<tbody></tbody>
"""
def __init__(self, *args, **kwargs):
"""Defines the name, key_name and model for this entity.
"""
self.this_survey = kwargs.get('this_survey', None)
if self.this_survey: del kwargs['this_survey']
super(EditSurvey, self).__init__(*args, **kwargs)
def render(self, name, value, attrs=None):
#print self.entity
#if self.entity: survey = self.SurveyForm(entity)
#else: survey = self.SurveyForm()
survey = SurveyForm(this_survey=self.this_survey, survey_record=None)
survey = str(survey)
if len(survey) == 0: survey = self.SURVEY_TEMPLATE
options = ""
for type_id, type_name in self.QUESTION_TYPES.items():
options += self.BUTTON_TEMPLATE % { 'type_id': type_id, 'type_name': type_name }
options_html = self.OPTIONS_HTML % {'options': options }
result = self.WIDGET_HTML % {'survey': str(survey), 'options_html':options_html }
return result
class TakeSurvey(widgets.Widget):
"""
Take Survey, or Update Survey. """
WIDGET_HTML = """
%(help_text)s <div class="%(status)s"id="survey_widget"><table> %(survey)s </table> </div>
<script type="text/javascript" src="/soc/content/js/take_survey.js"></script>
"""
def render(self, this_survey):
#check if user has already submitted form. If so, show existing form
import soc.models.user
from soc.logic.models.user import logic as user_logic
user = user_logic.getForCurrentAccount()
survey_record = SurveyRecord.gql("WHERE user = :1 AND this_survey = :2", user, this_survey.survey_parent.get()).get()
survey = SurveyForm(this_survey=this_survey, survey_record=survey_record)
if survey_record:
help_text = "Edit and re-submit this survey."
status = "edit"
else:
help_text = "Please complete this survey."
status = "create"
result = self.WIDGET_HTML % {'survey': str(survey), 'help_text': help_text,
'status': status }
return result
class SurveyResults(widgets.Widget):
"""
Render List of Survey Results For Given Survey
"""
def render(self, this_survey, params, filter=filter, limit=1000,
offset=0, order=[], idx=0, context={}):
from soc.logic.models.survey import results_logic as results_logic
logic = results_logic
filter = { 'this_survey': this_survey }
data = logic.getForFields(filter=filter, limit=limit, offset=offset,
order=order)
params['name'] = "Survey Results"
content = {
'idx': idx,
'data': data,
            #'export': export_link, TODO - export to CSV
'logic': logic,
'limit': limit,
}
updates = dicts.rename(params, params['list_params'])
content.update(updates)
contents = [content]
#content = [i for i in contents if i.get('idx') == export]
if len(content) == 1:
content = content[0]
key_order = content.get('key_order')
#if key_order: TODO - list order
#data = [i.toDict(key_order) for i in content['data']]
#filename = "export_%d" % export
#return self.csv(request, data, filename, params, key_order)
from soc.views import helper
import soc.logic.lists
context['list'] = soc.logic.lists.Lists(contents)
for list in context['list']._contents:
list['row'] = 'soc/survey/list/results_row.html'
list['heading'] = 'soc/survey/list/results_heading.html'
list['description'] = 'Survey Results:'
context['properties'] = this_survey.this_survey.dynamic_properties()
context['entity_type'] = "Survey Results"
context['entity_type_plural'] = "Results"
context['no_lists_msg'] = "No Survey Results"
from django.template import loader
markup = loader.render_to_string('soc/survey/results.html', dictionary=context).strip('\n')
return markup
| apache-2.0 | -5,420,985,475,977,241,000 | 33.042918 | 118 | 0.674483 | false | 3.564944 | false | false | false |
boundlessgeo/qgis-webappbuilder-plugin | webappbuilder/exp2js.py | 1 | 6595 | #===============================================================================
# This code belongs to the qgs2js library, by Nathan Woodrow
# https://github.com/NathanW2/qgs2js
#===============================================================================
from qgis.core import QgsExpression
import re, json
import os
whenfunctions = []
binary_ops = [
"||", "&&",
"==", "!=", "<=", ">=", "<", ">", "~",
"LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE", "===", "!==",
"+", "-", "*", "/", "//", "%", "^",
"+"
]
unary_ops = ["!", "-"]
def compile(expstr, name=None, mapLib=None):
"""
Convert a QgsExpression into a JS function.
"""
return exp2func(expstr, name, mapLib)
def exp2func(expstr, name=None, mapLib=None):
"""
Convert a QgsExpression into a JS function.
"""
global whenfunctions
whenfunctions = []
exp = QgsExpression(expstr)
if expstr:
js = walkExpression(exp.rootNode(), mapLib=mapLib)
else:
js = "true"
if name is None:
import random
import string
name = ''.join(random.choice(string.ascii_lowercase) for _ in range(4))
name += "_eval_expression"
temp = """
function %s(context) {
// %s
var feature = context.feature;
%s
return %s;
}""" % (name,
exp.dump(),
"\n".join(whenfunctions),
js)
return temp, name, exp.dump()
def walkExpression(node, mapLib):
try:
if node.nodeType() == QgsExpression.ntBinaryOperator:
jsExp = handle_binary(node, mapLib)
elif node.nodeType() == QgsExpression.ntUnaryOperator:
jsExp = handle_unary(node, mapLib)
elif node.nodeType() == QgsExpression.ntInOperator:
jsExp = handle_in(node, mapLib)
elif node.nodeType() == QgsExpression.ntFunction:
jsExp = handle_function(node, mapLib)
elif node.nodeType() == QgsExpression.ntLiteral:
jsExp = handle_literal(node)
elif node.nodeType() == QgsExpression.ntColumnRef:
jsExp = handle_columnRef(node, mapLib)
elif node.nodeType() == QgsExpression.ntCondition:
jsExp = handle_condition(node,mapLib)
return jsExp
except:
return "true"
def handle_condition(node, mapLib):
global condtioncounts
subexps = re.findall("WHEN(\s+.*?\s+)THEN(\s+.*?\s+)", node.dump())
count = 1;
js = ""
for sub in subexps:
when = sub[0].strip()
then = sub[1].strip()
whenpart = QgsExpression(when)
thenpart = QgsExpression(then)
whenjs = walkExpression(whenpart.rootNode(), mapLib)
thenjs = walkExpression(thenpart.rootNode(), mapLib)
style = "if" if count == 1 else "else if"
js += """
%s %s {
return %s;
}
""" % (style, whenjs, thenjs)
js = js.strip()
count += 1
elsejs = "null"
if "ELSE" in node.dump():
elseexps = re.findall("ELSE(\s+.*?\s+)END", node.dump())
elsestr = elseexps[0].strip()
exp = QgsExpression(elsestr)
elsejs = walkExpression(exp.rootNode(), mapLib)
funcname = "_CASE()"
temp = """function %s {
%s
else {
return %s;
}
};""" % (funcname, js, elsejs)
whenfunctions.append(temp)
return funcname
def handle_binary(node, mapLib):
op = node.op()
retOp = binary_ops[op]
left = node.opLeft()
right = node.opRight()
retLeft = walkExpression(left, mapLib)
retRight = walkExpression(right, mapLib)
if retOp == "LIKE":
return "(%s.indexOf(%s) > -1)" % (retLeft[:-1],
re.sub("[_%]", "", retRight))
elif retOp == "NOT LIKE":
return "(%s.indexOf(%s) == -1)" % (retLeft[:-1],
re.sub("[_%]", "", retRight))
elif retOp == "ILIKE":
return "(%s.toLowerCase().indexOf(%s.toLowerCase()) > -1)" % (
retLeft[:-1],
re.sub("[_%]", "", retRight))
elif retOp == "NOT ILIKE":
return "(%s.toLowerCase().indexOf(%s.toLowerCase()) == -1)" % (
retLeft[:-1],
re.sub("[_%]", "", retRight))
elif retOp == "~":
return "/%s/.test(%s)" % (retRight[1:-2], retLeft[:-1])
elif retOp == "//":
return "(Math.floor(%s %s %s))" % (retLeft, retOp, retRight)
else:
return "(%s %s %s)" % (retLeft, retOp, retRight)
def handle_unary(node, mapLib):
op = node.op()
operand = node.operand()
retOp = unary_ops[op]
retOperand = walkExpression(operand, mapLib)
return "%s %s " % (retOp, retOperand)
def handle_in(node, mapLib):
operand = node.node()
retOperand = walkExpression(operand, mapLib)
list = node.list().dump()
retList = json.dumps(list)
return "%s.indexOf(%s) > -1 " % (retList, retOperand)
def handle_literal(node):
val = node.value()
quote = ""
if isinstance(val, basestring):
quote = "'"
val = val.replace("\n", "\\n")
elif val is None:
val = "null"
return "%s%s%s" % (quote, unicode(val), quote)
def handle_function(node, mapLib):
fnIndex = node.fnIndex()
func = QgsExpression.Functions()[fnIndex]
retArgs = []
retFunc = (func.name().replace("$", "_"))
args = node.args()
if args is not None:
args = args.list()
for arg in args:
retArgs.append(walkExpression(arg, mapLib))
retArgs = ",".join(retArgs)
return "fnc_%s([%s], context)" % (retFunc, retArgs)
def handle_columnRef(node, mapLib):
return "getFeatureAttribute(feature, '%s') " % node.name()
def compile_to_file(exp, name=None, mapLib=None, filename="expressions.js"):
"""
Generate JS function to file from exp and append it to the end of the given file name.
:param exp: The expression to export to JS
:return: The name of the function you can call.
"""
functionjs, name, _ = compile(exp, name=name, mapLib=mapLib)
with open(filename, "a") as f:
f.write("\n\n")
f.write(functionjs)
return name
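# Illustrative sketch only (not part of the original module): the assumed call
# pattern for compile(). The expression string and function name are made up.
def _example_compile_usage():
    js_body, js_name, dumped = compile('"population" > 1000', name="pop_filter")
    # js_body holds the generated JS function source, js_name the callable name.
    return js_body, js_name, dumped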
def is_expression_supported(expr):
path = os.path.join(os.path.dirname(__file__), "js", "qgis2web_expressions.js")
with open(path) as f:
lines = f.readlines()
used = [str(e) for e in re.findall("[a-zA-Z]{2,}?\(", expr)]
unsupported = []
for i, line in enumerate(lines):
for func in used:
if func in line:
if "return false" in lines[i + 1]:
unsupported.append(func[:-1])
break
return unsupported
| gpl-3.0 | -4,926,319,765,662,353,000 | 28.573991 | 90 | 0.538741 | false | 3.517333 | false | false | false |
rsjohnco/rez | src/rezplugins/release_hook/command.py | 1 | 4175 | """
Executes pre- and post-release shell commands
"""
from rez.release_hook import ReleaseHook
from rez.exceptions import ReleaseHookCancellingError
from rez.config import config
from rez.utils.logging_ import print_debug
from rez.vendor.schema.schema import Schema, Or, Optional, Use, And
from rez.vendor.sh.sh import Command, ErrorReturnCode, sudo, which
import getpass
import sys
import os
class CommandReleaseHook(ReleaseHook):
commands_schema = Schema(
{"command": basestring,
Optional("args"): Or(And(basestring,
Use(lambda x: x.strip().split())),
[basestring]),
Optional("user"): basestring})
schema_dict = {
"print_commands": bool,
"print_output": bool,
"print_error": bool,
"cancel_on_error": bool,
"stop_on_error": bool,
"pre_build_commands": [commands_schema],
"pre_release_commands": [commands_schema],
"post_release_commands": [commands_schema]}
@classmethod
def name(cls):
return "command"
def __init__(self, source_path):
super(CommandReleaseHook, self).__init__(source_path)
def execute_command(self, cmd_name, cmd_arguments, user, errors):
def _err(msg):
errors.append(msg)
if self.settings.print_error:
print >> sys.stderr, msg
def _execute(cmd, arguments):
try:
result = cmd(*(arguments or []))
if self.settings.print_output:
print result.stdout.strip()
except ErrorReturnCode as e:
# `e` shows the command that was run
msg = "command failed:\n%s" % str(e)
_err(msg)
return False
return True
if not os.path.isfile(cmd_name):
cmd_full_path = which(cmd_name)
else:
cmd_full_path = cmd_name
if not cmd_full_path:
msg = "%s: command not found" % cmd_name
_err(msg)
return False
run_cmd = Command(cmd_full_path)
if user == 'root':
with sudo:
return _execute(run_cmd, cmd_arguments)
elif user and user != getpass.getuser():
raise NotImplementedError # TODO
else:
return _execute(run_cmd, cmd_arguments)
def _release(self, commands, errors=None):
for conf in commands:
if self.settings.print_commands or config.debug("package_release"):
from subprocess import list2cmdline
toks = [conf["command"]] + conf.get("args", [])
msg = "running command: %s" % list2cmdline(toks)
if self.settings.print_commands:
print msg
else:
print_debug(msg)
if not self.execute_command(cmd_name=conf.get("command"),
cmd_arguments=conf.get("args"),
user=conf.get("user"),
errors=errors):
if self.settings.stop_on_error:
return
def pre_build(self, user, install_path, **kwargs):
errors = []
self._release(self.settings.pre_build_commands, errors=errors)
if errors and self.settings.cancel_on_error:
raise ReleaseHookCancellingError(
"The following pre-build commands failed:\n%s"
% '\n\n'.join(errors))
def pre_release(self, user, install_path, **kwargs):
errors = []
self._release(self.settings.pre_release_commands, errors=errors)
if errors and self.settings.cancel_on_error:
raise ReleaseHookCancellingError(
"The following pre-release commands failed:\n%s"
% '\n\n'.join(errors))
def post_release(self, user, install_path, variants, **kwargs):
self._release(self.settings.post_release_commands)
def register_plugin():
return CommandReleaseHook
| gpl-3.0 | 1,861,106,307,081,865,500 | 34.991379 | 79 | 0.544192 | false | 4.339917 | false | false | false |
BiznetGIO/horizon | openstack_dashboard/test/integration_tests/pages/pageobject.py | 5 | 3284 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six.moves.urllib.parse as urlparse
from openstack_dashboard.test.integration_tests import basewebobject
class PageObject(basewebobject.BaseWebObject):
"""Base class for page objects."""
PARTIAL_LOGIN_URL = 'auth/login'
def __init__(self, driver, conf):
"""Constructor."""
super(PageObject, self).__init__(driver, conf)
self._page_title = None
@property
def page_title(self):
return self.driver.title
def is_the_current_page(self, do_assert=False):
found_expected_title = self.page_title.startswith(self._page_title)
if do_assert:
self.assertTrue(
found_expected_title,
"Expected to find %s in page title, instead found: %s"
% (self._page_title, self.page_title))
return found_expected_title
@property
def login_url(self):
base_url = self.conf.dashboard.dashboard_url
if not base_url.endswith('/'):
base_url += '/'
return urlparse.urljoin(base_url, self.PARTIAL_LOGIN_URL)
def get_url_current_page(self):
return self.driver.current_url
def close_window(self):
return self.driver.close()
def is_nth_window_opened(self, n):
return len(self.driver.window_handles) == n
def switch_window(self, window_name=None, window_index=None):
"""Switches focus between the webdriver windows.
Args:
- window_name: The name of the window to switch to.
- window_index: The index of the window handle to switch to.
If the method is called without arguments it switches to the
last window in the driver window_handles list.
In case only one window exists nothing effectively happens.
Usage:
page.switch_window('_new')
page.switch_window(2)
page.switch_window()
"""
if window_name is not None and window_index is not None:
raise ValueError("switch_window receives the window's name or "
"the window's index, not both.")
if window_name is not None:
self.driver.switch_to.window(window_name)
elif window_index is not None:
self.driver.switch_to.window(
self.driver.window_handles[window_index])
else:
self.driver.switch_to.window(self.driver.window_handles[-1])
def go_to_previous_page(self):
self.driver.back()
def go_to_next_page(self):
self.driver.forward()
def refresh_page(self):
self.driver.refresh()
def go_to_login_page(self):
self.driver.get(self.login_url)
self.is_the_current_page(do_assert=True)
| apache-2.0 | -6,881,513,618,528,521,000 | 33.93617 | 78 | 0.636114 | false | 4 | false | false | false |
blacktear23/py-servicebus | servicebus/pika/frame.py | 1 | 7777 | """Frame objects that do the frame demarshaling and marshaling."""
import logging
import struct
from servicebus.pika import amqp_object
from servicebus.pika import exceptions
from servicebus.pika import spec
from servicebus.pika.compat import byte
LOGGER = logging.getLogger(__name__)
class Frame(amqp_object.AMQPObject):
"""Base Frame object mapping. Defines a behavior for all child classes for
assignment of core attributes and implementation of the a core _marshal
method which child classes use to create the binary AMQP frame.
"""
NAME = 'Frame'
def __init__(self, frame_type, channel_number):
"""Create a new instance of a frame
:param int frame_type: The frame type
:param int channel_number: The channel number for the frame
"""
self.frame_type = frame_type
self.channel_number = channel_number
def _marshal(self, pieces):
"""Create the full AMQP wire protocol frame data representation
:rtype: bytes
"""
payload = b''.join(pieces)
return struct.pack('>BHI', self.frame_type, self.channel_number,
len(payload)) + payload + byte(spec.FRAME_END)
def marshal(self):
"""To be ended by child classes
:raises NotImplementedError
"""
raise NotImplementedError
class Method(Frame):
"""Base Method frame object mapping. AMQP method frames are mapped on top
of this class for creating or accessing their data and attributes.
"""
NAME = 'METHOD'
def __init__(self, channel_number, method):
"""Create a new instance of a frame
        :param int channel_number: The channel number for the frame
:param pika.Spec.Class.Method method: The AMQP Class.Method
"""
Frame.__init__(self, spec.FRAME_METHOD, channel_number)
self.method = method
def marshal(self):
"""Return the AMQP binary encoded value of the frame
:rtype: str
"""
pieces = self.method.encode()
pieces.insert(0, struct.pack('>I', self.method.INDEX))
return self._marshal(pieces)
class Header(Frame):
"""Header frame object mapping. AMQP content header frames are mapped
on top of this class for creating or accessing their data and attributes.
"""
NAME = 'Header'
def __init__(self, channel_number, body_size, props):
"""Create a new instance of a AMQP ContentHeader object
:param int channel_number: The channel number for the frame
:param int body_size: The number of bytes for the body
:param pika.spec.BasicProperties props: Basic.Properties object
"""
Frame.__init__(self, spec.FRAME_HEADER, channel_number)
self.body_size = body_size
self.properties = props
def marshal(self):
"""Return the AMQP binary encoded value of the frame
:rtype: str
"""
pieces = self.properties.encode()
pieces.insert(0, struct.pack('>HxxQ', self.properties.INDEX,
self.body_size))
return self._marshal(pieces)
class Body(Frame):
"""Body frame object mapping class. AMQP content body frames are mapped on
to this base class for getting/setting of attributes/data.
"""
NAME = 'Body'
def __init__(self, channel_number, fragment):
"""
Parameters:
- channel_number: int
- fragment: unicode or str
"""
Frame.__init__(self, spec.FRAME_BODY, channel_number)
self.fragment = fragment
def marshal(self):
"""Return the AMQP binary encoded value of the frame
:rtype: str
"""
return self._marshal([self.fragment])
class Heartbeat(Frame):
"""Heartbeat frame object mapping class. AMQP Heartbeat frames are mapped
on to this class for a common access structure to the attributes/data
values.
"""
NAME = 'Heartbeat'
def __init__(self):
"""Create a new instance of the Heartbeat frame"""
Frame.__init__(self, spec.FRAME_HEARTBEAT, 0)
def marshal(self):
"""Return the AMQP binary encoded value of the frame
:rtype: str
"""
return self._marshal(list())
class ProtocolHeader(amqp_object.AMQPObject):
"""AMQP Protocol header frame class which provides a pythonic interface
for creating AMQP Protocol headers
"""
NAME = 'ProtocolHeader'
def __init__(self, major=None, minor=None, revision=None):
"""Construct a Protocol Header frame object for the specified AMQP
version
:param int major: Major version number
:param int minor: Minor version number
:param int revision: Revision
"""
self.frame_type = -1
self.major = major or spec.PROTOCOL_VERSION[0]
self.minor = minor or spec.PROTOCOL_VERSION[1]
self.revision = revision or spec.PROTOCOL_VERSION[2]
def marshal(self):
"""Return the full AMQP wire protocol frame data representation of the
ProtocolHeader frame
:rtype: str
"""
return b'AMQP' + struct.pack('BBBB', 0, self.major, self.minor,
self.revision)
def decode_frame(data_in):
"""Receives raw socket data and attempts to turn it into a frame.
Returns bytes used to make the frame and the frame
:param str data_in: The raw data stream
:rtype: tuple(bytes consumed, frame)
:raises: pika.exceptions.InvalidFrameError
"""
# Look to see if it's a protocol header frame
try:
if data_in[0:4] == b'AMQP':
major, minor, revision = struct.unpack_from('BBB', data_in, 5)
return 8, ProtocolHeader(major, minor, revision)
except (IndexError, struct.error):
return 0, None
# Get the Frame Type, Channel Number and Frame Size
try:
(frame_type, channel_number,
frame_size) = struct.unpack('>BHL', data_in[0:7])
except struct.error:
return 0, None
# Get the frame data
frame_end = spec.FRAME_HEADER_SIZE + frame_size + spec.FRAME_END_SIZE
# We don't have all of the frame yet
if frame_end > len(data_in):
return 0, None
# The Frame termination chr is wrong
if data_in[frame_end - 1:frame_end] != byte(spec.FRAME_END):
raise exceptions.InvalidFrameError("Invalid FRAME_END marker")
# Get the raw frame data
frame_data = data_in[spec.FRAME_HEADER_SIZE:frame_end - 1]
if frame_type == spec.FRAME_METHOD:
# Get the Method ID from the frame data
method_id = struct.unpack_from('>I', frame_data)[0]
# Get a Method object for this method_id
method = spec.methods[method_id]()
# Decode the content
method.decode(frame_data, 4)
# Return the amount of data consumed and the Method object
return frame_end, Method(channel_number, method)
elif frame_type == spec.FRAME_HEADER:
# Return the header class and body size
class_id, weight, body_size = struct.unpack_from('>HHQ', frame_data)
# Get the Properties type
properties = spec.props[class_id]()
# Decode the properties out
properties.decode(frame_data[12:])
# Return a Header frame
return frame_end, Header(channel_number, body_size, properties)
elif frame_type == spec.FRAME_BODY:
# Return the amount of data consumed and the Body frame w/ data
return frame_end, Body(channel_number, frame_data)
elif frame_type == spec.FRAME_HEARTBEAT:
# Return the amount of data and a Heartbeat frame
return frame_end, Heartbeat()
raise exceptions.InvalidFrameError("Unknown frame type: %i" % frame_type)
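# Illustrative sketch only (not part of the original module): marshal a
# heartbeat frame and decode the raw bytes again with decode_frame().
def _example_heartbeat_roundtrip():
    raw = Heartbeat().marshal()
    consumed, frame = decode_frame(raw)
    # Here consumed == len(raw) and frame is a new Heartbeat instance.
    return consumed, frame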
| bsd-3-clause | 8,093,776,396,423,722,000 | 28.34717 | 78 | 0.631092 | false | 4.17221 | false | false | false |
Alexoner/learning-web | python/django/djangoByExample/forum/models.py | 1 | 1904 | from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from string import join
# from djangoByExample.settings import MEDIA_ROOT
# Create your models here.
class Forum(models.Model):
title = models.CharField(max_length=60)
def __unicode__(self):
return self.title
def num_posts(self):
return sum([t.num_posts() for t in self.thread_set.all()])
def last_post(self):
if self.thread_set.count():
last = None
for t in self.thread_set.all():
l = t.last_post()
if l:
if not last:
last = l
elif l.created > last.created:
last = l
return last
class Thread(models.Model):
title = models.CharField(max_length=60)
created = models.DateTimeField(auto_now_add=True)
creator = models.ForeignKey(User, blank=True, null=True)
forum = models.ForeignKey(Forum)
def __unicode__(self):
return unicode(self.creator) + " - " + self.title
def num_posts(self):
return self.post_set.count()
def num_replies(self):
return self.post_set.count() - 1
def last_post(self):
if self.post_set.count():
return self.post_set.order_by("created")[0]
class Post(models.Model):
title = models.CharField(max_length=60)
created = models.DateTimeField(auto_now_add=True)
creator = models.ForeignKey(User, blank=True, null=True)
thread = models.ForeignKey(Thread)
body = models.TextField(max_length=10000)
def __unicode__(self):
return u"%s - %s - %s" % (self.creator, self.thread, self.title)
def short(self):
return u"%s - %s\n%s" % (self.creator, self.title,
self.created.strftime("%b %d,%I:%M %p"))
short.allow_tags = True
| gpl-2.0 | 2,537,485,843,754,796,500 | 28.292308 | 73 | 0.590336 | false | 3.733333 | false | false | false |
IvanJJill/base_prj | base_prj/settings.py | 1 | 2259 | """
Django settings for base_prj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h-ae1kbm7l&-#jmgs5yv94l!r2o=j=hhwzlj#ouitmy%%x0g7o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bicycles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'base_prj.urls'
WSGI_APPLICATION = 'base_prj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, "base_prj"),) # added line to enable dynamic routing for static files
TEMPLATE_DIRS = [os.path.join(os.path.join(BASE_DIR, "base_prj"), "templates")]
| gpl-2.0 | -2,850,854,167,003,178,000 | 24.965517 | 113 | 0.723772 | false | 3.199717 | false | false | false |
iefan/kfjz | jzuser/models.py | 1 | 3381 | #coding:utf8
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, PermissionsMixin
)
class MyUserManager(BaseUserManager):
def create_user(self, unitsn, unitname, unitgroup, operatorname,password=None):
"""
Creates and saves a User with the given email, unitsn, unitname.
"""
if not unitsn:
raise ValueError('Users must have an sn.')
user = self.model(
unitsn = unitsn,
# email=MyUserManager.normalize_email(email),
unitname=unitname,
unitgroup = unitgroup,
operatorname = operatorname,
# is_staff=False,
# is_active=True,
# is_superuser=False,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, unitsn, unitname, unitgroup, operatorname, password):
"""
        Creates and saves a superuser with the given unitsn, unitname,
        unitgroup, operatorname and password.
"""
user = self.create_user(unitsn,
# email,
password=password,
unitname=unitname,
unitgroup = unitgroup,
operatorname = operatorname,
)
# user.is_staff = True
# user.is_superuser = True
user.is_active = True
user.is_admin = True
# user.is_staff = True
user.save(using=self._db)
return user
class MyUser(AbstractBaseUser, PermissionsMixin):
unitsn = models.CharField(verbose_name='单位编码', max_length=30, unique=True, db_index=True)
# email = models.EmailField(verbose_name='电子邮箱', max_length=255, unique=True,)
unitname = models.CharField(max_length=100, verbose_name="单位名称")
UNITGROUP_CHOICES = (
        ('0', u'市残联'),  # municipal Disabled Persons' Federation
        ('1', u'区残联'),  # district Disabled Persons' Federation
        ('2', u'医院'),    # hospital
)
unitgroup = models.CharField(max_length=30, choices=UNITGROUP_CHOICES, verbose_name="单位类别")
operatorname = models.CharField(max_length=30, verbose_name="操作人员")
# unitname = models.DateField()
is_active = models.BooleanField(default=True)
# is_staff = models.BooleanField(default=False)
is_admin = models.BooleanField(default=False)
objects = MyUserManager()
USERNAME_FIELD = 'unitsn'
REQUIRED_FIELDS = ['unitname', 'unitgroup', 'operatorname']
def get_full_name(self):
# The user is identified by unitsn
return self.unitsn
def get_short_name(self):
        # The user is identified by unitsn
return self.unitsn
def __unicode__(self):
s= "%s" % (self.unitsn)
return s
class Meta:
verbose_name = "用户信息"
verbose_name_plural = "用户信息"
# app_label = u"信息管理"
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All admins are staff
return self.is_admin | gpl-2.0 | 6,165,705,546,778,076,000 | 30.447619 | 95 | 0.603151 | false | 3.78555 | false | false | false |
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/gui/dialog/attributesync.py | 1 | 4608 | '''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <[email protected]>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from taskcoachlib import patterns
from taskcoachlib.thirdparty.pubsub import pub
from taskcoachlib.i18n import _
import wx
class AttributeSync(object):
''' Class used for keeping an attribute of a domain object synchronized with
a control in a dialog. If the user edits the value using the control,
the domain object is changed, using the appropriate command. If the
attribute of the domain object is changed (e.g. in another dialog) the
value of the control is updated. '''
def __init__(self, attributeGetterName, entry, currentValue, items,
commandClass, editedEventType, changedEventType, callback=None,
**kwargs):
self._getter = attributeGetterName
self._entry = entry
self._currentValue = currentValue
self._items = items
self._commandClass = commandClass
self.__commandKwArgs = kwargs
self.__changedEventType = changedEventType
self.__callback = callback
entry.Bind(editedEventType, self.onAttributeEdited)
if len(items) == 1:
self.__start_observing_attribute(changedEventType, items[0])
def onAttributeEdited(self, event):
event.Skip()
new_value = self.getValue()
if new_value != self._currentValue:
self._currentValue = new_value
commandKwArgs = self.commandKwArgs(new_value)
self._commandClass(None, self._items, **commandKwArgs).do() # pylint: disable=W0142
self.__invokeCallback(new_value)
def onAttributeChanged_Deprecated(self, event): # pylint: disable=W0613
if self._entry:
new_value = getattr(self._items[0], self._getter)()
if new_value != self._currentValue:
self._currentValue = new_value
self.setValue(new_value)
self.__invokeCallback(new_value)
else:
self.__stop_observing_attribute()
def onAttributeChanged(self, newValue, sender):
if sender in self._items:
if self._entry:
if newValue != self._currentValue:
self._currentValue = newValue
self.setValue(newValue)
self.__invokeCallback(newValue)
else:
self.__stop_observing_attribute()
def commandKwArgs(self, new_value):
self.__commandKwArgs['newValue'] = new_value
return self.__commandKwArgs
def setValue(self, new_value):
self._entry.SetValue(new_value)
def getValue(self):
return self._entry.GetValue()
def __invokeCallback(self, value):
if self.__callback is not None:
try:
self.__callback(value)
except Exception, e:
wx.MessageBox(unicode(e), _('Error'), wx.OK)
def __start_observing_attribute(self, eventType, eventSource):
if eventType.startswith('pubsub'):
pub.subscribe(self.onAttributeChanged, eventType)
else:
patterns.Publisher().registerObserver(self.onAttributeChanged_Deprecated,
eventType=eventType,
eventSource=eventSource)
def __stop_observing_attribute(self):
try:
pub.unsubscribe(self.onAttributeChanged, self.__changedEventType)
except pub.UndefinedTopic:
pass
patterns.Publisher().removeObserver(self.onAttributeChanged_Deprecated)
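# Illustrative sketch (not part of the original module): wiring a wx text entry
# to a task's subject could look roughly like the call below; the getter name,
# command class and changed-event type are hypothetical stand-ins for the real
# Task Coach classes.
#
#   AttributeSync('subject', subject_entry, task.subject(), [task],
#                 EditSubjectCommand, wx.EVT_KILL_FOCUS,
#                 'pubsub.task.subject')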
class FontColorSync(AttributeSync):
def setValue(self, newValue):
self._entry.SetColor(newValue)
def getValue(self):
return self._entry.GetColor()
| gpl-3.0 | -6,211,550,586,646,155,000 | 38.421053 | 96 | 0.614149 | false | 4.539901 | false | false | false |
chop-dbhi/varify | varify/genes/resources.py | 1 | 3892 | from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf.urls import patterns, url
from django.http import Http404
from django.views.decorators.cache import never_cache
from django.core.urlresolvers import reverse
from preserialize.serialize import serialize
from serrano.resources.base import ThrottledResource
from varify import api
from vdw.genes.models import Gene
class GeneResource(ThrottledResource):
model = Gene
template = api.templates.Gene
def is_not_found(self, request, response, pk):
return not self.model.objects.filter(pk=pk).exists()
@api.cache_resource
def get(self, request, pk):
related = ['chr', 'detail', 'families']
try:
gene = self.model.objects.select_related(*related).get(pk=pk)
except self.model.DoesNotExist:
raise Http404
data = serialize(gene, **self.template)
# The approved symbol and name is listed as a synonym for easier
# searching, but they should be displayed in the output
if data['name'] in data['synonyms']:
data['synonyms'].remove(data['name'])
if data['symbol'] in data['synonyms']:
data['synonyms'].remove(data['symbol'])
return data
class GeneSearchResource(ThrottledResource):
model = Gene
template = api.templates.GeneSearch
def get(self, request):
query = request.GET.get('query')
fuzzy = request.GET.get('fuzzy', 1)
page = request.GET.get('page', 1)
# Use only the currently 'approved' genes
genes = self.model.objects.select_related('synonyms')
# Perform search if a query string is supplied
if query:
if fuzzy == '0' or fuzzy == 'false':
genes = genes.filter(symbol__iexact=query)
else:
genes = genes.filter(synonyms__label__icontains=query)
genes = genes.distinct()
# Paginate the results
paginator = Paginator(genes, api.PAGE_SIZE)
try:
            page = paginator.page(page)
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = paginator.page(paginator.num_pages)
resp = {
'result_count': paginator.count,
'results': serialize(page.object_list, **self.template),
}
        # Post processing...
for obj in resp['results']:
            # The approved symbol and name are listed among the synonyms to make
            # searching easier, but they are already shown in their own fields,
            # so drop them from the synonyms list in the output
if obj['name'] in obj['synonyms']:
obj['synonyms'].remove(obj['name'])
if obj['symbol'] in obj['synonyms']:
obj['synonyms'].remove(obj['symbol'])
obj['_links'] = {
'self': {
'rel': 'self',
'href': reverse('api:genes:gene',
kwargs={'pk': obj['id']})
}
}
links = {}
if page.number != 1:
links['prev'] = {
'rel': 'prev',
'href': "{0}?page={1}".format(reverse('api:genes:search'),
str(page.number - 1))
}
if page.number < paginator.num_pages - 1:
links['next'] = {
'rel': 'next',
'href': "{0}?page={1}".format(reverse('api:genes:search'),
str(page.number + 1))
}
if links:
resp['_links'] = links
return resp
gene_resource = never_cache(GeneResource())
gene_search_resource = never_cache(GeneSearchResource())
urlpatterns = patterns(
'',
url(r'^$', gene_search_resource, name='search'),
url(r'^(?P<pk>\d+)/$', gene_resource, name='gene'),
)
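# Illustrative sketch (not part of the original module): with this urlconf
# included under some API prefix, requests would look roughly like
#   GET <prefix>/?query=BRCA1&fuzzy=0&page=1  -> gene_search_resource
#   GET <prefix>/42/                          -> gene_resource (gene pk 42)
# where <prefix> is wherever these patterns are mounted.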
| bsd-2-clause | 8,788,163,061,064,462,000 | 32.551724 | 76 | 0.555755 | false | 4.149254 | false | false | false |
sql-machine-learning/sqlflow | python/runtime/xgboost/tracker.py | 1 | 17518 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This was taken from
# https://github.com/dmlc/dmlc-core/blob/master/tracker/dmlc_tracker/tracker.py
# License: https://github.com/dmlc/dmlc-core/blob/master/LICENSE
# Note: this file would not be needed if we could use xgboost>=1.0.0, but
# pai-tf 1.15 doesn't support Python 3, which xgboost>=1.0.0 requires.
"""
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
# pylint: disable=invalid-name, missing-docstring
# pylint: disable=too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements
from __future__ import absolute_import
import argparse
import logging
import os
import socket
import struct
import subprocess
import sys
import time
from threading import Thread
class ExSocket(object):
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return b''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s.encode())
def recvstr(self):
slen = self.recvint()
return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
return socket.getaddrinfo(host, None)[0][4][0]
def get_family(addr):
return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = get_some_ip(s_addr[0])
magic = slave.recvint()
assert magic == kMagic, 'invalid magic number=%d from %s' % (magic,
self.host)
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
self.wait_accept = 0
self.port = None
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev != -1 and rprev != rank:
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext != -1 and rnext != rank:
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
# all connection was successfully setup
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=9091, port_end=9999):
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind((hostIP, port))
self.port = port
break
except socket.error as e:
if e.errno in [98, 48]:
continue
else:
raise
sock.listen(256)
self.sock = sock
self.hostIP = hostIP
self.thread = None
self.start_time = None
self.end_time = None
self.nslave = nslave
logging.info('start listen on %s:%d', hostIP, self.port)
def __del__(self):
self.sock.close()
@staticmethod
def get_neighbor(rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank // 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
def slave_envs(self):
"""
get enviroment variables for slaves
can be passed in as args or envs
"""
return {
'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port
}
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
def get_link_map(self, nslave):
"""
get the link map, this is a bit hacky, call for better algorithm
to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0: 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
# set of nodes that finishes the job
shutdown = {}
# set of nodes that is waiting for connections
wait_conn = {}
# maps job id to rank
job_map = {}
# list of workers that is pending to be assigned rank
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
logging.info(msg.strip())
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
                logging.debug('Receive %s signal from %d', s.cmd, s.rank)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
# set of nodes that is pending for getting up
todo_nodes = list(range(nslave))
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert len(todo_nodes) != 0
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key=lambda x: x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map,
ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.debug(
                        'Receive %s signal from %s; assign rank %d', s.cmd,
s.host, s.rank)
if len(todo_nodes) == 0:
logging.info('@tracker All of %d nodes getting started',
nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
            logging.debug('Receive %s signal from %d', s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
        logging.info('@tracker All nodes finished the job')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish',
str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
self.thread = Thread(target=run, args=())
self.thread.setDaemon(True)
self.thread.start()
def join(self):
while self.thread.isAlive():
self.thread.join(100)
def alive(self):
return self.thread.isAlive()
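# Illustrative sketch only (not part of the original script): start a rabit
# tracker and collect the env vars each worker process would need. Note that
# constructing RabitTracker binds a listening port immediately.
def _example_start_rabit_tracker(nworker=2):
    tracker = RabitTracker(hostIP=get_host_ip(), nslave=nworker)
    envs = tracker.slave_envs()  # DMLC_TRACKER_URI / DMLC_TRACKER_PORT
    tracker.start(nworker)
    return tracker, envs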
class PSTracker(object):
"""
Tracker module for PS
"""
def __init__(self, hostIP, cmd, port=9091, port_end=9999, envs=None):
"""
Starts the PS scheduler
"""
self.cmd = cmd
if cmd is None:
return
envs = {} if envs is None else envs
self.hostIP = hostIP
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind(('', port))
self.port = port
sock.close()
break
except socket.error:
continue
env = os.environ.copy()
env['DMLC_ROLE'] = 'scheduler'
env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
env['DMLC_PS_ROOT_PORT'] = str(self.port)
for k, v in envs.items():
env[k] = str(v)
self.thread = Thread(target=(lambda: subprocess.check_call(
self.cmd, env=env, shell=True, executable='/bin/bash')),
args=())
self.thread.setDaemon(True)
self.thread.start()
def join(self):
if self.cmd is not None:
while self.thread.isAlive():
self.thread.join(100)
def slave_envs(self):
if self.cmd is None:
return {}
else:
return {
'DMLC_PS_ROOT_URI': self.hostIP,
'DMLC_PS_ROOT_PORT': self.port
}
def alive(self):
if self.cmd is not None:
return self.thread.isAlive()
else:
return False
def get_host_ip(hostIP=None):
if hostIP is None or hostIP == 'auto':
hostIP = 'ip'
if hostIP == 'dns':
hostIP = socket.getfqdn()
elif hostIP == 'ip':
from socket import gaierror
try:
hostIP = socket.gethostbyname(socket.getfqdn())
except gaierror:
logging.warn('gethostbyname(socket.getfqdn()) failed... '
'trying on hostname()')
hostIP = socket.gethostbyname(socket.gethostname())
if hostIP.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(('10.255.255.255', 1))
hostIP = s.getsockname()[0]
return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
if nserver == 0:
pscmd = None
envs = {'DMLC_NUM_WORKER': nworker, 'DMLC_NUM_SERVER': nserver}
hostIP = get_host_ip(hostIP)
if nserver == 0:
rabit = RabitTracker(hostIP=hostIP, nslave=nworker)
envs.update(rabit.slave_envs())
rabit.start(nworker)
if rabit.alive():
fun_submit(nworker, nserver, envs)
else:
pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
envs.update(pserver.slave_envs())
if pserver.alive():
fun_submit(nworker, nserver, envs)
if nserver == 0:
rabit.join()
else:
pserver.join()
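# Illustrative sketch only (not part of the original script): the callback
# shape expected by submit() above; a real implementation would launch the
# worker processes with these env vars, e.g. via subprocess.
def _example_fun_submit(nworker, nserver, envs):
    logging.info('would launch %d workers / %d servers with envs=%s',
                 nworker, nserver, envs)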
def start_rabit_tracker(args):
"""Standalone function to start rabit tracker.
Parameters
----------
args: arguments to start the rabit tracker.
"""
envs = {
'DMLC_NUM_WORKER': args.num_workers,
'DMLC_NUM_SERVER': args.num_servers
}
rabit = RabitTracker(hostIP=get_host_ip(args.host_ip),
nslave=args.num_workers)
envs.update(rabit.slave_envs())
rabit.start(args.num_workers)
sys.stdout.write('DMLC_TRACKER_ENV_START\n')
# simply write configuration to stdout
for k, v in envs.items():
sys.stdout.write('%s=%s\n' % (k, str(v)))
sys.stdout.write('DMLC_TRACKER_ENV_END\n')
sys.stdout.flush()
rabit.join()
def main():
"""Main function if tracker is executed in standalone mode."""
parser = argparse.ArgumentParser(description='Rabit Tracker start.')
parser.add_argument('--num-workers',
required=True,
type=int,
                        help='Number of worker processes to be launched.')
parser.add_argument(
'--num-servers',
default=0,
type=int,
        help='Number of server processes to be launched. Only used in PS jobs.')
parser.add_argument(
'--host-ip',
default=None,
type=str,
        help=('Host IP address; this is only needed ' +
'if the host IP cannot be automatically guessed.'))
parser.add_argument('--log-level',
default='INFO',
type=str,
choices=['INFO', 'DEBUG'],
help='Logging level of the logger.')
args = parser.parse_args()
fmt = '%(asctime)s %(levelname)s %(message)s'
if args.log_level == 'INFO':
level = logging.INFO
elif args.log_level == 'DEBUG':
level = logging.DEBUG
else:
raise RuntimeError("Unknown logging level %s" % args.log_level)
logging.basicConfig(format=fmt, level=level)
if args.num_servers == 0:
start_rabit_tracker(args)
else:
raise RuntimeError(
"Do not yet support start ps tracker in standalone mode.")
if __name__ == "__main__":
main()
| apache-2.0 | 5,236,278,495,456,271,000 | 31.321033 | 79 | 0.527572 | false | 3.741563 | false | false | false |
ppke-nlpg/purepos-python3 | purepos/model/suffixguesser.py | 1 | 3098 | #!/usr/bin/env python3
# -*- coding: utf-8, vim: expandtab:ts=4 -*-
###############################################################################
# Copyright (c) 2015 Móréh, Tamás
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v3
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/
#
# This file is part of PurePos-Python3.
#
# PurePos-Python3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PurePos-Python3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# Contributors:
# Móréh, Tamás - initial API and implementation
##############################################################################
__author__ = '[email protected]'
import math
UNKNOWN_VALUE = -99.0
class HashSuffixGuesser: # (BaseSuffixGuesser):
@staticmethod
def max_probability_tag(probabilities: dict) -> int:
m = max(probabilities.items(), key=lambda x: x[1])
return m[0]
def __init__(self, freq_table: dict, theta: float):
self.freq_table = freq_table
self.theta = theta
self.theta_plus_one = theta + 1
self.mapper = None
self.lemma_mapper = None
def tag_log_probabilities(self, word) -> dict:
return {k: math.log(v) for k, v in self.tag_probabilities(word).items()}
def tag_probabilities(self, word) -> dict:
mret = dict()
for i in range(len(word), -1, -1):
suffix_value = self.freq_table.get(word[i:], [dict(), 0])
mret.update({tag: (mret.get(tag, 0.0) + (float(val) / suffix_value[1] * self.theta))
/ self.theta_plus_one
for tag, val in suffix_value[0].items()})
return mret
def tag_log_probability(self, word, tag) -> float:
prob = self.tag_probability(word, tag)
return math.log(prob) if prob > 0 else UNKNOWN_VALUE
def tag_probability(self, word, tag) -> float:
if self.mapper is not None:
tag = self.mapper.map(tag)
return self.tag_probabilities(word).get(tag, 0.0)
# todo not used?
def tag_prob_hunpos(self, word, tag) -> float:
ret = 0.0
for i in range(len(word)-1, -1, -1):
suffix_value = self.freq_table.get(word[:i])
if suffix_value is not None:
tag_suff_freq = suffix_value[0].get(tag)
if tag_suff_freq is not None:
ret = (ret + (tag_suff_freq / suffix_value[1] * self.theta))\
/ self.theta_plus_one
else:
break
return ret
def __str__(self):
return str(self.freq_table)
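# Illustrative sketch only (not part of the original module): the assumed
# freq_table layout is suffix -> [tag_count_dict, total_count], which is how
# tag_probabilities() above reads it. All numbers below are made up.
def _example_guesser_usage():
    freq_table = {
        "": [{1: 6, 2: 4}, 10],    # empty suffix: global tag counts
        "ing": [{2: 3}, 3],        # words ending in "ing" were always tag 2
    }
    guesser = HashSuffixGuesser(freq_table, theta=1.0)
    probs = guesser.tag_probabilities("running")
    return HashSuffixGuesser.max_probability_tag(probs)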
| lgpl-3.0 | -6,601,740,663,050,906,000 | 37.17284 | 96 | 0.584088 | false | 3.676576 | false | false | false |
rajul/tvb-framework | tvb/core/entities/transient/filtering.py | 1 | 5667 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Bogdan Neacsa <[email protected]>
"""
import json
from tvb.core.entities import model
from tvb.basic.filters.chain import FilterChain
class StaticFiltersFactory():
"""
Factory class to build lists with static used filters through the application.
"""
RELEVANT_VIEW = "Relevant view"
FULL_VIEW = "Full view"
@staticmethod
def build_datatype_filters(selected=RELEVANT_VIEW, single_filter=None):
"""
Return all visibility filters for data structure page, or only one filter.
"""
filters = {StaticFiltersFactory.FULL_VIEW: FilterChain(StaticFiltersFactory.FULL_VIEW),
StaticFiltersFactory.RELEVANT_VIEW: FilterChain(StaticFiltersFactory.RELEVANT_VIEW,
[FilterChain.datatype + '.visible'],
[True], operations=["=="])}
if selected is None or len(selected) == 0:
selected = StaticFiltersFactory.RELEVANT_VIEW
if selected in filters:
filters[selected].selected = True
if single_filter is not None:
if single_filter in filters:
return filters[single_filter]
else:
### We have some custom filter to build
return StaticFiltersFactory._build_custom_filter(single_filter)
return filters.values()
@staticmethod
def _build_custom_filter(filter_data):
"""
        At this point, filter_data should be a dictionary of the form:
        {'type' : 'filter_type', 'value' : 'filter_value'}
If 'filter_type' is not handled just return None.
"""
filter_data = json.loads(filter_data)
if filter_data['type'] == 'from_burst':
return FilterChain('Burst', [FilterChain.datatype + '.fk_parent_burst'],
[filter_data['value']], operations=["=="])
if filter_data['type'] == 'from_datatype':
return FilterChain('Datatypes', [FilterChain.operation + '.parameters'],
[filter_data['value']], operations=["like"])
return None
@staticmethod
def build_operations_filters(simulation_algorithm, logged_user_id):
"""
:returns: list of filters that can be applied on Project View Operations page.
"""
new_filters = []
### Filter by algorithm / categories
new_filter = FilterChain("Omit Views", [FilterChain.algorithm_category + '.display'],
[False], operations=["=="])
new_filters.append(new_filter)
new_filter = FilterChain("Only Upload", [FilterChain.algorithm_category + '.rawinput'],
[True], operations=["=="])
new_filters.append(new_filter)
if simulation_algorithm is not None:
new_filter = FilterChain("Only Simulations", [FilterChain.algorithm_group + '.id'],
[simulation_algorithm.id], operations=["=="])
new_filters.append(new_filter)
### Filter by operation status
filtered_statuses = {model.STATUS_STARTED: "Only Running",
model.STATUS_ERROR: "Only with Errors",
model.STATUS_CANCELED: "Only Canceled",
model.STATUS_FINISHED: "Only Finished",
model.STATUS_PENDING: "Only Pending" }
for status, title in filtered_statuses.iteritems():
new_filter = FilterChain(title, [FilterChain.operation + '.status'], [status], operations=["=="])
new_filters.append(new_filter)
### Filter by author
new_filter = FilterChain("Only mine", [FilterChain.operation + '.fk_launched_by'],
[logged_user_id], operations=["=="])
new_filters.append(new_filter)
### Filter by other flags
new_filter = FilterChain("Only relevant", [FilterChain.operation + '.visible'], [True], operations=["=="])
new_filter.selected = True
new_filters.append(new_filter)
return new_filters
| gpl-2.0 | 6,258,488,630,743,793,000 | 43.273438 | 114 | 0.613552 | false | 4.232263 | false | false | false |
JanCaha/Line-of-Sight-Analyst | src/los/prepare_global_los.py | 1 | 6212 | # coding=utf-8
import math
import arcpy
import functions_validation as fv
import functions_visibility as visibility
from los import functions_arcmap
class PrepareGlobalLoS(object):
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "Create Global Lines of Sight"
self.description = "A tool to create Lines of Sight from observer to target points and further beyond target " \
"points to the spatial extent of the surface layer. This is necessary to analyze targets " \
"relation to the global horizon. The shapefile itself does not store information about " \
"observer's and target's offsets. This information is stored in appropriate fields."
self.canRunInBackground = False
def getParameterInfo(self):
"""Define parameter definitions"""
param0 = arcpy.Parameter(
displayName="Surface",
name="in_surface",
datatype="GPRasterLayer",
parameterType="Required",
direction="Input")
param1 = arcpy.Parameter(
displayName="Observer points",
name="in_observers",
datatype="GPFeatureLayer",
parameterType="Required",
direction="Input")
param1.filter.list = ["Point"]
param2 = arcpy.Parameter(
displayName="Observer points offset",
name="in_observer_offset",
datatype="Field",
parameterType="Required",
direction="Input")
param2.filter.list = ["Double"]
param2.parameterDependencies = [param1.name]
param2.enabled = 0
param3 = arcpy.Parameter(
displayName="Target points",
name="in_targets",
datatype="GPFeatureLayer",
parameterType="Required",
direction="Input")
param3.filter.list = ["Point"]
param4 = arcpy.Parameter(
displayName="Target points offset",
name="in_target_offset",
datatype="Field",
parameterType="Required",
direction="Input")
param4.filter.list = ["Double"]
param4.parameterDependencies = [param3.name]
param4.enabled = 0
param5 = arcpy.Parameter(
displayName="Sampling distance",
name="in_sampling_distance",
datatype="GPDouble",
parameterType="Required",
direction="Input")
# param5.value = 0
param6 = arcpy.Parameter(
displayName="Output feature layer",
name="in_output_layer",
datatype="GPFeatureLayer",
parameterType="Required",
direction="output")
params = [param0, param1, param2, param3, param4, param5, param6]
return params
def isLicensed(self):
"""Set whether tool is licensed to execute."""
if arcpy.CheckOutExtension("spatial") == "CheckedOut" and arcpy.CheckOutExtension("3D") == "CheckedOut":
return True
else:
return False
def updateParameters(self, parameters):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
fv.enableParamIfPoint(parameters, 1, 2)
fv.enableParamIfPoint(parameters, 3, 4)
if not parameters[6].value:
parameters[6].value = str(arcpy.env.workspace) + "\\Global_LoS"
if parameters[0].value and not parameters[5].altered:
parameters[5].value = str(arcpy.Describe(parameters[0].valueAsText).meanCellHeight)
return
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
fv.checkProjected(parameters, 1)
fv.checkProjected(parameters, 3)
return
def execute(self, parameters, messages):
"""The source code of the tool."""
surface = parameters[0].valueAsText
observer_points = parameters[1].valueAsText
observer_offset = parameters[2].valueAsText
target_points = parameters[3].valueAsText
target_offset = parameters[4].valueAsText
sampling_distance = parameters[5].valueAsText
output_los = parameters[6].valueAsText
sightlines = arcpy.ConstructSightLines_3d(observer_points, target_points,
arcpy.CreateScratchName(prefix="sightlines",
workspace=arcpy.env.scratchGDB),
observer_offset, target_offset, "<None>", 1,
"NOT_OUTPUT_THE_DIRECTION")
raster_extent = arcpy.sa.Raster(surface).extent
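        # Upper bound on the possible line-of-sight length: the diagonal of a square
        # whose side is the larger of the raster's X/Y extents (side * sqrt(2)).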
maximal_possible_distance = math.sqrt(
math.pow(max(raster_extent.XMax - raster_extent.XMin, raster_extent.YMax - raster_extent.YMin), 2) * 2)
spatial_ref = arcpy.Describe(sightlines).spatialReference
visibility.makeGlobalLoS(sightlines, maximal_possible_distance, spatial_ref)
arcpy.AddField_management(sightlines, "ID_OBSERV", "LONG")
arcpy.CalculateField_management(sightlines, "ID_OBSERV", "!OID_OBSERV!", "PYTHON")
arcpy.AddField_management(sightlines, "ID_TARGET", "LONG")
arcpy.CalculateField_management(sightlines, "ID_TARGET", "!OID_TARGET!", "PYTHON")
arcpy.DeleteField_management(sightlines, ["OID_TARGET", "OID_OBSERV"])
temp_los_name = arcpy.CreateScratchName(prefix="los", workspace=arcpy.env.scratchGDB)
arcpy.InterpolateShape_3d(surface, sightlines, temp_los_name, sample_distance=sampling_distance, method="BILINEAR")
visibility.updateLoS(temp_los_name, output_los, sightlines, target_points, True)
arcpy.DeleteField_management(output_los, "SourceOID")
visibility.verifyShapeStructure(sightlines, output_los)
functions_arcmap.addLayer(output_los)
return
| gpl-3.0 | 6,361,416,436,611,535,000 | 39.337662 | 123 | 0.611558 | false | 4.488439 | false | false | false |
arizvisa/syringe | template/archive/lha.py | 1 | 5683 | import ptypes
from ptypes import *
# integral types
class u8(pint.uint8_t): pass
class u16(pint.uint16_t): pass
class u32(pint.uint32_t): pass
class u64(pint.uint64_t): pass
class s8(pint.sint8_t): pass
class s16(pint.sint16_t): pass
class s32(pint.sint32_t): pass
class s64(pint.sint64_t): pass
# lzh-specific integrals
class method_id(pstr.string):
length = 5
def set(self, value):
if not isinstance(value, tuple):
            return super(method_id, self).set(value)
type, version = value
if type == 'lh':
versionmap = '0123456789abcdef'
if version is None:
version = versionmap.index('d')
elif version == 'x':
                return super(method_id, self).set('-lhx-')
try:
res = '-lh{:s}-'.format(versionmap[version])
except (IndexError, TypeError):
raise NotImplementedError((type, version))
            return super(method_id, self).set(res)
elif type in {'pc', 'pm'}:
versionmap = '012'
if version is None:
res = '-{:s}0-'.format(type)
                return super(method_id, self).set(res)
elif version == 's':
res = '-{:s}s-'.format(type)
                return super(method_id, self).set(res)
try:
res = '-{:s}{:s}-'.format(type, versionmap[version])
except (IndexError, TypeError):
raise NotImplementedError((type, version))
            return super(method_id, self).set(res)
elif type == 'lz':
versionmap = '012345678'
if version == 's':
res = '-lzs-'
                return super(method_id, self).set(res)
elif version is None:
res = '-lz4-'
                return super(method_id, self).set(res)
try:
res = '-lz{:s}-'.format(versionmap[version])
except (IndexError, TypeError):
raise NotImplementedError((type, version))
            return super(method_id, self).set(res)
raise NotImplementedError((type, version))
def get(self):
res = self.str()
if res.startswith('-') and res.endswith('-'):
res = res[1:-1]
if res.startswith('lh'):
versionmap = '0123456789abcdef'
res = res[2:]
if res == 'd':
return 'lh', None
elif res == 'x':
return 'lh', 'x'
return 'lh', versionmap.index(res)
elif res.startswith('pc') or res.startswith('pm'):
type, version = res[:2], res[2:]
versionmap = '012'
if version == 's':
return type, version
return type, versionmap.index(version)
elif res.startswith('lz'):
versionmap = '012345678'
type, version = res[:2], res[2:]
if version == 's':
return 'lz', version
elif version == '4':
return 'lz', None
return 'lz', versionmap.index(version)
raise NotImplementedError
raise ValueError(res)
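# Usage sketch (assumes an instantiated method_id string `m`):
#   m.set(('lh', 5))    # stores the literal '-lh5-'
#   m.get()             # -> ('lh', 5)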
# extension header levels
class Level(ptype.definition): cache = {}
@Level.define
class Level0(pstruct.type):
type = 0
def __filename(self):
res = self['filename-length'].li.int()
return dyn.clone(pstr.string, length=res)
_fields_ = [
(u8, 'filename-length'),
(__filename, 'filename'),
(u16, 'crc'),
]
@Level.define
class Level1(pstruct.type):
type = 1
def __filename(self):
res = self['filename-length'].li.int()
return dyn.clone(pstr.string, length=res)
_fields_ = [
(u8, 'filename-length'),
(__filename, 'filename'),
(u16, 'crc'),
(u8, 'os-identifier'),
(u16, 'next-header-size'),
]
# base structures
class Signature(pstruct.type):
_fields_ = [
(u8, 'size'),
(u8, 'checksum'),
(method_id, 'method'),
]
class Attributes(pstruct.type):
class _timestamp(u32): pass
class _attribute(u8): pass
_fields_ = [
(u32, 'compressed-size'),
(u32, 'uncompressed-size'),
(_timestamp, 'timestamp'),
(_attribute, 'file-attribute'),
(u8, 'level-identifier'),
]
def Level(self):
return self['level-identifier'].int()
class Header(pstruct.type):
def __extended(self):
res = self['attributes'].li
return Level.lookup(res.Level())
def __padding_header(self):
res = self['signature'].li
cb = res['size'].int()
total = 2 + sum(self[fld].li.size() for fld in ['signature', 'attributes', 'extended'])
return dyn.block(max(0, cb - total))
_fields_ = [
(Signature, 'signature'),
(Attributes, 'attributes'),
(__extended, 'extended'),
(__padding_header, 'padding'),
]
class File(pstruct.type):
def __data(self):
res = self['header'].li
return dyn.block(res['attributes']['compressed-size'].int())
_fields_ = [
(Header, 'header'),
(__data, 'data'),
]
if __name__ == '__main__':
import ptypes, archive.lha
ptypes.setsource(ptypes.prov.file('c:/users/user/Downloads/fcgb2.lzh', mode='r'))
z = archive.lha.File()
z = z.l
print(z.source.size())
print(z['header']['signature'])
print(z['header']['attributes'])
print(z['header'])
print(z['header']['filename'])
| bsd-2-clause | 6,015,793,706,967,043,000 | 29.390374 | 95 | 0.521204 | false | 3.887141 | false | false | false |
kawamon/hue | desktop/core/ext-py/djangorestframework-3.9.4/rest_framework/management/commands/generateschema.py | 3 | 1501 | from django.core.management.base import BaseCommand
from rest_framework.compat import coreapi
from rest_framework.renderers import (
CoreJSONRenderer, JSONOpenAPIRenderer, OpenAPIRenderer
)
from rest_framework.schemas.generators import SchemaGenerator
class Command(BaseCommand):
help = "Generates configured API schema for project."
def add_arguments(self, parser):
parser.add_argument('--title', dest="title", default=None, type=str)
parser.add_argument('--url', dest="url", default=None, type=str)
parser.add_argument('--description', dest="description", default=None, type=str)
parser.add_argument('--format', dest="format", choices=['openapi', 'openapi-json', 'corejson'], default='openapi', type=str)
def handle(self, *args, **options):
assert coreapi is not None, 'coreapi must be installed.'
generator = SchemaGenerator(
url=options['url'],
title=options['title'],
description=options['description']
)
schema = generator.get_schema(request=None, public=True)
renderer = self.get_renderer(options['format'])
output = renderer.render(schema, renderer_context={})
self.stdout.write(output.decode('utf-8'))
def get_renderer(self, format):
renderer_cls = {
'corejson': CoreJSONRenderer,
'openapi': OpenAPIRenderer,
'openapi-json': JSONOpenAPIRenderer,
}[format]
return renderer_cls()
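# Typical invocation (assuming the command is available in a Django project):
#   python manage.py generateschema --title "My API" --format openapi > schema.yml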
| apache-2.0 | 6,174,646,717,633,221,000 | 35.609756 | 132 | 0.658228 | false | 4.204482 | false | false | false |
TheLazyHase/dragon_dice_simulator | business/effect/targeted.py | 1 | 9121 | # -*- coding: utf-8 *-*
# Copyright (c) 2013 Tisserant Pierre
#
# This file is part of Dragon dice simulator.
#
# Dragon dice simulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragon dice simulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Dragon dice simulator. If not, see <http://www.gnu.org/licenses/>.
from business.effect import Effect
class TargetedKillEffect(Effect):
@property
def name(self):
return 'Slay'
@property
def description(self):
        return '%s targeted enemy units must roll ID or die' % self.amount
@property
def key(self):
return 'targeted_kill'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted death effect should be resolved and saved against'
self.expired = True
class TargetedManeuverKillByHealthEffect(Effect):
@property
def name(self):
return 'Stomp'
@property
def description(self):
        return 'Target unit(s) worth %s health or less must roll a maneuver or die' % self.amount
@property
def key(self):
return 'targeted_maneuver_kill_by_health'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted death effect should be resolved and saved against'
self.expired = True
class TargetedManeuverKillBuryingByHealthEffect(Effect):
@property
def name(self):
return 'Crush'
@property
def description(self):
        return 'Target unit(s) worth %s health or less must roll a maneuver or die. Those who die must roll a save or be buried.' % self.amount
@property
def key(self):
return 'targeted_maneuver_kill_bury_by_health'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted death effect should be resolved and saved against'
self.expired = True
class TargetedKillBuryingByHealthEffect(Effect):
@property
def name(self):
return 'Poison'
@property
def description(self):
        return 'Target unit(s) worth %s health or less must roll a save or die. Those who die must roll a save or be buried.' % self.amount
@property
def key(self):
return 'targeted_kill_bury_by_health'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted death effect should be resolved and saved against'
self.expired = True
class TargetedBuryEffect(Effect):
@property
def name(self):
return 'Swallow'
@property
def description(self):
        return '%s targeted enemy units must roll ID or be killed and buried' % self.amount
@property
def key(self):
return 'targeted_bury'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted death effect should be resolved and saved against'
self.expired = True
class TargetedDamageEffect(Effect):
@property
def name(self):
return 'Bullseye'
@property
def description(self):
return '%s damage targeted as the active player choose' % self.amount
@property
def key(self):
return 'targeted_damage'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
self.expired = True
class TargetedUnsecableDamageEffect(Effect):
def __init__(self, amount, increment):
self.amount = amount
self.expired = False
self.increment = increment
@property
def name(self):
return 'Kick'
@property
def description(self):
        return '%s chosen unit(s) suffer %s damage' % (self.amount, self.increment)
def stack(self, effect):
stackable = False
if (effect.key == self.key):
if (self.increment == effect.increment):
self.amount += effect.amount
stackable = True
else:
raise RuntimeError('Trying to stack two different effect')
return stackable
@property
def key(self):
return 'targeted_unsecable_damage'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
self.expired = True
class TargetedUnsecableBuryingDamageEffect(Effect):
def __init__(self, amount, increment):
self.amount = amount
self.expired = False
self.increment = increment
@property
def name(self):
return 'Flaming Arrows'
@property
def description(self):
        return '%s chosen unit(s) suffer %s damage; killed units must save or be buried' % (self.amount, self.increment)
def stack(self, effect):
stackable = False
if (effect.key == self.key):
if (self.increment == effect.increment):
self.amount += effect.amount
stackable = True
else:
raise RuntimeError('Trying to stack two different effect')
return stackable
@property
def key(self):
return 'targeted_unsecable_burying_damage'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
self.expired = True
class TargetedUnsecableInstantBuryingDamageEffect(Effect):
def __init__(self, amount, increment):
self.amount = amount
self.expired = False
self.increment = increment
@property
def name(self):
return 'Gore'
@property
def description(self):
        return '%s chosen unit(s) suffer %s damage; killed units are buried' % (self.amount, self.increment)
def stack(self, effect):
stackable = False
if (effect.key == self.key):
if (self.increment == effect.increment):
self.amount += effect.amount
stackable = True
else:
raise RuntimeError('Trying to stack two different effect')
return stackable
@property
def key(self):
return 'targeted_unsecable_instant_burying_damage'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
self.expired = True
class TargetedIDKillEffect(Effect):
@property
def name(self):
return 'Decapitate/Impale'
@property
def description(self):
        return 'After saves are rolled, as an Instant effect, choose and kill %s unit(s) that rolled an ID' % (self.amount)
@property
def key(self):
return 'targeted_ID_kill'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
self.expired = True
class TargetedIDKillByHealthEffect(Effect):
@property
def name(self):
return 'Choke'
@property
def description(self):
        return 'After saves are rolled, as an Instant effect, choose and kill up to %s health worth of unit(s) that rolled an ID' % (self.amount)
@property
def key(self):
return 'targeted_ID_kill_by_health'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
self.expired = True
class TargetedJawDragonKillEffect(Effect):
@property
def name(self):
return 'Decapitate/Impale'
def __init__(self, amount, default_damage):
self.amount = amount
self.expired = False
self.default_damage = default_damage
def stack(self, effect):
stackable = False
if (effect.key == self.key):
if (self.default_damage == effect.default_damage):
self.amount += effect.amount
stackable = True
else:
raise RuntimeError('Trying to stack two different effect')
return stackable
@property
def description(self):
        return 'Kill up to %s dragon(s) that rolled jaws; if there are not enough targets, inflict %s damage to any dragon' % (self.amount, self.default_damage)
@property
def key(self):
return 'targeted_jaws_kill'
def before_resolution(self, army, opposing_armies):
print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
self.expired = True
| gpl-3.0 | 7,814,475,981,055,872,000 | 30.236301 | 159 | 0.656836 | false | 3.988194 | false | false | false |
chleh/pvd-tool | src/pvd_tool.py | 1 | 48176 | #!/usr/bin/python
#
# Copyright (C) 2015 Christoph Lehmann
#
# This file is part of pvd-tool.
#
# pvd-tool is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pvd-tool is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pvd-tool. If not, see <http://www.gnu.org/licenses/>.
#
# TODO
# * more metadata written to csv (timestamp, cmdline args, ...)
import sys
import argparse
import os
import xml.etree.cElementTree as ET
import numpy as np
import re
from collections import OrderedDict
import json
import imp
import time # for performance measurement
import numbers
import six
import plot
from helpers import *
# Plot = plot.MPLPlot
Plot = plot.GnuPlot
time_total = time.time()
time_import_vtk = 0.0
def die(msg, status=1):
sys.stderr.write(msg)
sys.exit(status)
def warn(msg):
sys.stderr.write("WARNING: {0}\n".format(msg))
class JsonSer(json.JSONEncoder):
def default(self, o):
try:
iterable = iter(o)
return OrderedDict(iterable)
except TypeError:
if isinstance(o, (DoV, Point, Cell)):
return str(o)
return json.JSONEncoder.default(self, o)
def getFilesTimes(xmlTree, pathroot):
node = xmlTree.getroot()
if node.tag != "VTKFile": return None, None
children = list(node)
if len(children) != 1: return None, None
node = children[0]
if node.tag != "Collection": return None, None
ts = []
fs = []
for child in node:
if child.tag != "DataSet": return None, None
ts.append(float(child.get("timestep")))
fs.append(relpathfrom(pathroot, child.get("file")))
return ts, fs
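# getFilesTimes expects the standard ParaView collection layout, for example
# (file names and timesteps here are illustrative):
#   <VTKFile type="Collection" ...>
#     <Collection>
#       <DataSet timestep="0.0" group="" part="0" file="out_0.vtu"/>
#     </Collection>
#   </VTKFile>
# write_pvd below emits exactly this structure.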
def relpathfrom(origin, relpath):
if os.path.isabs(relpath):
return relpath
return os.path.join(origin, relpath)
# TODO allow for wildcard attributes
# returns: list of (index, name)
def get_attribute_idcs(fieldData, attrs):
idcs = []
idcs_set = set()
for a in attrs:
found = False
num_arr = fieldData.GetNumberOfArrays()
for i in xrange(num_arr):
n = fieldData.GetArray(i).GetName()
if a.matches(n):
if i not in idcs_set:
idcs.append((i, n, a.get_axis()))
idcs_set.add(i)
found = True
# if num_arr != 0 and not found:
# warn("Attribute %s not found" % a)
return idcs
def apply_script(fcts, timesteps, grids):
assert len(timesteps) == len(grids)
res = [ None for _ in range(len(grids)) ]
for i in xrange(len(grids)):
ts = timesteps[i]
grid = grids[i]
ngrid = vtk.vtkUnstructuredGrid()
ngrid.DeepCopy(grid)
# TODO extend for cells
gridPoints = ngrid.GetPoints()
numPt = gridPoints.GetNumberOfPoints()
gridPD = ngrid.GetPointData()
for ai in xrange(gridPD.GetNumberOfArrays()):
arr = gridPD.GetArray(ai)
attr = arr.GetName()
for pi in xrange(numPt):
coords = gridPoints.GetPoint(pi)
tup = arr.GetTuple(pi)
if attr in fcts:
ntup = fcts[attr](ts, coords)
if type(ntup) == float: ntup = (ntup,)
assert len(tup) == len(ntup)
else:
warn("no function found for attribute {}".format(attr))
ntup = None
arr.SetTuple(pi, ntup)
res[i] = ngrid
return res
def get_point_data_from_grid(
point, point_id, grid, src, attrIdcs, attrData, incl_coords,
rec, meta
):
gridPoints = grid.GetPoints()
npts = gridPoints.GetNumberOfPoints()
if point_id >= npts or point_id < 0:
warn("point index {} out of bounds [0,{}]\n".format(point_id, npts-1))
return
if incl_coords:
coords = gridPoints.GetPoint(point_id)
for ci, coord in enumerate(coords):
rec.append(coord)
meta.append(Meta(src, DoV.DOM, "coord", ci, point))
for ai, a in enumerate(attrData):
an = attrIdcs[ai][1]
comps = a.GetTuple(point_id)
for ci, comp in enumerate(comps):
comp = comps[ci]
rec.append(comp)
meta.append(Meta(src, DoV.VAL, an, ci, point, axis=attrIdcs[ai][2]))
def get_cell_data_from_grid(
cell, grid, src, attrIdcs, attrData, incl_coords,
rec, meta
):
gridCells = grid.GetCells()
ncells = gridCells.GetNumberOfCells()
c = cell.get()
if c >= ncells or c < 0:
warn("{} out of bounds [0,{}]\n".format(cell, ncells-1))
return
if incl_coords:
# add dummy coordinates
coords = grid.GetPoints().GetPoint(0)
for ci, _ in enumerate(coords):
rec.append(0.0)
meta.append(Meta(src, DoV.DOM, "coord", ci, cell))
for ai, a in enumerate(attrData):
an = attrIdcs[ai][1]
comps = a.GetTuple(c)
for ci, comp in enumerate(comps):
comp = comps[ci]
rec.append(comp)
meta.append(Meta(src, DoV.VAL, an, ci, cell, axis=attrIdcs[ai][2]))
def filter_grid_ts(src, grid, timestep, attrs, points_cells, incl_coords):
gridPoints = grid.GetPoints()
gridCells = grid.GetCells()
attrIdcsPt = get_attribute_idcs(grid.GetPointData(), attrs)
attrDataPt = [ grid.GetPointData().GetArray(i) for i, _1, _2 in attrIdcsPt ]
attrIdcsCell = get_attribute_idcs(grid.GetCellData(), attrs)
attrDataCell = [ grid.GetCellData().GetArray(i) for i, _1, _2 in attrIdcsCell ]
npts = gridPoints.GetNumberOfPoints()
ncells = gridCells.GetNumberOfCells()
if (npts + ncells) > 0:
# categorize points: index or coordinates
coord_pts = []
map_point_indices = {} # maps point index in the list to point index in probeFilter
for i, point_cell in enumerate(points_cells):
if isinstance(point_cell, Point):
coords = point_cell.get_coords()
if coords:
map_point_indices[i] = len(coord_pts)
coord_pts.append(coords)
if coord_pts:
interpPts = vtk.vtkPoints()
for c in coord_pts:
interpPts.InsertNextPoint(*c)
interpData = vtk.vtkPolyData()
interpData.SetPoints(interpPts)
probeFilter = vtk.vtkProbeFilter()
probeFilter.SetSourceData(grid)
probeFilter.SetInputData(interpData)
probeFilter.Update()
grid_interpolated = probeFilter.GetOutput()
attrIdcsCoords = get_attribute_idcs(grid_interpolated.GetPointData(), attrs)
attrDataCoords = [ grid_interpolated.GetPointData().GetArray(i) for i, _ in attrIdcsCoords ]
rec = []
meta = []
rec.append(timestep)
meta.append(Meta(src, DoV.TIM, "time"))
for i, point_cell in enumerate(points_cells):
if isinstance(point_cell, Point):
if point_cell.get_coords():
p = map_point_indices[i]
get_point_data_from_grid(point_cell, p, grid_interpolated, src, attrIdcsCoords, attrDataCoords, incl_coords,
rec, meta)
else:
p = point_cell.get()
get_point_data_from_grid(point_cell, p, grid, src, attrIdcsPt, attrDataPt, incl_coords,
rec, meta)
elif isinstance(point_cell, Cell):
get_cell_data_from_grid(point_cell, grid, src, attrIdcsCell, attrDataCell, incl_coords,
rec, meta)
else:
print("Error: Given object is neither point nor cell index")
assert False
return rec, MetaList(meta)
return None, None
def filter_grid_dom(src, grid, attrs, points_cells, incl_coords):
gridPoints = grid.GetPoints()
gridCells = grid.GetCells()
attrIdcsPt = get_attribute_idcs(grid.GetPointData(), attrs)
attrDataPt = [ grid.GetPointData().GetArray(i) for i, _1, _2 in attrIdcsPt ]
attrIdcsCell = get_attribute_idcs(grid.GetCellData(), attrs)
attrDataCell = [ grid.GetCellData().GetArray(i) for i, _1, _2 in attrIdcsCell ]
npts = gridPoints.GetNumberOfPoints()
ncells = gridCells.GetNumberOfCells()
if points_cells is None:
points_cells = [ Point(i) for i in range(npts) ]
if (npts + ncells) > 0:
# categorize points: index or coordinates
coord_pts = []
map_point_indices = {} # maps point index in the list to point index in probeFilter
for i, point_cell in enumerate(points_cells):
if isinstance(point_cell, Point):
coords = point_cell.get_coords()
if coords:
map_point_indices[i] = len(coord_pts)
coord_pts.append(coords)
if coord_pts:
interpPts = vtk.vtkPoints()
for c in coord_pts:
interpPts.InsertNextPoint(*c)
interpData = vtk.vtkPolyData()
interpData.SetPoints(interpPts)
probeFilter = vtk.vtkProbeFilter()
probeFilter.SetSourceData(grid)
probeFilter.SetInputData(interpData)
probeFilter.Update()
grid_interpolated = probeFilter.GetOutput()
attrIdcsCoords = get_attribute_idcs(grid_interpolated.GetPointData(), attrs)
attrDataCoords = [ grid_interpolated.GetPointData().GetArray(i) for i, _1, _2 in attrIdcsCoords ]
recs = []
meta = []
meta.append(Meta(src, DoV.TIM, "ordinal number or dist from first point"))
first_loop = True
for i, point_cell in enumerate(points_cells):
x = point_cell.get_x_value()
if x is None: x = i
rec = [ x ]
tmp_meta = []
if isinstance(point_cell, Point):
if point_cell.get_coords():
p = map_point_indices[i]
get_point_data_from_grid(point_cell, p, grid_interpolated, src, attrIdcsCoords,
attrDataCoords, incl_coords, rec, tmp_meta)
else:
p = point_cell.get()
get_point_data_from_grid(point_cell, p, grid, src, attrIdcsPt,
attrDataPt, incl_coords, rec, tmp_meta)
elif isinstance(point_cell, Cell):
get_cell_data_from_grid(point_cell, grid, src, attrIdcsCell,
attrDataCell, incl_coords, rec, tmp_meta)
else:
print("Error: Given object is neither point nor cell index")
assert False
if first_loop:
first_loop = False
meta.extend(tmp_meta)
recs.append(rec)
return recs, MetaList(meta)
return None, None
def filter_grid_dom_old(src, grid, attrs):
gridPoints = grid.GetPoints()
attrIdcs = get_attribute_idcs(grid.GetPointData(), attrs)
attrData = [ grid.GetPointData().GetArray(i) for i, _ in attrIdcs ]
npts = gridPoints.GetNumberOfPoints()
meta = []
recs = []
first_loop = True
for p in xrange(gridPoints.GetNumberOfPoints()):
rec = []
coords = gridPoints.GetPoint(p)
for ci in xrange(len(coords)):
coord = coords[ci]
rec.append(coord)
if first_loop: meta.append(Meta(src, DoV.DOM, "coord %i" % ci))
for ai in xrange(len(attrData)):
a = attrData[ai]
an = attrIdcs[ai][1]
comps = a.GetTuple(p)
for ci in xrange(len(comps)):
comp = comps[ci]
rec.append(comp)
if first_loop: meta.append(Meta(src, DoV.VAL, "%s[%i]" % (an, ci)))
first_loop = False
recs.append(rec)
return recs, MetaList(meta)
def write_pvd(outfh, timesteps, vtus):
assert len(timesteps) == len(vtus)
outfh.write('<?xml version="1.0"?>\n'
'<VTKFile type="Collection" version="0.1" byte_order="LittleEndian" compressor="vtkZLibDataCompressor">\n'
' <Collection>\n')
for i in range(len(timesteps)):
outfh.write(' <DataSet timestep="{0}" group="" part="0" file="{1}"/>\n'.format(timesteps[i], vtus[i]))
outfh.write(" </Collection>\n"
"</VTKFile>")
outfh.close()
def write_csv(meta, records, outFile, precision, json_enc):
header = "Columns:\n"
header2 = "\n"
nc = len(meta)
header += "[\n"
old_meta = None
for i in xrange(nc):
# TODO: more tabular format for header
if old_meta and (
old_meta.src != meta[i].src or old_meta.dov != meta[i].dov
or old_meta.pex != meta[i].pex or old_meta.tfm != meta[i].tfm
): header += "\n"
meta[i].col = "{0:2}".format(i+1)
header += " {0}".format(json_enc.encode(meta[i]))
if i != nc-1: header += ","
header += "\n"
old_meta = meta[i]
# if i == 0:
# header2 += "{{:>{}}}".format(precision+5).format(meta[i].attr)
# else:
if i != 0:
header2 += " "
colwidth = precision + 7
else:
colwidth = precision + 5
attr = meta[i].attr
if len(attr) > colwidth:
attr = attr[:colwidth-2] + ".."
header2 += "{{:>{}}}".format(colwidth).format(attr)
header += "]\n" + header2
np.savetxt(outFile, records,
delimiter=" ",
fmt="%{}.{}g".format(precision+7, precision),
header=header)
def read_csv(fh, parse_header=True):
if isinstance(fh, six.string_types):
with open(fh) as fh_:
return read_csv(fh_, parse_header)
meta = None
if parse_header:
mode = 0 # initial
json_str = ""
while True:
lastpos = fh.tell()
line = fh.readline()
if not line: break
if line.startswith("#"):
line = line.lstrip("#").lstrip()
if mode == 0:
if line.startswith("Columns:"):
mode = 1
elif mode == 1: # "Columns:" in previous line
if line.rstrip() == "[":
mode = 2
json_str += line
elif not line:
# ignore empty line
pass
else:
warn("Unexpected header format. I will not attempt to process it.")
break
elif mode == 2: # assemble json
json_str += line
if line.rstrip() == "]":
break
elif not line.strip():
# ignore empty line
pass
else:
# no comment line
warn("unexpected end of header. Json found so far:\n{0}".format(json))
json_str = None
fh.seek(lastpos)
break
        if json_str:
meta = MetaList(json.loads(json_str, object_hook=Meta))
arr = np.loadtxt(fh)
return arr, meta
def gather_files(infh):
if isinstance(infh, str):
fn = infh
else:
fn = infh.name
if fn.endswith(".pvd"):
pathroot = os.path.dirname(fn)
pcdtree = ET.parse(infh)
timesteps, files = getFilesTimes(pcdtree, pathroot)
elif fn.endswith(".vtu"):
timesteps = [0]
files = [ fn ]
else:
die("File `%s' has unknown type" % fn)
return timesteps, files
def gather_grids(infh, reader, filefilter=None):
def get_grid(path):
reader.SetFileName(path)
reader.Update()
g = vtk.vtkUnstructuredGrid()
g.DeepCopy(reader.GetOutput())
return g
timesteps, fs = gather_files(infh)
grids = [ None ] * len(timesteps)
for i, (f, t) in enumerate(zip(fs, timesteps)):
if (not filefilter) or filefilter.filter(t, f):
grids[i] = get_grid(f)
return timesteps, grids, fs
def get_timeseries(src, grids, tss, attrs, points, incl_coords):
oldMeta = None
records = []
for i in xrange(len(grids)):
rec, meta = filter_grid_ts(src, grids[i], tss[i], attrs, points, incl_coords)
if rec is not None:
if oldMeta is None:
oldMeta = meta
else:
assert meta == oldMeta
records.append(rec)
return records, oldMeta
def get_point_data(src, grids, attrs, points_cells, output_coords):
oldMeta = None
records = []
meta = []
for i, g in enumerate(grids):
if g:
recs, meta = filter_grid_dom(src, g, attrs, points_cells, output_coords)
if oldMeta is None:
oldMeta = meta
else:
assert meta == oldMeta
records.append(recs)
else:
records.append(())
return records, meta
def combine_arrays(arrays):
if len(arrays) == 1: return arrays[0]
res = []
nr = len(arrays[0])
na = len(arrays)
for ri in range(nr):
row = []
for ai in range(na):
assert len(arrays[ai]) == nr
row += arrays[ai][ri]
res.append(row)
return res
def combine_domains(metas, recs):
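    # Non-value columns (time/coordinates) with the same label are merged into a
    # single column (their values are asserted equal across sources); value columns
    # are kept once per source.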
ncols = len(metas)
nmetas = []
nrecs = []
first_row = True
for row in recs:
assert len(row) == ncols
lbls = {}
nrow = []
for ci in range(ncols):
m = metas[ci]
val = row[ci]
if m.dov != DoV.VAL:
lbl = m.get_attr_id()
if lbl in lbls:
assert val == row[lbls[lbl]]
else:
lbls[lbl] = ci
nrow.append(val)
if first_row:
nmeta = Meta(m)
nmeta.src = None
nmetas.append(nmeta)
else:
nrow.append(val)
if first_row:
nmeta = Meta(m)
nmetas.append(nmeta)
first_row = False
nrecs.append(nrow)
return nmetas, nrecs
# argparse types
def InputFile(val):
parts = val.split(":", 2)
if len(parts) == 2:
try:
if parts[1] == "-":
fh = sys.stdin
else:
path = os.path.expanduser(parts[1])
assert os.path.isfile(path) and os.access(path, os.R_OK)
fh = path
        except (IOError, AssertionError):
warn("Warning: Could not open `{0}', will try `{1}' instead".format(parts[1], val))
else:
return parts[0], fh
if val == "-":
fh = sys.stdin
else:
try:
path = os.path.expanduser(val)
assert os.path.isfile(path) and os.access(path, os.R_OK)
fh = path
except AssertionError as e:
raise argparse.ArgumentTypeError("input file `{}' is not readable or not a file".format(path))
return None, fh
def DirectoryW(val):
# TODO implement
return val
def InputFileArgument(path):
path = os.path.expanduser(path)
assert os.path.isfile(path) and os.access(path, os.R_OK)
return path
def OutputFileArgument(path):
path = os.path.expanduser(path)
assert os.path.isfile(path) and os.access(path, os.W_OK)
return path
re_out_file = re.compile(r'^([%@^][0-9]+)+:')
def OutputFile(val):
m = re_out_file.match(val)
# if not m: raise argparse.ArgumentTypeError("`{0}' does not correspond to the output file path format".format(val))
if m:
path = val[m.end():]
tfm_and_num = val[m.start():m.end()-1]
else:
# TODO maybe add info message
path = val
tfm_and_num = "^0"
try:
if path == "-":
outfh = sys.stdout
else:
path = os.path.expanduser(path)
# assert os.path.isfile(path) and os.access(path, os.W_OK)
with open(path, "w") as outfh:
pass
outfh = path
except IOError as e:
raise argparse.ArgumentTypeError("I/O error({0}) when trying to open `{2}': {1}".format(e.errno, e.strerror, path))
spl = re.split(r'([%@^])', tfm_and_num)
assert len(spl) % 2 == 1 # empty string at the beginning, then pairs of [%@^] and a number
nums_tfms = []
for i in range(1, len(spl), 2):
tfm_char = spl[i]
if tfm_char == '^': do_transform = 0
if tfm_char == '@': do_transform = 1
if tfm_char == '%': do_transform = 2
nums_tfms.append((int(spl[i+1]), do_transform))
return (nums_tfms, outfh)
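# Output spec syntax parsed above (example values are illustrative): an optional
# prefix of one or more "<marker><input index>" pairs followed by ':' and a path.
# The marker selects the transform mode: '^' = raw data (0), '@' = transformed (1),
# '%' = both (2). E.g. "^0@1:out.csv" combines raw input 0 with transformed input 1;
# a bare path defaults to "^0:".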
def OutputDir(val):
m = re_out_file.match(val)
# if not m: raise argparse.ArgumentTypeError("`{0}' does not correspond to the output directory path format".format(val))
if m:
path = val[m.end():]
tfm_and_num = val[m.start():m.end()-1]
else:
# TODO maybe add info message
path = val
tfm_and_num = "^0"
path = os.path.expanduser(path)
d = os.path.dirname(path) or "."
if not os.path.isdir(d):
raise argparse.ArgumentTypeError("`{0}' is not a directory".format(d))
spl = re.split(r'([%@^])', tfm_and_num)
assert len(spl) % 2 == 1 # empty string at the beginning, then pairs of [%@^] and a number
nums_tfms = []
for i in range(1, len(spl), 2):
tfm_char = spl[i]
if tfm_char == '^': do_transform = 0
if tfm_char == '@': do_transform = 1
if tfm_char == '%': do_transform = 2
nums_tfms.append((int(spl[i+1]), do_transform))
return (nums_tfms, path)
def check_consistency_ts(args):
assert args.points_cells is not None and len(args.points_cells) != 0 # at least one point or cell must be chosen
for nums_tfms, _ in args.out_csv or []:
for num, tfm in nums_tfms:
assert num < len(args.in_files)
            assert args.script or not tfm # if a transform is requested, a script must be given
for nums_tfms, _ in args.out_plot or []:
for num, tfm in nums_tfms:
assert num < len(args.in_files)
            assert args.script or not tfm # if a transform is requested, a script must be given
def check_consistency_dom(args):
# assert (not args.out_pvd) != (not args.attr)
if args.points_cells:
t = None
for pc in args.points_cells:
if t is not None:
assert type(pc) is t # either only points or only cells are allowed
else:
t = type(pc)
for nums_tfms, _ in args.out_csv or []:
# assert len(nums_tfms) == 1 # currently no combination of whole grids allowed
for num, tfm in nums_tfms:
assert num < len(args.in_files)
            assert args.script or not tfm # if a transform is requested, a script must be given
for nums_tfms, _ in args.out_pvd or []:
assert len(nums_tfms) == 1 # currently no combination of whole grids allowed
for num, tfm in nums_tfms:
assert num < len(args.in_files)
            assert args.script or not tfm # if a transform is requested, a script must be given
def load_input_files(in_files, req_out, script_fh, script_params, filefilter):
assert len(in_files) > 0
# check that all input files are of the same type (either vtu or pvd)
input_type = None
for _, f in in_files:
path = f if isinstance(f, six.string_types) else f.name
if not path: continue
m = re.search(r'[.][^.]*$', path)
if m:
if input_type is None:
input_type = m.group(0)
elif input_type != m.group(0):
print("Error: You must not mix input files of different type!")
assert input_type == m.group(0)
if script_fh is not None and isinstance(script_fh, list): script_fh = script_fh[0]
reader = vtk.vtkXMLUnstructuredGridReader()
scr_loaded = False
if input_type == ".pvd":
timesteps = [ None for _ in range(len(in_files)) ]
vtuFiles = [ None for _ in range(len(in_files)) ]
vtuFiles_transformed = [ None for _ in range(len(in_files)) ]
vtuPaths = [ None for _ in range(len(in_files)) ]
# load and, if necessary, transform source files
for nums_tfms, _ in req_out:
for num, tfm in nums_tfms:
if not vtuFiles[num]:
timesteps[num], vtuFiles[num], vtuPaths[num] \
= gather_grids(in_files[num][1], reader, filefilter)
if tfm != 0:
assert script_fh is not None
if not scr_loaded:
script_args = {}
for kv in script_params:
k, v = kv.split('=', 2)
script_args[k] = v
analytical_model = imp.load_source("analytical_model", script_fh.name, script_fh)
analytical_model.init(script_args)
scr_loaded = True
if not vtuFiles_transformed[num]:
vtuFiles_transformed[num] = apply_script(analytical_model.get_attribute_functions(), timesteps[num], vtuFiles[num])
elif input_type == ".vtu":
timesteps = [ [ None ]*len(in_files) ]
vtuFiles = [ [ None ]*len(in_files) ]
vtuFiles_transformed = [ None ]
vtuPaths = [ [ None ]*len(in_files) ]
# load and, if necessary, transform source files
for nums_tfms, _ in req_out:
for num, tfm in nums_tfms:
assert num == 0
for fi, (_, in_file) in enumerate(in_files):
if filefilter.filter(fi, in_file):
_, vtu, vtuPath = gather_grids(in_file, reader)
timesteps[0][fi] = fi
vtuFiles[0][fi] = vtu[0]
vtuPaths[0][fi] = vtuPath[0]
if tfm != 0:
assert script_fh is not None
if not scr_loaded:
script_args = {}
for kv in script_params:
k, v = kv.split('=', 2)
script_args[k] = v
analytical_model = imp.load_source("analytical_model", script_fh.name, script_fh)
analytical_model.init(script_args)
scr_loaded = True
if not vtuFiles_transformed[0]:
vtuFiles_transformed[0] = apply_script(analytical_model.get_attribute_functions(), timesteps[0], vtuFiles[0])
return timesteps, vtuFiles, vtuFiles_transformed, vtuPaths
def get_output_data_diff(aggr_data, req_out):
for nums_tfms, outfh in req_out:
meta_attr_comp = {}
meta = []
recs = []
for num, tfm in nums_tfms:
assert tfm == 0
if tfm == 0: rng = [0]
elif tfm == 1: rng = [1]
elif tfm == 2: rng = [0,1]
for tfm_idx in rng:
r, m = aggr_data[num][tfm_idx]
recs.append(r)
meta += m
for mt in m:
if mt.dov != DoV.VAL: continue
a = mt.attr
c = mt.comp
if a not in meta_attr_comp: meta_attr_comp[a] = set()
meta_attr_comp[a].add(c)
meta = MetaList(meta)
recs = combine_arrays(recs)
for attr, comps in sorted(meta_attr_comp.items()):
print("{} -- {}".format(attr, ", ".join([str(c) for c in comps])))
for comp in comps:
cols = meta.get_columns(attr=attr, comp=comp, dov=DoV.VAL)
if len(cols) < 2:
warn("attribute {}[{}] is only present in one input file. skipping".format(attr, comp))
continue
assert len(cols) == 2
c0 = cols[0]
c1 = cols[1]
meta.append(Meta(None, DoV.VAL, attr + "_diff", comp))
meta.append(Meta(None, DoV.VAL, attr + "_reldiff", comp))
for r in recs:
v0 = r[c0]
v1 = r[c1]
diff = v0-v1
r += [diff, diff / max(abs(v0), abs(v1))]
# for attr, cols in meta.each("attr", dov=DoV.VAL):
# print("{} -- {}".format(attr, ", ".join([str(c) for c in cols])))
yield meta, recs, outfh
class FileFilterByTimestep:
def __init__(self, timesteps):
if timesteps:
self._timesteps = sorted([ float(t) for t in timesteps ])
else:
self._timesteps = None
def filter(self, ts, fn):
if self._timesteps:
for t in self._timesteps:
# print("ts vs t {} {} -- {} ?<? {}".format(ts, t, abs(ts-t), sys.float_info.epsilon))
if abs(ts-t) < sys.float_info.epsilon \
or (ts != 0.0 and abs(ts-t)/ts < 1.e-6):
return True
else:
return True
# split_re matches any number, possibly in scientific notation
split_re = re.compile(r'([+-]?[0-9]+(?:[.][0-9]+)?(?:[eE][+-]?[0-9]+)?)')
# returns a sorted version of the given list like `sort -V`
def version_sort(in_files):
return sorted(in_files, key=lambda f: [
s if i%2==0 else float(s)
for i, s in enumerate(split_re.split(
f[1] if isinstance(f[1], six.string_types) else f[1].name
))
])
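# Example (illustrative file names): version_sort orders
#   [(None, "out_2.vtu"), (None, "out_10.vtu"), (None, "out_1.vtu")]
# as out_1.vtu, out_2.vtu, out_10.vtu, i.e. numeric runs compare as numbers
# rather than lexicographically (similar to `sort -V`).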
# TODO provide a similar function also for similar cases
def process_timeseries_diff(args):
if args.out_plot:
import matplotlib as mpl # needed to avoid conflicts with vtk
import matplotlib.pyplot as plt
globals()["mpl"] = mpl
globals()["plt"] = plt
# has to be imported after matplotlib
import vtk
globals()["vtk"] = vtk
in_files = args.in_files
if args.version_sort:
in_files = version_sort(in_files)
assert len(args.points_cells) == 1 # currently only one point at once
if args.out_csv:
# output file uses both input files and not transforms
args.out_csv = [ ([(0, 0), (1, 0)], fh) for fh in args.out_csv ]
if args.out_plot:
# output file uses both input files and not transforms
args.out_plot = [ ([(0, 0), (1, 0)], fh) for fh in args.out_plot ]
req_out = (args.out_csv or []) \
+ (args.out_plot or [])
assert len(req_out) > 0
timesteps, vtuFiles, vtuFiles_transformed, _ = \
        load_input_files(in_files, req_out, None, None, FileFilterByTimestep(None))
# aggregate timeseries data
aggr_data = [ [ None, None ] for _ in in_files ]
for nums_tfms, _ in req_out:
for num, tfm in nums_tfms:
assert tfm == 0 # no transformations allowed here
src = in_files[num][0]
if src is None: src = in_files[num][1].name
tss = timesteps[num]
if tfm == 0: rng = [0]
elif tfm == 1: rng = [1]
elif tfm == 2: rng = [0,1]
for tfm_idx in rng:
if aggr_data[num][tfm_idx]: continue
if tfm_idx != 0:
grids = vtuFiles_transformed[num]
else:
grids = vtuFiles[num]
# TODO find better solution for out_coords
recs, meta = get_timeseries(src, grids, tss, args.attr, args.points_cells, args.out_coords)
if tfm_idx != 0:
for m in meta: m.tfm = True
aggr_data[num][tfm_idx] = (recs, meta)
if args.out_csv:
json_enc = JsonSer()
for meta, recs, outfh in get_output_data_diff(aggr_data, args.out_csv):
if True: #args.combine_domains:
meta, recs = combine_domains(meta, recs)
write_csv(meta, recs, outfh, args.prec[0], json_enc)
if args.out_plot:
plt = Plot(args.prec[0])
for meta, recs, outfh in get_output_data_diff(aggr_data, args.out_plot):
plt.plot_to_file(meta, recs, outfh)
def process_timeseries(args):
if args.out_plot:
import matplotlib as mpl # needed to avoid conflicts with vtk
import matplotlib.pyplot as plt
globals()["mpl"] = mpl
globals()["plt"] = plt
# has to be imported after matplotlib
import vtk
globals()["vtk"] = vtk
check_consistency_ts(args)
# there shall be only single points or cells in the list
points_cells = []
for pc in args.points_cells:
pc_flat = pc.flatten()
if pc_flat:
points_cells.extend(pc_flat)
else:
points_cells.append(pc)
in_files = args.in_files
if args.version_sort:
in_files = version_sort(in_files)
req_out = (args.out_csv or []) \
+ (args.out_plot or [])
assert len(req_out) > 0
timesteps, vtuFiles, vtuFiles_transformed, _ = \
load_input_files(in_files, req_out, args.script, args.script_param, FileFilterByTimestep(None))
# aggregate timeseries data
aggr_data = [ [ None, None ] for _ in in_files ]
for nums_tfms, _ in req_out:
for num, tfm in nums_tfms:
src = in_files[num][0]
if src is None: src = in_files[num][1]
tss = timesteps[num]
if tfm == 0: rng = [0]
elif tfm == 1: rng = [1]
elif tfm == 2: rng = [0,1]
for tfm_idx in rng:
if aggr_data[num][tfm_idx]: continue
if tfm_idx != 0:
grids = vtuFiles_transformed[num]
else:
grids = vtuFiles[num]
recs, meta = get_timeseries(src, grids, tss, args.attr, points_cells, args.out_coords)
if tfm_idx != 0:
for m in meta: m.tfm = True
aggr_data[num][tfm_idx] = (recs, meta)
# write csv files
json_enc = JsonSer()
for nums_tfms, outfh in args.out_csv or []:
meta = []
recs = []
for num, tfm in nums_tfms:
if tfm == 0: rng = [0]
elif tfm == 1: rng = [1]
elif tfm == 2: rng = [0,1]
for tfm_idx in rng:
r, m = aggr_data[num][tfm_idx]
recs.append(r)
meta += m
recs = combine_arrays(recs)
if args.combine_domains:
meta, recs = combine_domains(meta, recs)
write_csv(meta, recs, outfh, args.prec[0], json_enc)
# plot
plt = Plot(args.prec[0])
for nums_tfms, outfh in args.out_plot or []:
meta = []
recs = []
for num, tfm in nums_tfms:
if tfm == 0: rng = [0]
elif tfm == 1: rng = [1]
elif tfm == 2: rng = [0,1]
for tfm_idx in rng:
r, m = aggr_data[num][tfm_idx]
recs.append(r)
meta += m
recs = combine_arrays(recs)
# plt.plot_to_file(meta, recs, outfh)
plt.add_data(meta, recs, outfh)
plt.do_plots(1)
def process_whole_domain(args):
if not args.attr: args.attr = ['*']
# has to be imported after matplotlib
import vtk
globals()["vtk"] = vtk
check_consistency_dom(args)
if args.points_cells:
# there shall be only single points or cells in the list
points_cells = []
for pc in args.points_cells:
pc_flat = pc.flatten()
if pc_flat:
points_cells.extend(pc_flat)
else:
points_cells.append(pc)
else:
points_cells = None
in_files = args.in_files
if args.version_sort:
in_files = version_sort(in_files)
req_out = (args.out_csv or []) \
+ (args.out_pvd or []) \
+ (args.out_plot or [])
timesteps, vtuFiles, vtuFiles_transformed, vtuPaths = \
load_input_files(in_files, req_out, args.script, args.script_param, FileFilterByTimestep(args.timestep))
# write csv files
json_enc = JsonSer()
if args.out_csv or args.out_plot:
# get data
aggr_data = [ [ None, None ] for _ in range(len(in_files)) ]
for nums_tfms, _ in (args.out_csv or []) + (args.out_plot or []):
for num, tfm in nums_tfms:
src = in_files[num][0]
if src is None: src = in_files[num][1]
if tfm == 0: rng = [0]
elif tfm == 1: rng = [1]
elif tfm == 2:
rng = [0,1]
for tfm_idx in rng:
if aggr_data[num][tfm_idx]: continue
if tfm_idx != 0:
grids = vtuFiles_transformed[num]
else:
grids = vtuFiles[num]
# TODO add switch cells/points
recs, meta = get_point_data(src, grids, args.attr, points_cells, args.out_coords)
if tfm_idx != 0:
for m in meta: m.tfm = True
aggr_data[num][tfm_idx] = (recs, meta)
if args.out_csv:
# write csv files
for nums_tfms, outdirn in args.out_csv:
for ti in range(len(timesteps[nums_tfms[0][0]])):
meta = []
recs = []
for num, tfm in nums_tfms:
assert timesteps[num] == timesteps[nums_tfms[0][0]]
if tfm == 0: rng = [0]
elif tfm == 1: rng = [1]
elif tfm == 2: rng = [0,1]
for tfm_idx in rng:
r, m = aggr_data[num][tfm_idx]
recs.append(r[ti])
meta += m
recs = combine_arrays(recs)
if args.combine_domains:
meta, recs = combine_domains(meta, recs)
if recs:
if len(timesteps) == 1:
fn = outdirn \
+ re.sub(r"[.][^.]+$", ".csv", os.path.basename(vtuPaths[nums_tfms[0][0]][ti]))
else:
t = timesteps[num][ti]
if isinstance(t, numbers.Integral):
max_ts = max(timesteps[num])
width = len(str(max_ts))
fn = ("{}_{:0"+str(width)+"}.csv").format(outdirn, t)
else:
fn = "{}_{}.csv".format(outdirn, t)
print("csv output to {}".format(fn))
write_csv(meta, recs, fn, args.prec[0], json_enc)
if args.out_plot:
assert(args.num_threads >= 0)
# plot
plt = Plot(args.prec[0])
for nums_tfms, outdirn in args.out_plot or []:
for ti in range(len(timesteps[nums_tfms[0][0]])):
meta = []
recs = []
for num, tfm in nums_tfms:
assert timesteps[num] == timesteps[nums_tfms[0][0]]
if tfm == 0: rng = [0]
elif tfm == 1: rng = [1]
elif tfm == 2: rng = [0,1]
for tfm_idx in rng:
r, m = aggr_data[num][tfm_idx]
# TODO: add x-axis value
recs.append(r[ti])
meta += m
recs = combine_arrays(recs)
if recs:
if len(timesteps) == 1:
fn = outdirn \
+ re.sub(r"[.][^.]+$", ".png", os.path.basename(vtuPaths[nums_tfms[0][0]][ti]))
else:
t = timesteps[num][ti]
if isinstance(t, numbers.Integral):
max_ts = max(timesteps[num])
width = len(str(max_ts))
fn = ("{}_{:0"+str(width)+"}.png").format(outdirn, t)
else:
fn = "{}_{}.png".format(outdirn, t)
print("plot output to {}".format(fn))
if args.update:
if os.path.isfile(fn):
mt_in = os.stat(vtuPaths[nums_tfms[0][0]][ti]).st_mtime
mt_out = os.stat(fn).st_mtime
if mt_out > mt_in:
# print(in_files[ti][1], "is older than out")
continue
plt.add_data(meta, recs, fn)
plt.do_plots(args.num_threads)
# write pvd files
if args.out_pvd:
writer = vtk.vtkXMLUnstructuredGridWriter()
for nums_tfms, outfh in args.out_pvd:
outfn = outfh.name
outf_base = re.sub(r'.pvd', '', outfn)
out_vtus = []
for num, tfm in nums_tfms:
src = in_files[num][0]
if src is None: src = in_files[num][1].name
if tfm == 0: rng = [0]
elif tfm == 1: rng = [1]
elif tfm == 2:
assert tfm != 2
rng = [0,1]
for tfm_idx in rng:
if tfm_idx != 0:
grids = vtuFiles_transformed[num]
else:
grids = vtuFiles[num]
for ti in range(len(timesteps[num])):
# TODO: make output file names resemble input file names
fn = "{0}_{1}.vtu".format(outf_base, timesteps[num][ti])
out_vtus.append(fn)
writer.SetFileName(fn)
writer.SetInputData(grids[ti])
writer.Write()
write_pvd(outfh, timesteps[num], out_vtus)
def process_proxy(args):
script_fh = args.script[0]
script_args = {}
for kv in args.script_param:
k, v = kv.split('=', 2)
script_args[k] = v
analytical_model = imp.load_source("analytical_model", script_fh.name, script_fh)
analytical_model.init(script_args)
analytical_model.proxied(args.in_files, args.out_files)
def _run_main():
parser = argparse.ArgumentParser(description="Process PVD files")
# common
parser_common = argparse.ArgumentParser(description="Common options", add_help=False)
parser_common.add_argument("-s", "--script", nargs=1, type=InputFileArgument, help="script for generating field data, e.g., exact solutions of FEM models")
parser_common.add_argument("--script-param", "--sp", action="append", help="parameters for the script", default=[])
# I/O
parser_io = argparse.ArgumentParser(description="Input/output options", add_help=False)
parser_io.add_argument("-i", "--in", nargs='+', type=InputFile, required=True, help="input file", dest="in_files", metavar="IN_FILE")
parser_io.add_argument("--no-combine-domains", action="store_false", dest="combine_domains", help="do not combine domains when aggregating several input files into one output file")
parser_io.add_argument("--prec", nargs=1, type=int, help="decimal precision for output", default=[14])
parser_io.add_argument("--no-coords", action="store_false", dest="out_coords", help="do not output coordinate columns")
parser_io.add_argument("--version-sort-inputs", "-V", action="store_true", dest="version_sort", help="version sort input file names before further processing")
subparsers = parser.add_subparsers(dest="subcommand", help="subcommands")
subparsers.required = True
parser_frag_ts = argparse.ArgumentParser(description="compute timeseries", add_help=False)
parser_frag_ts.add_argument("-p", "--point", type=Point, action="append", required=False, dest="points_cells")
parser_frag_ts.add_argument("-c", "--cell", type=Cell, action="append", required=False, dest="points_cells")
parser_frag_ts.add_argument("-a", "--attr", type=AttributePack, action="append", required=False)
# timeseries
parser_ts = subparsers.add_parser("timeseries", help="compute timeseries", parents=[parser_io, parser_common, parser_frag_ts])
parser_ts.set_defaults(func=process_timeseries)
parser_ts.add_argument("--out-plot", action="append", type=OutputFile)
parser_ts.add_argument("--out-csv", action="append", type=OutputFile)
# timeseries diff
parser_tsd = subparsers.add_parser("ts-diff", help="compute differences between two timeseries", parents=[parser_frag_ts])
parser_tsd.add_argument("-i", "--in", nargs=2, type=InputFile, required=True, help="input file", dest="in_files", metavar="IN_FILE")
parser_tsd.add_argument("--out-plot", nargs=1, type=OutputFileArgument)
parser_tsd.add_argument("--out-csv", nargs=1, type=OutputFileArgument)
parser_tsd.add_argument("--prec", nargs=1, type=int, help="decimal precision for output", default=[6])
parser_tsd.set_defaults(func=process_timeseries_diff)
# domain
    parser_dom = subparsers.add_parser("domain", help="process data over the whole domain (PVD/CSV/plot output per time step)", parents=[parser_io, parser_common, parser_frag_ts])
parser_dom.add_argument("--out-pvd", action="append", type=OutputFile)
parser_dom.add_argument("--out-csv", action="append", type=OutputDir)
parser_dom.add_argument("--out-plot", action="append", type=OutputDir)
parser_dom.add_argument("-t", "--timestep", action="append", required=False)
parser_dom.add_argument("-N", "--num-threads", type=int, default=1)
parser_dom.add_argument("-U", "--update", action="store_true")
parser_dom.set_defaults(func=process_whole_domain)
# proxy
    parser_proxy = subparsers.add_parser("proxy", help="run a user-supplied script that maps the given input files to output files", parents=[parser_common])
parser_proxy.add_argument("-i", "--in", action="append", type=InputFileArgument, help="input file", dest="in_files", metavar="IN_FILE", default=[])
parser_proxy.add_argument("-o", "--out", action="append", type=OutputFileArgument, help="output file", dest="out_files", metavar="OUT_FILE", default=[])
parser_proxy.set_defaults(func=process_proxy)
args = parser.parse_args()
if "attr" in args:
if args.attr:
attrs = []
for a in args.attr:
attrs += a.get_attrs()
args.attr = attrs
else:
args.attr = [ Attribute('*') ]
args.func(args)
global time_total, time_import_vtk
print("total execution took {} seconds".format(time.time() - time_total))
print("importing vtk took {} seconds".format(time_import_vtk))
print("plotting took {} seconds".format(plot.time_plot))
print("saving plots took {} seconds".format(plot.time_plot_save))
if __name__ == "__main__":
_run_main()
else:
import matplotlib as mpl # needed to avoid conflicts with vtk
import matplotlib.pyplot as plt
# has to be imported after matplotlib
try:
start_time = time.clock()
import vtk
time_import_vtk = time.clock() - start_time
except ImportError:
warn("module vtk will not be available")
| gpl-3.0 | -5,082,921,904,808,613,000 | 32.855235 | 185 | 0.525905 | false | 3.718432 | false | false | false |
ibest/grcScripts2 | inst/scripts/python/extract_unmapped_reads.py | 1 | 6308 | #!/usr/bin/env python
'''
Extract reads which aren't mapped from a SAM or SAM.gz file.
Behavior for PE:
-Write out PE only if both do not map (if either of the pair maps, neither is retained)
Behavior for SE:
-Write out SE if they don't map
Iterate over a SAM or SAM.gz file and take everything where the 3rd and
4th flag bits (0x4 and 0x8) are set to 1, then write those reads out to files.
0x1 template having multiple segments in sequencing
0x2 each segment properly aligned according to the aligner
0x4 segment unmapped
0x8 next segment in the template unmapped
0x10 SEQ being reverse complemented
0x20 SEQ of the next segment in the template being reversed
0x40 the first segment in the template
0x80 the last segment in the template
0x100 secondary alignment
0x200 not passing quality controls
0x400 PCR or optical duplicate
TODO:
1) Add support for retaining both reads if one of a pair doesn't map but the other does
2) Add support for retaining the pair (or SE) if a read maps with low mapq
Note:
It is necessary to double check that both pairs of the PE read really exist in the SAM
file just in case it somehow gets disordered. This is taken care of by keeping the PE
reads in a set of dictionaries and then deleting them once the pair is written.
In the case where a read is somehow labeled as paired, but the pair doesn't exist, the
read is NOT written.
'''
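# Illustrative flag check (hypothetical values, not part of the original script):
# a flag of 77 (0x1 + 0x4 + 0x8 + 0x40) marks a paired read that is itself unmapped,
# whose mate is unmapped, and which is the first segment in the template:
#   flag = 77
#   is_pe_unmapped = bool(flag & 0x1) and bool(flag & 0x4) and bool(flag & 0x8)
#   is_read1 = bool(flag & 0x40)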
import sys
import os
from optparse import OptionParser # http://docs.python.org/library/optparse.html
import gzip
usage = "usage: %prog [options] -o output_base inputfile.SAM"
parser = OptionParser(usage=usage,version="%prog 2.0.0")
parser.add_option('-u', '--uncompressed', help="leave output files uncompressed",
action="store_true", dest="uncompressed")
parser.add_option('-o', '--output_base', help="output file basename",
action="store", type="str", dest="output_base",default="screened")
parser.add_option('-v', '--verbose', help="verbose output",
action="store_false", dest="verbose", default=True)
(options, args) = parser.parse_args() # uncomment this line for command line support
if len(args) == 1:
infile = args[0]
#Start opening input/output files:
if not os.path.exists(infile):
print "Error, can't find input file %s" % infile
sys.exit()
if infile.split(".")[-1] == "gz":
insam = gzip.open(infile, 'rb')
else:
insam = open(infile, 'r')
else:
## reading from stdin
insam = sys.stdin
base = options.output_base
PE1 = {}
PE2 = {}
contig_map = {}
def writeread(ID, r1, r2):
#read1
outPE1.write("@" + ID + "#0/1" '\n')
outPE1.write(r1[0] + '\n')
outPE1.write('+\n' + r1[1] + '\n')
#read2
outPE2.write("@" + ID + "#0/2" '\n')
outPE2.write(r2[0] + '\n')
outPE2.write('+\n' + r2[1] + '\n')
i = 0
PE_written = 0
SE_written = 0
SE_open = False
PE_open = False
for line in insam:
if i % 100000 == 0 and i > 0 and options.verbose:
print "Records processed: %s, PE_written: %s, SE_written: %s" % (i, PE_written, SE_written)
#Comment/header lines start with @
if line[0] != "@" and len(line.strip().split()) > 2:
i += 1
line2 = line.strip().split()
flag = int(line2[1])
#Handle SE:
        # unmapped SE reads have 0x1 set to 0, and 0x4 (third bit) set to 1
if (flag & 0x1 == 0) and (flag & 0x4):
ID = line2[0].split("#")[0]
if not SE_open:
if options.uncompressed:
outSE = open(base + "_SE.fastq", 'w')
else:
outSE = gzip.open(base + "_SE.fastq.gz", 'wb')
SE_open = True
outSE.write("@" + ID + '\n')
outSE.write(line2[9] + '\n')
outSE.write('+\n' + line2[10] + '\n')
SE_written += 1
continue
#Handle PE:
#logic: 0x1 = multiple segments in sequencing, 0x4 = segment unmapped, 0x8 = next segment unmapped, 0x80 the last segment in the template
if ((flag & 0x1) and (flag & 0x4) and (flag & 0x8)):
if not PE_open:
if options.uncompressed:
outPE1 = open(base + "_PE1.fastq", 'w')
outPE2 = open(base + "_PE2.fastq", 'w')
else:
outPE1 = gzip.open(base + "_PE1.fastq.gz", 'wb')
outPE2 = gzip.open(base + "_PE2.fastq.gz", 'wb')
PE_open = True
if (flag & 0x40): # is this PE1 (first segment in template)
#PE1 read, check that PE2 is in dict and write out
ID = line2[0].split("#")[0]
r1 = [line2[9], line2[10]] # sequence + qual
if ID in PE2:
writeread(ID, r1, PE2[ID])
del PE2[ID]
PE_written += 1
else:
PE1[ID] = r1
continue
elif (flag & 0x80): # is this PE2 (last segment in template)
#PE2 read, check that PE1 is in dict and write out
ID = line2[0].split("#")[0]
r2 = [line2[9], line2[10]]
if ID in PE1:
writeread(ID, PE1[ID], r2)
del PE1[ID]
PE_written += 1
else:
PE2[ID] = r2
continue
# was mapped, count it up
contig = line2[2]
if contig in contig_map.keys():
if (flag & 0x1 == 0): ## SE
contig_map[contig]["SE"] += 1
elif (flag & 0x40): ## PE, Just count the first in the pair
contig_map[contig]["PE"] += 1
else:
contig_map[contig] = {}
if (flag & 0x1 == 0): ## SE
contig_map[contig]["SE"] = 1
contig_map[contig]["PE"] = 0
elif (flag & 0x40): ## PE, Just count the first in the pair
contig_map[contig]["SE"] = 0
contig_map[contig]["PE"] = 1
print "Records processed: %s, PE_written: %s, SE_written: %s" % (i, PE_written, SE_written)
for k in contig_map.keys():
print "\tFound %s: percent: %.2f, PE mapped: %s, SE mapped: %s" % (k,(2*PE_written+SE_written)/i, contig_map[k]["PE"], contig_map[k]["SE"])
if PE_open:
outPE1.close()
outPE2.close()
if SE_open:
outSE.close()
| apache-2.0 | 5,556,250,830,687,737,000 | 35.462428 | 149 | 0.565155 | false | 3.360682 | false | false | false |
BigFatNoob-NCSU/x9115george2 | hw/code/6/algorithms/max_walk_sat.py | 1 | 3166 | from __future__ import print_function, division
__author__ = 'george'
import sys
import numpy as np
from utils.lib import *
from algorithm import *
def default():
return O(
gens = 10,
max_changes = 100,
change_prob = 0.5,
steps = 10,
threshold = 170,
better = lt,
verbose = True,
step_size = 100
)
class MWS(Algorithm):
def __init__(self, model, settings=None):
if not settings:
settings = default()
Algorithm.__init__(self, model, settings)
def energy(self, decisions, do_norm=True):
"""
Energy function. Used to evaluate
:param decisions: Decisions to be evaluated
:param do_norm: If objectives have to be normalized
:return: Computed energy value
"""
norms = []
objectives = self.model.evaluate(decisions)
if do_norm:
for i, obj in enumerate(objectives):
norms.append(self.model.objectives[i].norm(obj))
return sum(norms)
else:
return sum(objectives)
def run(self):
"""
Runner function to run the
max walk sat algorithm
:return: Best solution, Objectives and number of evals
"""
model = self.model
settings = self.settings
if settings.verbose:
print(model)
print(settings)
evals = 0
decs = model.decisions
front = Front()
for _ in range(settings.gens):
solution = model.generate()
out = ""
for __ in range(settings.max_changes):
evals += 1
rand_index = choice(range(len(decs)))
if settings.change_prob < rand():
clone = list(solution)
clone[rand_index] = within(decs[rand_index].low,
decs[rand_index].high)
if model.check_constraints(clone):
solution = clone
key = " ?"
else:
key = " ."
else:
cloned, int_evals = self.jiggle_solution(solution, rand_index)
evals += int_evals
if cloned != solution:
key = " +"
solution = cloned
else:
key = " ."
out+=key
if settings.verbose:
print(model.evaluate(solution), out)
front.update(Point(solution, model.evaluate(solution)))
front.evals = evals
return front
def jiggle_solution(self, solution, index):
"""
    Step the decision at ``index`` through its allowed range and return the
    solution with the best score found, together with the number of evaluations used
"""
t_evals = 0
lo = self.model.decisions[index].low
hi = self.model.decisions[index].high
delta = (hi - lo) / self.settings.step_size
best_soln, best_score = solution, sys.maxint
if self.settings.better == gt:
best_score = -best_score
for val in np.arange(lo, hi+delta, delta):
cloned = list(solution)
cloned[index] = val
t_evals += 1
if not self.model.check_constraints(cloned):
continue
objs = self.model.evaluate(cloned)
objs = self.model.norm_objectives(objs)
t_score = sum(objs)
t_evals += 1
if self.settings.better(t_score, best_score):
best_soln, best_score = list(cloned), t_score
return best_soln, t_evals
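# Minimal usage sketch (hypothetical, not part of the original module): assumes a
# `model` object compatible with Algorithm, e.g. one of the models in this repository:
#   mws = MWS(model, default())
#   front = mws.run()
#   print(front.evals)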
| mit | -108,275,731,922,424,450 | 26.77193 | 72 | 0.585597 | false | 3.67712 | false | false | false |
pcampese/codewars | validBraces4.py | 1 | 1142 | # https://www.codewars.com/kata/valid-braces/train/python
def validBraces(braces):
# Print the arguments
print('braces = -->{}<--'.format(braces))
# Create dictionary to map closing brace to opening brace
closing_brace_of = {
'{' : '}',
'[' : ']',
'(' : ')',
}
# Create lists to contain the closing braces
closing = []
# Go through each character. If you see an opening brace, add the corresponding closing brace to the list
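    # Illustrative expectations (hypothetical calls, not part of the kata submission):
    #   validBraces("(){}[]")  ->  True   (every opener is matched in order)
    #   validBraces("[(])")    ->  False  ("]" arrives while ")" is still expected)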
result = None
for c in braces:
print('Looking at c = [{}]'.format(c))
if c in closing_brace_of:
print('[{}] is in an opening bracket'.format(c))
closing.append(closing_brace_of[c])
print('closing = {}'.format(closing))
else:
print('[{}] is in a closing bracket'.format(c))
if (not closing or (c != closing.pop())): # If we're looking at a letter, but the closing list is empty or doesn't match what we expect
print('closing is empty')
result = False
break
else:
print('all seems OK')
result = True
print
print
# Make sure all the closing brackets have been used
if (closing):
result = False
print('result = {}'.format(result))
return result | gpl-3.0 | 8,522,807,550,574,706,000 | 25.581395 | 138 | 0.649737 | false | 3.253561 | false | false | false |
peterfpeterson/mantid | scripts/MultiPlotting/edit_windows/remove_plot_window.py | 3 | 2195 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy import QtCore, QtWidgets
from Muon.GUI.Common.utilities import table_utils
class RemovePlotWindow(QtWidgets.QDialog):
applyRemoveSignal = QtCore.Signal(object)
closeEventSignal = QtCore.Signal()
def __init__(self, lines, subplot, vlines=[], parent=None):
super(RemovePlotWindow, self).__init__()
all_lines = lines + vlines
self._subplot = subplot
self.grid = QtWidgets.QGridLayout()
self.table = QtWidgets.QTableWidget(self)
self.table.resize(200, 200)
self.table.setRowCount(len(all_lines))
self.table.setColumnCount(2)
self.table.setColumnWidth(0, 150)
self.table.setColumnWidth(1, 50)
self.table.verticalHeader().setVisible(False)
self.table.horizontalHeader().setStretchLastSection(True)
self.table.setHorizontalHeaderLabels("Line name;Remove".split(";"))
table_utils.setTableHeaders(self.table)
self.widgets = {}
for index, line in enumerate(all_lines):
table_utils.setRowName(self.table, index, line)
tmp = {"line": line, "box": table_utils.addCheckBoxToTable(self.table, False, index)}
self.widgets[line] = tmp
self.grid.addWidget(self.table)
btn = QtWidgets.QPushButton("Remove")
self.grid.addWidget(btn)
self.setLayout(self.grid)
self.setWindowTitle("Remove Lines For " + self._subplot)
btn.clicked.connect(self.buttonClick)
def closeEvent(self, event):
self.closeEventSignal.emit()
def buttonClick(self):
self.applyRemoveSignal.emit(self.widgets.keys())
def getState(self, name):
return self.widgets[name]["box"].checkState() == QtCore.Qt.Checked
def getLine(self, name):
return self.widgets[name]["line"]
@property
def subplot(self):
return self._subplot
| gpl-3.0 | -484,304,044,883,406,850 | 33.84127 | 97 | 0.666515 | false | 3.732993 | false | false | false |
thecodinghub/news-for-good | my_app/urls.py | 1 | 1654 | """my_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.static import serve
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views
from news import views as post_view
urlpatterns = [
url(r'^$',post_view.PostList.as_view(), name='all'),
url(r'^admin/', admin.site.urls),
url(r'^users/',include('users.urls',namespace='users')),
url(r'^users/',include('django.contrib.auth.urls')),
url(r'^test/$',views.TestView.as_view(),name='test'),
url(r'^thanks/$',views.ThanksView.as_view(),name='thanks'),
url(r'^posts/', include('news.urls',namespace='news'))
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url('^__debug__/', include(debug_toolbar.urls)),
# staticfiles_urlpatterns(),
url(r'^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
] + urlpatterns
| bsd-3-clause | 8,568,705,831,719,593,000 | 36.590909 | 79 | 0.675937 | false | 3.564655 | false | false | false |
mrosenstihl/PulsePrograms | CPMG/CPMG_exp.py | 1 | 2811 |
def fid_experiment(run):
e=Experiment()
e.set_description("run",run)
pulse90 = 2.1e-6 # s
pulse180 = 4.2e-6 # s
td = 1.3*5 # repetition time
phase = 155 #receiver phase
tau =300e-6 # s CPMG interpulse delay; > 100e-6
rec_time = 4e-6 # s <= tau-15e-6
sampl_freq = 20e6
no_echoes = 4000 # number of echoes
no_points = 64 # measured points in the accumulated signal
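    # Illustrative timing check (derived from the parameters above, not part of the
    # original sequence): with tau = 300e-6 s and 4000 refocusing pulses the echo
    # train lasts roughly 2 * tau * no_echoes = 2.4 s, and the rec_time = 4e-6 s
    # acquisition window fits comfortably inside each tau interval.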
# ---------------------------------------------------------------------
e.set_description("tau",tau)
e.set_description("no_echoes", no_echoes+1)
e.set_description("pulse180",pulse180)
if pulse90>10e-6:
raise Exception("--- 90 Pulse too long!!! ---")
if pulse180>10e-6:
raise Exception("--- 180 Pulse too long!!! ---")
if tau <5e-6:
raise Exception("--- Echo time shorter than gate time!!! ---")
# ---------------------------------------------------------------------
e.set_phase(0)
e.wait(td-5e-6-0.5e-6)
# first pulse ----------------------------------------------------------------
e.ttl_pulse(length=5e-6, value=1) # gate high-power ampli on
e.ttl_pulse(length=pulse90, value=1+2) # RF pulse
# -----------------------------------------------------------------------------
e.set_phase([90, 90, 270, 270][run%4])
e.wait(tau-5e-6-0.5e-6) # e.set_phase introduces 0.5e-6 delay
# first 180 pulse and recording -----------------------------------------------
e.ttl_pulse(length=5e-6, value=1) # gate high-power ampli on
e.ttl_pulse(length=pulse180, value=1+2) # RF pulse
e.set_phase(phase+[0,180,0,180][run%4])
e.wait(tau-0.5e-6-rec_time/2)
e.record(samples=no_points, frequency=sampl_freq, timelength=rec_time, sensitivity=10) # this is rec_time long
# -----------------------------------------------------------------------------
e.loop_start(no_echoes)
e.set_phase([90.0, 90.0, 270.0, 270.0][run%4])
e.wait(tau-0.5e-6-5e-6-rec_time/2)
e.ttl_pulse(length=5e-6, value=1) # gate high-power ampli on
e.ttl_pulse(length=pulse180, value=1+2) # RF pulse
e.set_phase(phase+[0,180,0,180][run%4])
e.wait(tau-0.5e-6-rec_time/2)
e.record(samples=no_points, frequency=sampl_freq, timelength=rec_time, sensitivity=5) # this is rec_time long
e.loop_end()
return e
def experiment():
accumulations=4
for run in xrange(accumulations):
yield fid_experiment(run)
pass | bsd-2-clause | -5,560,417,546,478,381,000 | 33.716049 | 114 | 0.449306 | false | 3.535849 | false | false | false |