code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
---|---|---|---|---|---|
import datetime
from ecl.util.util import BoolVector
from ecl.util.test import TestAreaContext
from tests import ResTest
from res.enkf import ObsBlock
class ObsBlockTest(ResTest):
    """Unit tests for the res.enkf.ObsBlock wrapper."""

    def test_create(self):
        # A freshly created block reserves room for totalSize() observations
        # but starts with no active (assigned) elements.
        block = ObsBlock("OBS" , 1000)
        self.assertTrue( isinstance( block , ObsBlock ))
        self.assertEqual( 1000 , block.totalSize())
        self.assertEqual( 0 , block.activeSize())

    def test_access(self):
        obs_size = 10
        block = ObsBlock("OBS" , obs_size)

        # Out-of-range indices (positive and negative) must raise IndexError.
        with self.assertRaises(IndexError):
            block[100] = (1,1)

        with self.assertRaises(IndexError):
            block[-100] = (1,1)

        # Values must be two-tuples; a scalar or a 3-tuple is a TypeError.
        with self.assertRaises(TypeError):
            block[4] = 10

        with self.assertRaises(TypeError):
            block[4] = (1,1,9)

        #------

        with self.assertRaises(IndexError):
            v = block[100]

        with self.assertRaises(IndexError):
            v = block[-100]

        # Assigning through __setitem__ activates the element.
        block[0] = (10,1)
        v = block[0]
        self.assertEqual( v , (10,1))
        self.assertEqual( 1 , block.activeSize())

        # Negative indices follow normal Python semantics.
        block[-1] = (17,19)
        self.assertEqual( block[-1], (17,19))
| andreabrambilla/libres | python/tests/res/enkf/test_obs_block.py | Python | gpl-3.0 | 1,177 |
# -*- coding: utf-8 -*-
"""Configure batch3dfier with the input data."""
import os.path
from subprocess import call
from shapely.geometry import shape
from shapely import geos
from psycopg2 import sql
import fiona
def call_3dfier(db, tile, schema_tiles,
                pc_file_name, pc_tile_case, pc_dir,
                table_index_pc, fields_index_pc,
                table_index_footprint, fields_index_footprint, uniqueid,
                extent_ewkb, clip_prefix, prefix_tile_footprint,
                yml_dir, tile_out, output_format, output_dir,
                path_3dfier, thread):
    """Call 3dfier with the YAML config created by yamlr().

    Note
    ----
    For the rest of the parameters see batch3dfier_config.yml.

    Parameters
    ----------
    db : db Class instance
    tile : str
        Name of the 2D tile.
    schema_tiles : str
        Schema of the footprint tiles.
    pc_file_name : str
        Naming convention for the pointcloud files. See 'dataset_name' in batch3dfier_config.yml.
    pc_tile_case : str
        How the string matching is done for pc_file_name. See 'tile_case' in batch3dfier_config.yml.
    pc_dir : str
        Directory of the pointcloud files. See 'dataset_dir' in batch3dfier_config.yml.
    thread : str
        Name/ID of the active thread.
    extent_ewkb : str
        EWKB representation of 'extent' in batch3dfier_config.yml.
    clip_prefix : str
        Prefix for naming the clipped/united views. This value shouldn't be a substring of the pointcloud file names.
    prefix_tile_footprint : str or None
        Prefix prepended to the footprint tile view names. If None, the views are named as
        the values in fields_index_fooptrint['unit_name'].

    Returns
    -------
    dict
        {'tile_skipped': <tile name or None>, 'out_path': <3dfier output path
        or None>}. The tile is skipped when no matching pointcloud file was
        found in 'dataset_dir' (YAML) or when 3dfier could not be run.
    """
    pc_tiles = find_pc_tiles(db, table_index_pc, fields_index_pc,
                             table_index_footprint, fields_index_footprint,
                             extent_ewkb, tile_footprint=tile,
                             prefix_tile_footprint=prefix_tile_footprint)
    pc_path = find_pc_files(pc_tiles, pc_dir, pc_file_name, pc_tile_case)
    # prepare output file name
    if not tile_out:
        tile_out = tile.replace(clip_prefix, '', 1)
    # Call 3dfier ------------------------------------------------------------
    if pc_path:
        # Needs a YAML per thread so one doesn't overwrite it while the other
        # uses it
        yml_name = thread + "_config.yml"
        yml_path = os.path.join(yml_dir, yml_name)
        config = yamlr(dbname=db.dbname, host=db.host, user=db.user,
                       pw=db.password, schema_tiles=schema_tiles,
                       bag_tile=tile, pc_path=pc_path,
                       output_format=output_format, uniqueid=uniqueid)
        # Write temporary config file
        try:
            with open(yml_path, "w") as text_file:
                text_file.write(config)
        except BaseException:
            print("Error: cannot write _config.yml")
        # Prep output file name: 3dfier infers nothing from the extension,
        # so append it here for OBJ and CSV outputs.
        if "obj" in output_format.lower():
            o = tile_out + ".obj"
            output_path = os.path.join(output_dir, o)
        elif "csv" in output_format.lower():
            o = tile_out + ".csv"
            output_path = os.path.join(output_dir, o)
        else:
            output_path = os.path.join(output_dir, tile_out)
        # Run 3dfier
        command = (path_3dfier + " {yml} -o {out}").format(
            yml=yml_path, out=output_path)
        try:
            call(command, shell=True)
        except BaseException:
            # BUG FIX: previously the failure was recorded in a local
            # variable but execution fell through to the success return,
            # reporting 'tile_skipped': None even though 3dfier failed.
            print("\nCannot run 3dfier on tile " + tile)
            return({'tile_skipped': tile,
                    'out_path': None})
    else:
        print(
            "\nPointcloud file(s) " +
            str(pc_tiles) +
            " not available. Skipping tile.\n")
        return({'tile_skipped': tile,
                'out_path': None})
    return({'tile_skipped': None,
            'out_path': output_path})
def yamlr(dbname, host, user, pw, schema_tiles,
          bag_tile, pc_path, output_format, uniqueid):
    """Parse the YAML config file for 3dfier.

    Parameters
    ----------
    See batch3dfier_config.yml. `pc_path` is a list of pointcloud file paths.

    Returns
    -------
    string
        the YAML config file for 3dfier
    """
    # Build the elevation 'datasets' bullet list: several paths each get a
    # "- " bullet; a single path is emitted as one bullet without trailing
    # newline.
    pc_dataset = ""
    if len(pc_path) > 1:
        for p in pc_path:
            pc_dataset += "- " + p + "\n" + " "
    else:
        pc_dataset += "- " + pc_path[0]
    # !!! Do not correct the indentation of the config template, otherwise it
    # results in 'YAML::TypedBadConversion<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >'
    # because every line is indented as here
    # NOTE(review): the template's original indentation was lost in text
    # extraction; verify this literal against upstream batch3dfier before
    # relying on it.
    config = """
input_polygons:
- datasets:
- "PG:dbname={dbname} host={host} user={user} password={pw} schemas={schema_tiles} tables={bag_tile}"
uniqueid: {uniqueid}
lifting: Building
lifting_options:
Building:
height_roof: percentile-90
height_floor: percentile-10
lod: 1
input_elevation:
- datasets:
{pc_path}
omit_LAS_classes:
thinning: 0
options:
building_radius_vertex_elevation: 2.0
radius_vertex_elevation: 1.0
threshold_jump_edges: 0.5
output:
format: {output_format}
building_floor: true
vertical_exaggeration: 0
""".format(dbname=dbname,
           host=host,
           user=user,
           pw=pw,
           schema_tiles=schema_tiles,
           bag_tile=bag_tile,
           uniqueid=uniqueid,
           pc_path=pc_dataset,
           output_format=output_format)
    return(config)
def find_pc_files(pc_tiles, pc_dir, pc_file_name, pc_tile_case):
    """Find pointcloud files in the file system when given a list of pointcloud tile names.

    Parameters
    ----------
    pc_tiles : list of str
        Pointcloud tile names.
    pc_dir : str
        Directory that holds the pointcloud files.
    pc_file_name : str
        File-name template with a '{tile}' placeholder.
    pc_tile_case : str
        One of 'upper', 'lower', 'mixed' -- casing applied to the tile name
        before substitution into the template.

    Returns
    -------
    list of str or None
        Full paths of the files when every one exists, otherwise None.

    Raises
    ------
    ValueError
        If pc_tile_case is not one of the allowed values.
    """
    # Prepare AHN file names -------------------------------------------------
    if pc_tile_case == "upper":
        tiles = [pc_file_name.format(tile=t.upper()) for t in pc_tiles]
    elif pc_tile_case == "lower":
        tiles = [pc_file_name.format(tile=t.lower()) for t in pc_tiles]
    elif pc_tile_case == "mixed":
        tiles = [pc_file_name.format(tile=t) for t in pc_tiles]
    else:
        # BUG FIX: raising a plain string object is itself a TypeError in
        # Python 3; raise a proper exception instead.
        raise ValueError(
            "Please provide one of the allowed values for pc_tile_case.")
    # use the tile list in tiles to parse the pointcloud file names
    pc_path = [os.path.join(pc_dir, pc_tile) for pc_tile in tiles]
    # Every file must be present, otherwise the tile cannot be processed.
    if all(os.path.isfile(p) for p in pc_path):
        return(pc_path)
    else:
        return(None)
def find_pc_tiles(db, table_index_pc, fields_index_pc,
                  table_index_footprint=None, fields_index_footprint=None,
                  extent_ewkb=None, tile_footprint=None,
                  prefix_tile_footprint=None):
    """Find pointcloud tiles in tile index that intersect the extent or the footprint tile.

    Parameters
    ----------
    prefix_tile_footprint : str or None
        Prefix prepended to the footprint tile view names. If None, the views are named as
        the values in fields_index_fooptrint['unit_name'].
    """
    # Two modes: an explicit polygon extent takes precedence over a
    # footprint tile name.
    if extent_ewkb:
        tiles = get_2Dtiles(db, table_index_pc, fields_index_pc, extent_ewkb)
    else:
        # Quote all schema/table/field names so user-provided identifiers
        # cannot break or inject into the query.
        schema_pc_q = sql.Identifier(table_index_pc['schema'])
        table_pc_q = sql.Identifier(table_index_pc['table'])
        field_pc_geom_q = sql.Identifier(fields_index_pc['geometry'])
        field_pc_unit_q = sql.Identifier(fields_index_pc['unit_name'])
        schema_ftpr_q = sql.Identifier(table_index_footprint['schema'])
        table_ftpr_q = sql.Identifier(table_index_footprint['table'])
        field_ftpr_geom_q = sql.Identifier(fields_index_footprint['geometry'])
        field_ftpr_unit_q = sql.Identifier(fields_index_footprint['unit_name'])
        if prefix_tile_footprint:
            # Strip the view-name prefix to recover the bare tile name
            # stored in the footprint tile index.
            tile_footprint = tile_footprint.replace(
                prefix_tile_footprint, '', 1)
        tile_q = sql.Literal(tile_footprint)
        # Select every pointcloud tile whose geometry intersects the
        # geometry of the named footprint tile.
        query = sql.SQL("""
        SELECT
            {table_pc}.{field_pc_unit}
        FROM
            {schema_pc}.{table_pc},
            {schema_ftpr}.{table_ftpr}
        WHERE
            {table_ftpr}.{field_ftpr_unit} = {tile}
            AND st_intersects(
                {table_pc}.{field_pc_geom},
                {table_ftpr}.{field_ftpr_geom}
            );
        """).format(table_pc=table_pc_q,
                    field_pc_unit=field_pc_unit_q,
                    schema_pc=schema_pc_q,
                    schema_ftpr=schema_ftpr_q,
                    table_ftpr=table_ftpr_q,
                    field_ftpr_unit=field_ftpr_unit_q,
                    tile=tile_q,
                    field_pc_geom=field_pc_geom_q,
                    field_ftpr_geom=field_ftpr_geom_q)
        resultset = db.getQuery(query)
        # Flatten the single-column result set into a plain list.
        tiles = [tile[0] for tile in resultset]
    return(tiles)
def extent_to_ewkb(db, table_index, file):
    """Reads a polygon from a file and returns its EWKB.

    I didn't find a simple way to safely get SRIDs from the input geometry
    with Shapely, therefore it is obtained from the database and the CRS of the
    polygon is assumed to be the same as of the tile indexes.

    Parameters
    ----------
    db : db Class instance
    table_index : dict
        {'schema' : str, 'table' : str} of the table of tile index.
    file : str
        Path to the polygon for clipping the input.
        Must be in the same CRS as the table_index.

    Returns
    -------
    [Shapely polygon, EWKB str]
    """
    schema = sql.Identifier(table_index['schema'])
    table = sql.Identifier(table_index['table'])
    # Sample the SRID from the first geometry in the tile index; all tiles
    # are assumed to share the same CRS.
    query = sql.SQL("""SELECT st_srid(geom) AS srid
                    FROM {schema}.{table}
                    LIMIT 1;""").format(schema=schema, table=table)
    srid = db.getQuery(query)[0][0]
    assert srid is not None
    # Get clip polygon and set SRID
    with fiona.open(file, 'r') as src:
        poly = shape(src[0]['geometry'])
        # Make the WKB writer include the SRID so wkb_hex yields EWKB.
        geos.WKBWriter.defaults['include_srid'] = True
        # set SRID for polygon (low-level GEOS call; poly._geom is the raw
        # GEOS geometry handle)
        geos.lgeos.GEOSSetSRID(poly._geom, srid)
        ewkb = poly.wkb_hex
    return([poly, ewkb])
def get_2Dtiles(db, table_index, fields_index, ewkb):
    """Returns a list of tiles that overlap the output extent.

    Parameters
    ----------
    db : db Class instance
    table_index : dict
        {'schema' : str, 'table' : str} of the table of tile index.
    fields_index : dict
        {'primary_key' : str, 'geometry' : str, 'unit_name' : str}
        primary_key: Name of the primary_key field in table_index.
        geometry: Name of the geometry field in table_index.
        unit: Name of the field in table_index that contains the index unit names.
    ewkb : str
        EWKB representation of a polygon.

    Returns
    -------
    [tile IDs]
        Tiles that are intersected by the polygon that is provided in 'extent' (YAML).
    """
    schema = sql.Identifier(table_index['schema'])
    table = sql.Identifier(table_index['table'])
    field_idx_geom_q = sql.Identifier(fields_index['geometry'])
    field_idx_unit_q = sql.Identifier(fields_index['unit_name'])
    ewkb_q = sql.Literal(ewkb)
    # TODO: user input for a.unit
    query = sql.SQL("""
    SELECT {table}.{field_idx_unit}
    FROM {schema}.{table}
    WHERE st_intersects({table}.{field_idx_geom}, {ewkb}::geometry);
    """).format(schema=schema,
                table=table,
                field_idx_unit=field_idx_unit_q,
                field_idx_geom=field_idx_geom_q,
                ewkb=ewkb_q)
    resultset = db.getQuery(query)
    # Flatten the single-column result set into a plain list of tile IDs.
    tiles = [tile[0] for tile in resultset]
    print("Nr. of tiles in clip extent: " + str(len(tiles)))
    return(tiles)
def get_2Dtile_area(db, table_index):
    """Get the area of a 2D tile.

    Note
    ----
    Assumes that all tiles have equal area. Area is in units of the tile CRS.

    Parameters
    ----------
    db : db Class instance
    table_index : dict
        {'schema' : str, 'table' : str} of the table of tile index.

    Returns
    -------
    float
    """
    schema = sql.Identifier(table_index['schema'])
    table = sql.Identifier(table_index['table'])
    # Only the first tile is measured -- equal-area assumption above.
    query = sql.SQL("""
    SELECT public.st_area(geom) AS area
    FROM {schema}.{table}
    LIMIT 1;
    """).format(schema=schema, table=table)
    area = db.getQuery(query)[0][0]
    return(area)
def get_2Dtile_views(db, schema_tiles, tiles):
    """Get View names of the 2D tiles. It tries to find views in schema_tiles
    that contain the respective tile ID in their name.

    Parameters
    ----------
    db : db Class instance
    schema_tiles : str
        Name of the schema where the 2D tile views are stored.
    tiles : list
        Tile IDs

    Returns
    -------
    list
        Names of the views that contain a tile ID as substring.
    """
    # Get View names for the tiles.
    # Substring match: a view qualifies when its name merely *contains*
    # the tile ID ('%<tile>%').
    t = ["%" + str(tile) + "%" for tile in tiles]
    t = sql.Literal(t)
    schema_tiles = sql.Literal(schema_tiles)
    # LIKE any(<array>) matches a view against every tile pattern at once.
    query = sql.SQL("""SELECT table_name
                    FROM information_schema.views
                    WHERE table_schema = {}
                    AND table_name LIKE any({});
                    """).format(schema_tiles, t)
    resultset = db.getQuery(query)
    tile_views = [tile[0] for tile in resultset]
    return(tile_views)
def clip_2Dtiles(db, user_schema, schema_tiles, tiles, poly, clip_prefix,
                 fields_view):
    """Creates views for the clipped tiles.

    Parameters
    ----------
    db : db Class instance
    user_schema : str
        Schema in which the clipped views are created.
    schema_tiles : str
        Schema of the source tile views.
    tiles : list
        Tile IDs to clip.
    poly : Shapely polygon
        Clip boundary.
    clip_prefix : str
        Prefix for naming the clipped views.
    fields_view : dict
        {'all': list of field names, 'geometry': name of the geometry field}.

    Returns
    -------
    list
        Name of the views of the clipped tiles.
    """
    # Keep the plain strings for messages; use quoted identifiers in SQL.
    user_schema_q = sql.Identifier(user_schema)
    schema_tiles_q = sql.Identifier(schema_tiles)
    tiles_clipped = []
    fields_all = fields_view['all']
    field_geom_q = sql.Identifier(fields_view['geometry'])
    for tile in tiles:
        t = clip_prefix + tile
        tiles_clipped.append(t)
        view = sql.Identifier(t)
        tile_view = sql.Identifier(tile)
        fields_q = parse_sql_select_fields(tile, fields_all)
        wkb = sql.Literal(poly.wkb_hex)
        # One view per tile, restricted to the geometries inside the clip
        # polygon.
        query = sql.SQL("""
        CREATE OR REPLACE VIEW {user_schema}.{view} AS
        SELECT
            {fields}
        FROM
            {schema_tiles}.{tile_view}
        WHERE
            st_within({tile_view}.{geom}, {wkb}::geometry)"""
                        ).format(user_schema=user_schema_q,
                                 schema_tiles=schema_tiles_q,
                                 view=view,
                                 fields=fields_q,
                                 tile_view=tile_view,
                                 geom=field_geom_q,
                                 wkb=wkb)
        db.sendQuery(query)
    try:
        # Single commit for all CREATE VIEW statements.
        db.conn.commit()
        print(
            str(
                len(tiles_clipped)) +
            " views with prefix '{}' are created in schema {}.".format(
                clip_prefix,
                user_schema))
    except BaseException:
        # BUG FIX: the original format() call did not supply the
        # 'user_schema' and 'tile' keys, so the handler itself raised a
        # KeyError and the rollback below was never reached.
        print("Cannot create view {user_schema}.{clip_prefix}{tile}".format(
            user_schema=user_schema, clip_prefix=clip_prefix, tile=tile))
        db.conn.rollback()
    return(tiles_clipped)
def union_2Dtiles(db, user_schema, tiles_clipped, clip_prefix, fields_view):
    """Union the clipped tiles into a single view.

    Parameters
    ----------
    db : db Class instance
    user_schema : str
    tiles_clipped : list
    clip_prefix : str

    Returns
    -------
    str or bool
        Name of the united view, or False when the view could not be created.
    """
    # Check if there are enough tiles to unite
    assert len(tiles_clipped) > 1, "Need at least 2 tiles for union"
    user_schema = sql.Identifier(user_schema)
    u = "{clip_prefix}union".format(clip_prefix=clip_prefix)
    union_view = sql.Identifier(u)
    sql_query = sql.SQL("CREATE OR REPLACE VIEW {user_schema}.{view} AS ").format(
        user_schema=user_schema, view=union_view)
    fields_all = fields_view['all']
    # Every subquery but the last is chained with UNION ALL.
    for tile in tiles_clipped[:-1]:
        view = sql.Identifier(tile)
        fields_q = parse_sql_select_fields(tile, fields_all)
        sql_subquery = sql.SQL("""SELECT {fields}
                               FROM {user_schema}.{view}
                               UNION ALL """).format(fields=fields_q,
                                                     user_schema=user_schema,
                                                     view=view)
        sql_query = sql_query + sql_subquery
    # The last statement terminates the query with ';' instead of UNION ALL.
    tile = tiles_clipped[-1]
    view = sql.Identifier(tile)
    fields_q = parse_sql_select_fields(tile, fields_all)
    sql_subquery = sql.SQL("""SELECT {fields}
                           FROM {user_schema}.{view};
                           """).format(fields=fields_q,
                                       user_schema=user_schema,
                                       view=view)
    sql_query = sql_query + sql_subquery
    db.sendQuery(sql_query)
    try:
        db.conn.commit()
        print("View {} created in schema {}.".format(u, user_schema))
    except BaseException:
        print("Cannot create view {}.{}".format(user_schema, u))
        db.conn.rollback()
        return(False)
    return(u)
def get_view_fields(db, user_schema, tile_views):
    """Get the fields in a 2D tile view.

    Only the first view is inspected -- all tile views are assumed to share
    the same set of columns.

    Parameters
    ----------
    db : db Class instance
    user_schema : str
    tile_views : list of str

    Returns
    -------
    {'all' : list, 'geometry' : str} or None
        None when tile_views is empty.
    """
    if len(tile_views) > 0:
        schema_q = sql.Literal(user_schema)
        view_q = sql.Literal(tile_views[0])
        # All column names from the information schema...
        resultset = db.getQuery(sql.SQL("""
        SELECT
            column_name
        FROM
            information_schema.columns
        WHERE
            table_schema = {schema}
            AND table_name = {view};
        """).format(schema=schema_q,
                    view=view_q))
        f = [field[0] for field in resultset]
        # ...and the geometry column name from PostGIS' geometry_columns.
        geom_res = db.getQuery(sql.SQL("""
        SELECT
            f_geometry_column
        FROM
            public.geometry_columns
        WHERE
            f_table_schema = {schema}
            AND f_table_name = {view};
        """).format(schema=schema_q,
                    view=view_q))
        f_geom = geom_res[0][0]
        fields = {}
        fields['all'] = f
        fields['geometry'] = f_geom
        return(fields)
    else:
        return(None)
def parse_sql_select_fields(table, fields):
    """Parses a list of field names into "table"."field" to insert into a SELECT ... FROM table

    Parameters
    ----------
    fields : list of str

    Returns
    -------
    psycopg2.sql.Composable
    """
    qualified = [
        sql.SQL('.').join([sql.Identifier(table), sql.Identifier(field)])
        for field in fields
    ]
    return sql.SQL(', ').join(qualified)
def drop_2Dtiles(db, user_schema, views_to_drop):
    """Drops Views in a given schema.

    Note
    ----
    Used for dropping the views created by clip_2Dtiles() and union_2Dtiles().

    Parameters
    ----------
    db : db Class instance
    user_schema : str
    views_to_drop : list

    Returns
    -------
    bool
        True when the drops were committed, False when rolled back.
    """
    user_schema = sql.Identifier(user_schema)
    for view in views_to_drop:
        view = sql.Identifier(view)
        # CASCADE also removes dependent objects (e.g. the union view).
        query = sql.SQL("DROP VIEW IF EXISTS {user_schema}.{view} CASCADE;").format(
            user_schema=user_schema, view=view)
        db.sendQuery(query)
    try:
        # Single commit for all DROP statements.
        db.conn.commit()
        print("Dropped {} in schema {}.".format(views_to_drop, user_schema))
        # sql.Identifier("tile_index").as_string(dbs.conn)
        return(True)
    except BaseException:
        print("Cannot drop views ", views_to_drop)
        db.conn.rollback()
        return(False)
# Released under the GNU General Public License version 3 by J2897.
def get_page(page):
    # Fetch *page* over HTTP and return the raw response body.
    # Python 2 / urllib2; no timeout or error handling here -- the caller
    # wraps this in a try/except.
    import urllib2
    source = urllib2.urlopen(page)
    return source.read()
title = 'WinSCP Updater'
# Anchor text searched for on the download page to locate the version info.
target = 'Downloading WinSCP'
url = 'http://winscp.net/eng/download.php'

print 'Running: ' + title
print 'Target: ' + target
print 'URL: ' + url

# Fetch the download page; `page` stays None when the request fails.
try:
    page = get_page(url)
except:
    page = None
else:
    print 'Got page...'
def msg_box(message, box_type):
    # Show a Windows message box titled with the global `title`.
    # box_type is the Win32 MessageBox style (0 = OK only, 1 = OK/Cancel);
    # the return value is the pressed-button code (1 = OK, 2 = Cancel).
    import win32api
    user_input = win32api.MessageBox(0, message, title, box_type)
    return user_input
def stop():
    """Terminate the updater immediately (equivalent to sys.exit())."""
    raise SystemExit
# Abort with a message box when the download page could not be fetched.
if page == None:
    msg_box('Could not download the page. You may not be connected to the internet.', 0)
    stop()
def find_site_ver(page, target='Downloading WinSCP'):
    """Extract the published WinSCP version and installer file name from the
    download-page HTML.

    Parameters
    ----------
    page : str
        HTML source of the download page.
    target : str
        Anchor text marking the download section. Defaults to the same value
        as the module-level ``target`` constant, so existing single-argument
        calls behave exactly as before; previously this function silently
        read the global, which made it untestable in isolation.

    Returns
    -------
    tuple
        (version, installer file name), e.g. ('5.1.5', 'winscp515setup.exe'),
        or (None, None) when the anchor text is not found on the page.
    """
    T1 = page.find(target)
    if T1 == -1:
        return None, None
    # Walk forward from the anchor: '>WinSCP <version><' then the
    # 'winscp...exe' installer file name.
    T2 = page.find('>WinSCP ', T1)
    T3 = page.find('<', T2)
    T4 = page.find('winscp', T3)
    T5 = page.find('.exe', T4)
    return page[T2+8:T3], page[T4:T5+4]  # 5.1.5, winscp515setup.exe
# Extract the published version and installer file name from the page.
try:
    site_version, FN = find_site_ver(page)
except:
    msg_box('Could not search the page.', 0)
    stop()
else:
    print 'Found: ' + site_version

if site_version == None:
    msg_box('The search target has not been found on the page. The formatting, or the text on the page, may have been changed.', 0)
    stop()

import os

# Paths: the installer is downloaded into %TEMP%; WinSCP itself is expected
# under %PROGRAMFILES%\WinSCP.
tmp = os.getenv('TEMP')
PF = os.getenv('PROGRAMFILES')
WinSCP_exe = PF + '\\WinSCP\\WinSCP.exe'
DL = tmp + '\\' + FN
# Silent, no-restart install of the downloaded setup executable.
command = [DL, '/SILENT', '/NORESTART']
def DL_file():
    # Download the installer from SourceForge into %TEMP%.
    # Uses the module-level site_version, FN and DL set above.
    import urllib
    url = 'http://downloads.sourceforge.net/project/winscp/WinSCP/' + site_version + '/' + FN
    urllib.urlretrieve(url, DL)
def sub_proc(command):
    """Run *command* through the shell, discard its captured stdout, and
    return the process exit code (0 means success)."""
    import subprocess
    child = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    child.communicate()
    return child.returncode
def download_install():
    # Download the installer, run it silently, and report the outcome in a
    # message box. Every branch ends in stop(), so this never returns.
    try:
        DL_file()
    except:
        msg_box('Failed to download ' + FN + ' to ' + tmp + '.', 0)
        stop()
    else:
        print 'Downloaded: ' + FN
    # RC is the installer's exit code; None means it could not be spawned.
    try:
        RC = sub_proc(command)
    except:
        RC = None
    if RC == None:
        msg_box('Failed to execute ' + FN + '.', 0)
        stop()
    elif RC == 0:
        msg_box('Successfully updated to version ' + site_version + '.', 0)
        stop()
    else:
        # Non-zero exit code: the process ran but the install failed.
        msg_box('Successfully spawned new process for ' + FN + '. But the installation appears to have failed.', 0)
        stop()
# Check if the WinSCP.exe file exists...
if not os.path.isfile(WinSCP_exe):
# No: Download and install WinSCP, and quit.
print 'WinSCP.exe file doesn\'t exist.'
print 'Installing WinSCP for the first time...'
download_install()
print 'Ending...'
delay(5)
stop()
import win32api

# Read the version resource of the installed WinSCP.exe and format it as
# 'major.minor.build.revision'.
try:
    info = win32api.GetFileVersionInfo(WinSCP_exe, "\\")
    ms = info['FileVersionMS']
    ls = info['FileVersionLS']
    file_version = "%d.%d.%d.%d" % (win32api.HIWORD(ms), win32api.LOWORD (ms),
                                    win32api.HIWORD (ls), win32api.LOWORD (ls))
except:
    msg_box('Cannot find the file version of the local WinSCP.exe file.', 0)
    stop()
else:
    print 'Got local file version information...'
# Check if the site_version numbers are in the file_version numbers...
def clean(text):
    """Return *text* with everything except the ASCII digits 0-9 removed."""
    return ''.join(ch for ch in text if '0' <= ch <= '9')
# Compare only as many digits as the site version provides,
# e.g. local '5.1.5.0' -> '5150' truncated to '515' vs site '5.1.5' -> '515'.
clean_site_version = clean(site_version)
clean_file_version = clean(file_version)[:len(clean_site_version)]
print 'Local version: ' + clean_file_version
print 'Site version: ' + clean_site_version
def delay(sec):
    """Block for *sec* seconds before continuing."""
    from time import sleep
    sleep(sec)
# Local version already matches the published version: nothing to do.
if clean_file_version.find(clean_site_version) != -1:
    # Yes: Quit.
    print 'Match!'
    print 'Ending...'
    delay(5)
    stop()
# Check if WinSCP is running...
def find_proc(exe):
    # Return True when a process caption containing *exe* appears in the
    # WMIC process list; falls through to an implicit None (falsy) otherwise.
    import subprocess
    cmd = 'WMIC PROCESS get Caption'
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    for line in proc.stdout:
        if line.find(exe) != -1:
            return True
# Prompt in a loop until WinSCP is closed (OK = re-check, Cancel = abort).
while find_proc('WinSCP.exe'):
    print 'WinSCP is running. Close WinSCP now!'
    user_input = msg_box('There is a new version of WinSCP available. Please close WinSCP and press OK to continue.', 1)
    if user_input == 1:
        pass
    elif user_input == 2:
        stop()
# Now download and install the new file...
# Give the user a chance to back up a custom WinSCP.ini first.
user_input = msg_box('If you use a custom WinSCP.ini file, back it up now and then press OK when you are ready to proceed with the update.', 1)
if user_input == 2:
    stop()
download_install()
print 'Ending...'
delay(5)
| J2897/WinSCP_Updater | Update_WinSCP.py | Python | gpl-3.0 | 4,220 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Add-on metadata shown in Blender's add-on manager.
bl_info = {
    "name": "Rigacar (Generates Car Rig)",
    "author": "David Gayerie",
    "version": (7, 0),
    "blender": (2, 83, 0),
    "location": "View3D > Add > Armature",
    "description": "Adds a deformation rig for vehicules, generates animation rig and bake wheels animation.",
    "wiki_url": "http://digicreatures.net/articles/rigacar.html",
    "tracker_url": "https://github.com/digicreatures/rigacar/issues",
    "category": "Rigging"}
# Standard Blender add-on reload shim: when the script is re-executed
# (F8 / "Reload Scripts"), reload the submodules instead of re-importing.
if "bpy" in locals():
    import importlib
    if "bake_operators" in locals():
        importlib.reload(bake_operators)
    if "car_rig" in locals():
        importlib.reload(car_rig)
    if "widgets" in locals():
        importlib.reload(widgets)
else:
    import bpy
    from . import bake_operators
    from . import car_rig
def enumerate_ground_sensors(bones):
    """Yield the ground-sensor bones in a fixed order: for each axle
    (front first, then back) the axle sensor, followed by every per-wheel
    sensor whose name carries the matching prefix."""
    for axle_name, wheel_prefix in (('GroundSensor.Axle.Ft', 'GroundSensor.Ft'),
                                    ('GroundSensor.Axle.Bk', 'GroundSensor.Bk')):
        axle_sensor = bones.get(axle_name)
        if axle_sensor is not None:
            yield axle_sensor
        for candidate in bones:
            if candidate.name.startswith(wheel_prefix):
                yield candidate
class RIGACAR_PT_mixin:
    """Shared predicates and drawing helpers for all Rigacar UI panels."""

    def __init__(self):
        # Property-split layout matches the Blender 2.8x UI convention.
        self.layout.use_property_split = True
        self.layout.use_property_decorate = False

    @classmethod
    def is_car_rig(cls, context):
        # True when the active object is an armature carrying the 'Car Rig'
        # custom property created by Rigacar.
        return context.object is not None and context.object.data is not None and 'Car Rig' in context.object.data

    @classmethod
    def is_car_rig_generated(cls, context):
        # The 'Car Rig' property holds a truthy value once the animation rig
        # has been generated.
        return cls.is_car_rig(context) and context.object.data['Car Rig']

    def display_generate_section(self, context):
        self.layout.operator(car_rig.POSE_OT_carAnimationRigGenerate.bl_idname, text='Generate')

    def display_bake_section(self, context):
        # Baking and clearing operators for steering and wheel rotation.
        self.layout.operator(bake_operators.ANIM_OT_carSteeringBake.bl_idname)
        self.layout.operator(bake_operators.ANIM_OT_carWheelsRotationBake.bl_idname)
        self.layout.operator(bake_operators.ANIM_OT_carClearSteeringWheelsRotation.bl_idname)

    def display_rig_props_section(self, context):
        # Expose the rig's custom properties driving suspension behaviour.
        layout = self.layout.column()
        layout.prop(context.object, '["wheels_on_y_axis"]', text="Wheels on Y axis")
        layout.prop(context.object, '["suspension_factor"]', text="Pitch factor")
        layout.prop(context.object, '["suspension_rolling_factor"]', text="Roll factor")

    def display_ground_sensors_section(self, context):
        # One UI section per ground-sensor bone, exposing its constraints.
        for ground_sensor in enumerate_ground_sensors(context.object.pose.bones):
            ground_projection_constraint = ground_sensor.constraints.get('Ground projection')
            self.layout.label(text=ground_sensor.name, icon='BONE_DATA')
            if ground_projection_constraint is not None:
                self.layout.prop(ground_projection_constraint, 'target', text='Ground')
                if ground_projection_constraint.target is not None:
                    self.layout.prop(ground_projection_constraint, 'shrinkwrap_type')
                    # project_limit is only meaningful for the PROJECT mode.
                    if ground_projection_constraint.shrinkwrap_type == 'PROJECT':
                        self.layout.prop(ground_projection_constraint, 'project_limit')
                    self.layout.prop(ground_projection_constraint, 'influence')
            ground_projection_limit_constraint = ground_sensor.constraints.get('Ground projection limitation')
            if ground_projection_limit_constraint is not None:
                self.layout.prop(ground_projection_limit_constraint, 'min_z', text='Min local Z')
                self.layout.prop(ground_projection_limit_constraint, 'max_z', text='Max local Z')
            self.layout.separator()
class RIGACAR_PT_rigProperties(bpy.types.Panel, RIGACAR_PT_mixin):
    """Main Rigacar panel in Properties > Object Data."""
    bl_label = "Rigacar"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "data"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Visible for any Rigacar armature, generated or not.
        return RIGACAR_PT_mixin.is_car_rig(context)

    def draw(self, context):
        # Before generation: a single Generate button.
        # After generation: rig properties plus the bake operators.
        if RIGACAR_PT_mixin.is_car_rig_generated(context):
            self.display_rig_props_section(context)
            self.layout.separator()
            self.display_bake_section(context)
        else:
            self.display_generate_section(context)
class RIGACAR_PT_groundSensorsProperties(bpy.types.Panel, RIGACAR_PT_mixin):
    """Ground-sensor sub-panel nested under the main Rigacar panel."""
    bl_label = "Ground Sensors"
    bl_parent_id = "RIGACAR_PT_rigProperties"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "data"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Ground sensors only exist after the rig has been generated.
        return RIGACAR_PT_mixin.is_car_rig_generated(context)

    def draw(self, context):
        self.display_ground_sensors_section(context)
class RIGACAR_PT_animationRigView(bpy.types.Panel, RIGACAR_PT_mixin):
    """Rigacar tab in the 3D View sidebar: generate / rig properties."""
    bl_category = "Rigacar"
    bl_label = "Animation Rig"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"

    @classmethod
    def poll(cls, context):
        return RIGACAR_PT_mixin.is_car_rig(context)

    def draw(self, context):
        if RIGACAR_PT_mixin.is_car_rig_generated(context):
            self.display_rig_props_section(context)
        else:
            self.display_generate_section(context)
class RIGACAR_PT_wheelsAnimationView(bpy.types.Panel, RIGACAR_PT_mixin):
    """Rigacar tab in the 3D View sidebar: wheel/steering bake operators."""
    bl_category = "Rigacar"
    bl_label = "Wheels animation"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"

    @classmethod
    def poll(cls, context):
        return RIGACAR_PT_mixin.is_car_rig_generated(context)

    def draw(self, context):
        self.display_bake_section(context)
class RIGACAR_PT_groundSensorsView(bpy.types.Panel, RIGACAR_PT_mixin):
    """Rigacar tab in the 3D View sidebar: ground-sensor constraints."""
    bl_category = "Rigacar"
    bl_label = "Ground Sensors"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        return RIGACAR_PT_mixin.is_car_rig_generated(context)

    def draw(self, context):
        self.display_ground_sensors_section(context)
def menu_entries(menu, context):
    """Append the deformation-rig entry to the Add > Armature menu."""
    menu.layout.operator(car_rig.OBJECT_OT_armatureCarDeformationRig.bl_idname, text="Car (deformation rig)", icon='AUTO')
# Panels registered/unregistered together by register()/unregister().
classes = (
    RIGACAR_PT_rigProperties,
    RIGACAR_PT_groundSensorsProperties,
    RIGACAR_PT_animationRigView,
    RIGACAR_PT_wheelsAnimationView,
    RIGACAR_PT_groundSensorsView,
)
def register():
    """Register the add-on: menu entry, UI panels and operator modules."""
    bpy.types.VIEW3D_MT_armature_add.append(menu_entries)
    for c in classes:
        bpy.utils.register_class(c)
    car_rig.register()
    bake_operators.register()
def unregister():
    """Unregister everything in the reverse order of register()."""
    bake_operators.unregister()
    car_rig.unregister()
    for c in classes:
        bpy.utils.unregister_class(c)
    bpy.types.VIEW3D_MT_armature_add.remove(menu_entries)
# Allow running the file directly from Blender's text editor.
if __name__ == "__main__":
    register()
| digicreatures/rigacar | __init__.py | Python | gpl-3.0 | 7,661 |
#!/usr/bin/python3
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import jsonschema
import yaml
from snapcraft.internal import common
class SnapcraftSchemaError(Exception):
@property
def message(self):
return self._message
def __init__(self, message):
self._message = message
class Validator:
def __init__(self, snapcraft_yaml=None):
"""Create a validation instance for snapcraft_yaml."""
self._snapcraft = snapcraft_yaml if snapcraft_yaml else {}
self._load_schema()
@property
def schema(self):
"""Return all schema properties."""
return self._schema['properties'].copy()
@property
def part_schema(self):
"""Return part-specific schema properties."""
sub = self.schema['parts']['patternProperties']
properties = sub['^(?!plugins$)[a-z0-9][a-z0-9+-\/]*$']['properties']
return properties
def _load_schema(self):
schema_file = os.path.abspath(os.path.join(
common.get_schemadir(), 'snapcraft.yaml'))
try:
with open(schema_file) as fp:
self._schema = yaml.load(fp)
except FileNotFoundError:
raise SnapcraftSchemaError(
'snapcraft validation file is missing from installation path')
def validate(self):
format_check = jsonschema.FormatChecker()
try:
jsonschema.validate(
self._snapcraft, self._schema, format_checker=format_check)
except jsonschema.ValidationError as e:
messages = [e.message]
path = []
while e.absolute_path:
element = e.absolute_path.popleft()
# assume numbers are indices and use 'xxx[123]' notation.
if isinstance(element, int):
path[-1] = '{}[{}]'.format(path[-1], element)
else:
path.append(str(element))
if path:
messages.insert(0, "The '{}' property does not match the "
"required schema:".format('/'.join(path)))
if e.cause:
messages.append('({})'.format(e.cause))
raise SnapcraftSchemaError(' '.join(messages))
| jonathon-love/snapcraft | snapcraft/_schema.py | Python | gpl-3.0 | 2,896 |
from d51.django.auth.decorators import auth_required
from django.contrib.sites.models import Site
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ImproperlyConfigured
from .services import load_service, SharingServiceInvalidForm
from .models import URL
SHARE_KEY='u'
@auth_required()
def share_url(request, service_name):
# TODO: this view needs testing
response = HttpResponseRedirect(request.GET.get('next', '/'))
url_to_share = request.GET.get(SHARE_KEY, None)
if url_to_share is None:
# TODO change to a 400
raise Http404
else:
full_url_to_share = 'http://%s%s' % ((Site.objects.get_current().domain, url_to_share)) if url_to_share.find(':') == -1 else url_to_share
url, created = URL.objects.get_or_create(
url=full_url_to_share,
)
try:
url.send(service_name, request.user, request.POST)
except SharingServiceInvalidForm:
service = load_service(service_name, url)
input = [] if request.method != 'POST' else [request.POST]
form = service.get_form_class()(*input)
templates, context = [
'sharing/%s/prompt.html'%service_name,
'sharing/prompt.html'
],{
'service_name':service_name,
'form': form,
'url':url_to_share,
'SHARE_KEY':SHARE_KEY,
'next':request.GET.get('next','/')
}
response = render_to_response(templates, context, context_instance=RequestContext(request))
except ImproperlyConfigured:
raise Http404
return response
| domain51/d51.django.apps.sharing | d51/django/apps/sharing/views.py | Python | gpl-3.0 | 1,793 |
#!/usr/bin/python3
import gpxpy
import datetime
import time
import os
import gpxpy.gpx
import sqlite3
import pl
import re
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
filebase = os.environ["XDG_DATA_HOME"]+"/"+os.environ["APP_ID"].split('_')[0]
def create_gpx():
    """Return a fresh GPX document pre-populated with one track
    containing one (empty) segment, ready for add_point()."""
    gpx = gpxpy.gpx.GPX()
    track = gpxpy.gpx.GPXTrack()
    segment = gpxpy.gpx.GPXTrackSegment()
    track.segments.append(segment)
    gpx.tracks.append(track)
    return gpx
def write_gpx(gpx, name, act_type):
    """Persist *gpx* to a timestamp-named file, encode a Google polyline
    preview of the (simplified) track, and record the activity.

    The raw XML is written before simplify() so the on-disk file keeps
    full detail; the polyline is built from the simplified track.
    Delegates the database insert to add_run().
    """
    tzname = None
    npoints = None
    # polyline encoder default values
    numLevels = 18
    zoomFactor = 2
    epsilon = 0.0
    forceEndpoints = True
    ts = int(time.time())
    filename = "%s/%i.gpx" % (filebase, ts)
    # context manager guarantees the file is closed even if the write fails
    with open(filename, 'w') as gpx_file:
        gpx_file.write(gpx.to_xml())
    gpx.simplify()
    trk = pl.read_gpx_trk(gpx.to_xml(), tzname, npoints, 2, None)
    try:
        polyline = pl.print_gpx_google_polyline(trk, numLevels, zoomFactor, epsilon, forceEndpoints)
    except UnboundLocalError as er:
        # pl raises this when the simplified track has too few points
        print(er)
        print("Not enough points to create a polyline")
        polyline = ""
    add_run(gpx, name, act_type, filename, polyline)
def add_point(gpx, lat, lng, elev):
    """Append one track point, timestamped now, to the first segment of *gpx*."""
    point = gpxpy.gpx.GPXTrackPoint(lat, lng, elevation=elev, time=datetime.datetime.now())
    gpx.tracks[0].segments[0].points.append(point)
def add_run(gpx, name, act_type, filename, polyline):
    """Insert one activity row (name, start time, distance, duration, type,
    gpx filename, encoded polyline) into the activities table.

    The connection is now closed in a finally block, so it is no longer
    leaked when table creation or the GPX statistics calls raise.
    """
    conn = sqlite3.connect('%s/activities.db' % filebase)
    try:
        cursor = conn.cursor()
        cursor.execute("""CREATE TABLE if not exists activities
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,name text, act_date text, distance text,
                  speed text, act_type text,filename text,polyline text)""")
        sql = "INSERT INTO activities VALUES (?,?,?,?,?,?,?,?)"
        start_time, end_time = gpx.get_time_bounds()
        l2d = '{:.3f}'.format(gpx.length_2d() / 1000.)
        moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx.get_moving_data()
        print(max_speed)
        maxspeed = 'Max speed: {:.2f}km/h'.format(max_speed * 60. ** 2 / 1000. if max_speed else 0)
        duration = 'Duration: {:.2f}min'.format(gpx.get_duration() / 60)
        print("-------------------------")
        print(name)
        print(start_time)
        print(l2d)
        print(maxspeed)
        print("-------------------------")
        # NOTE(review): the duration string is stored in the 'speed' column
        # and maxspeed is only printed, never stored — confirm intent.
        try:
            cursor.execute(sql, [None, name, start_time, l2d, duration, act_type, filename, polyline])
            conn.commit()
        except sqlite3.Error as er:
            print(er)
    finally:
        conn.close()
def get_runs():
    """Return up to 30 activity rows as plain dicts (most recent schema).

    Creates the data directory and the activities table on first use.
    The connection is closed in a finally block so it is not leaked when
    a statement raises.
    """
    os.makedirs(filebase, exist_ok=True)
    conn = sqlite3.connect('%s/activities.db' % filebase)
    conn.row_factory = sqlite3.Row  # rows become dict-convertible
    try:
        cursor = conn.cursor()
        cursor.execute("""CREATE TABLE if not exists activities
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,name text, act_date text, distance text,
                  speed text, act_type text,filename text,polyline text)""")
        sql = "SELECT * FROM activities LIMIT 30"
        ret_data = [dict(row) for row in cursor.execute(sql)]
    finally:
        conn.close()
    return ret_data
def get_units():
    """Return the preferred distance unit as a string, defaulting to
    'kilometers' (and persisting that default) on first run.

    Fixes two defects in the original: it returned the raw sqlite row
    tuple instead of the unit string when a setting existed (while the
    first-run branch returned the plain string "kilometers"), and it
    never closed the connection on that path.
    """
    os.makedirs(filebase, exist_ok=True)
    conn = sqlite3.connect('%s/activities.db' % filebase)
    try:
        cursor = conn.cursor()
        cursor.execute("""CREATE TABLE if not exists settings
                 (units text)""")
        cursor.execute("SELECT units FROM settings")
        data = cursor.fetchone()
        if data is None:
            print("NONESIES")
            cursor.execute("INSERT INTO settings VALUES ('kilometers')")
            conn.commit()
            return "kilometers"
        return data[0]
    finally:
        conn.close()
def set_units(label):
    """Persist *label* (e.g. 'kilometers') as the preferred distance unit."""
    os.makedirs(filebase, exist_ok=True)
    db = sqlite3.connect('%s/activities.db' % filebase)
    db.cursor().execute("UPDATE settings SET units=? WHERE 1", (label,))
    db.commit()
    db.close()
def onetime_db_fix():
    """One-shot migration: strip unit text from the 'distance' column,
    leaving only the numeric part.  Guarded by a '.dbfixed' marker file
    so it runs at most once.

    Fixes in this version: the marker file is written with a context
    manager (the original called ``dotfile.close`` without parentheses,
    so the file was never explicitly closed), the regex is a raw string,
    and rows whose distance has no numeric part are skipped instead of
    crashing on ``None.group``.
    """
    os.makedirs(filebase, exist_ok=True)
    marker = "%s/%s" % (filebase, ".dbfixed")
    if os.path.exists(marker):
        print("db already fixed")
        return
    print("Fixing db")
    conn = sqlite3.connect('%s/activities.db' % filebase)
    numonly = re.compile(r"(\d*\.\d*)")
    cursor = conn.cursor()
    sql = "UPDATE activities SET distance=? WHERE id=?"
    for row in get_runs():
        print(row["distance"])
        match = numonly.search(row["distance"])
        if match is None:
            # no numeric distance to extract; leave the row untouched
            continue
        print(match.group(0))
        cursor.execute(sql, (match.group(0), row["id"]))
    conn.commit()
    conn.close()
    with open(marker, "w") as dotfile:
        dotfile.write("db fixed")
def rm_run(run):
    """Delete the activity whose primary key equals *run*."""
    conn = sqlite3.connect('%s/activities.db' % filebase)
    cursor = conn.cursor()
    try:
        cursor.execute("DELETE from activities WHERE id=?", [run])
        conn.commit()
    except sqlite3.Error as er:
        print("-------------______---_____---___----____--____---___-----")
        print(er)
    conn.close()
def km_to_mi(km):
    """Convert a distance in kilometres to miles."""
    MILES_PER_KM = 0.62137
    return km * MILES_PER_KM
def get_data(gpx):
    """Return (moving_distance, moving_time) for the given GPX object.

    NOTE(review): the original took no arguments and referenced a global
    ``gpx`` that is never defined in this module, so every call raised
    NameError; the GPX object is now passed in explicitly.
    """
    moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx.get_moving_data()
    return moving_distance, moving_time
| VictorThompson/ActivityTracker | py/geepeeex.py | Python | gpl-3.0 | 5,539 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the auto-populated ``date_added``
        timestamp column to ``clone_product``, backfilling existing rows
        with 2014-08-03 00:00."""
        # Adding field 'Product.date_added'
        db.add_column(u'clone_product', 'date_added',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2014, 8, 3, 0, 0), blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the ``date_added`` column (data in
        that column is lost)."""
        # Deleting field 'Product.date_added'
        db.delete_column(u'clone_product', 'date_added')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clone.product': {
'Meta': {'object_name': 'Product'},
'base_price': ('django.db.models.fields.FloatField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'username': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['clone'] | indradhanush/Instamojo-Clone | clone/migrations/0002_auto__add_field_product_date_added.py | Python | gpl-3.0 | 4,593 |
class Video(object):
    """Lightweight wrapper around one video record from the API.

    Pulls the fields this add-on cares about out of the raw JSON dict
    and knows how to hand playback off to the hosting site's plugin.
    """

    def __init__(self, json):
        self.id = json['id']
        self.slug = json['slug']
        self.title = json['title']
        self.presenters = json['presenters']
        self.host = json['host']
        self.embed_code = json['embed_code']

    def presenter_names(self):
        """Return every presenter as 'First Last', comma-separated."""
        return ', '.join('%s %s' % (p['first_name'], p['last_name'])
                         for p in self.presenters)

    def url(self):
        """Build the plugin:// URL that delegates playback to the host's add-on."""
        return 'plugin://plugin.video.%s/?action=play_video&videoid=%s' % (self.host, self.embed_code)

    def is_available(self):
        """True when the record carries an embed code we can play."""
        return bool(self.embed_code)
| watsonbox/xbmc-confreaks | api/video.py | Python | gpl-3.0 | 558 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'hue-gui.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1161, 620)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.channel1Check = QtWidgets.QCheckBox(self.centralwidget)
self.channel1Check.setGeometry(QtCore.QRect(10, 10, 161, 17))
self.channel1Check.setChecked(True)
self.channel1Check.setObjectName("channel1Check")
self.channel2Check = QtWidgets.QCheckBox(self.centralwidget)
self.channel2Check.setGeometry(QtCore.QRect(10, 30, 151, 17))
self.channel2Check.setChecked(True)
self.channel2Check.setObjectName("channel2Check")
self.modeWidget = QtWidgets.QTabWidget(self.centralwidget)
self.modeWidget.setGeometry(QtCore.QRect(10, 60, 1131, 451))
self.modeWidget.setObjectName("modeWidget")
self.presetTab = QtWidgets.QWidget()
self.presetTab.setObjectName("presetTab")
self.presetModeWidget = QtWidgets.QTabWidget(self.presetTab)
self.presetModeWidget.setGeometry(QtCore.QRect(6, 10, 1101, 411))
self.presetModeWidget.setLayoutDirection(QtCore.Qt.LeftToRight)
self.presetModeWidget.setObjectName("presetModeWidget")
self.fixedTab = QtWidgets.QWidget()
self.fixedTab.setObjectName("fixedTab")
self.groupBox = QtWidgets.QGroupBox(self.fixedTab)
self.groupBox.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox.setObjectName("groupBox")
self.fixedList = QtWidgets.QListWidget(self.groupBox)
self.fixedList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.fixedList.setObjectName("fixedList")
self.fixedAdd = QtWidgets.QPushButton(self.groupBox)
self.fixedAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.fixedAdd.setObjectName("fixedAdd")
self.fixedDelete = QtWidgets.QPushButton(self.groupBox)
self.fixedDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.fixedDelete.setObjectName("fixedDelete")
self.presetModeWidget.addTab(self.fixedTab, "")
self.breathingTab = QtWidgets.QWidget()
self.breathingTab.setObjectName("breathingTab")
self.groupBox_2 = QtWidgets.QGroupBox(self.breathingTab)
self.groupBox_2.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox_2.setObjectName("groupBox_2")
self.breathingList = QtWidgets.QListWidget(self.groupBox_2)
self.breathingList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.breathingList.setObjectName("breathingList")
self.breathingAdd = QtWidgets.QPushButton(self.groupBox_2)
self.breathingAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.breathingAdd.setObjectName("breathingAdd")
self.breathingDelete = QtWidgets.QPushButton(self.groupBox_2)
self.breathingDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.breathingDelete.setObjectName("breathingDelete")
self.groupBox_11 = QtWidgets.QGroupBox(self.breathingTab)
self.groupBox_11.setGeometry(QtCore.QRect(240, 0, 321, 361))
self.groupBox_11.setObjectName("groupBox_11")
self.breathingSpeed = QtWidgets.QSlider(self.groupBox_11)
self.breathingSpeed.setGeometry(QtCore.QRect(15, 70, 31, 160))
self.breathingSpeed.setMaximum(4)
self.breathingSpeed.setProperty("value", 2)
self.breathingSpeed.setOrientation(QtCore.Qt.Vertical)
self.breathingSpeed.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.breathingSpeed.setObjectName("breathingSpeed")
self.label_2 = QtWidgets.QLabel(self.groupBox_11)
self.label_2.setGeometry(QtCore.QRect(10, 40, 62, 20))
self.label_2.setObjectName("label_2")
self.label_4 = QtWidgets.QLabel(self.groupBox_11)
self.label_4.setGeometry(QtCore.QRect(70, 60, 62, 20))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.groupBox_11)
self.label_5.setGeometry(QtCore.QRect(70, 210, 62, 20))
self.label_5.setObjectName("label_5")
self.presetModeWidget.addTab(self.breathingTab, "")
self.fadingTab = QtWidgets.QWidget()
self.fadingTab.setObjectName("fadingTab")
self.groupBox_3 = QtWidgets.QGroupBox(self.fadingTab)
self.groupBox_3.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox_3.setObjectName("groupBox_3")
self.fadingList = QtWidgets.QListWidget(self.groupBox_3)
self.fadingList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.fadingList.setObjectName("fadingList")
self.fadingAdd = QtWidgets.QPushButton(self.groupBox_3)
self.fadingAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.fadingAdd.setObjectName("fadingAdd")
self.fadingDelete = QtWidgets.QPushButton(self.groupBox_3)
self.fadingDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.fadingDelete.setObjectName("fadingDelete")
self.groupBox_12 = QtWidgets.QGroupBox(self.fadingTab)
self.groupBox_12.setGeometry(QtCore.QRect(240, 0, 321, 361))
self.groupBox_12.setObjectName("groupBox_12")
self.fadingSpeed = QtWidgets.QSlider(self.groupBox_12)
self.fadingSpeed.setGeometry(QtCore.QRect(15, 70, 31, 160))
self.fadingSpeed.setMaximum(4)
self.fadingSpeed.setProperty("value", 2)
self.fadingSpeed.setOrientation(QtCore.Qt.Vertical)
self.fadingSpeed.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.fadingSpeed.setObjectName("fadingSpeed")
self.label_9 = QtWidgets.QLabel(self.groupBox_12)
self.label_9.setGeometry(QtCore.QRect(10, 40, 62, 20))
self.label_9.setObjectName("label_9")
self.label_10 = QtWidgets.QLabel(self.groupBox_12)
self.label_10.setGeometry(QtCore.QRect(70, 60, 62, 20))
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(self.groupBox_12)
self.label_11.setGeometry(QtCore.QRect(70, 210, 62, 20))
self.label_11.setObjectName("label_11")
self.presetModeWidget.addTab(self.fadingTab, "")
self.marqueeTab = QtWidgets.QWidget()
self.marqueeTab.setObjectName("marqueeTab")
self.groupBox_4 = QtWidgets.QGroupBox(self.marqueeTab)
self.groupBox_4.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox_4.setObjectName("groupBox_4")
self.marqueeList = QtWidgets.QListWidget(self.groupBox_4)
self.marqueeList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.marqueeList.setObjectName("marqueeList")
self.marqueeAdd = QtWidgets.QPushButton(self.groupBox_4)
self.marqueeAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.marqueeAdd.setObjectName("marqueeAdd")
self.marqueeDelete = QtWidgets.QPushButton(self.groupBox_4)
self.marqueeDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.marqueeDelete.setObjectName("marqueeDelete")
self.groupBox_13 = QtWidgets.QGroupBox(self.marqueeTab)
self.groupBox_13.setGeometry(QtCore.QRect(240, 0, 321, 361))
self.groupBox_13.setObjectName("groupBox_13")
self.marqueeSpeed = QtWidgets.QSlider(self.groupBox_13)
self.marqueeSpeed.setGeometry(QtCore.QRect(15, 70, 31, 160))
self.marqueeSpeed.setMaximum(4)
self.marqueeSpeed.setProperty("value", 2)
self.marqueeSpeed.setOrientation(QtCore.Qt.Vertical)
self.marqueeSpeed.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.marqueeSpeed.setObjectName("marqueeSpeed")
self.label_15 = QtWidgets.QLabel(self.groupBox_13)
self.label_15.setGeometry(QtCore.QRect(10, 40, 62, 20))
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(self.groupBox_13)
self.label_16.setGeometry(QtCore.QRect(70, 60, 62, 20))
self.label_16.setObjectName("label_16")
self.label_17 = QtWidgets.QLabel(self.groupBox_13)
self.label_17.setGeometry(QtCore.QRect(70, 210, 62, 20))
self.label_17.setObjectName("label_17")
self.marqueeSize = QtWidgets.QSlider(self.groupBox_13)
self.marqueeSize.setGeometry(QtCore.QRect(185, 70, 31, 160))
self.marqueeSize.setMaximum(3)
self.marqueeSize.setProperty("value", 2)
self.marqueeSize.setOrientation(QtCore.Qt.Vertical)
self.marqueeSize.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.marqueeSize.setObjectName("marqueeSize")
self.label_18 = QtWidgets.QLabel(self.groupBox_13)
self.label_18.setGeometry(QtCore.QRect(240, 210, 62, 20))
self.label_18.setObjectName("label_18")
self.label_19 = QtWidgets.QLabel(self.groupBox_13)
self.label_19.setGeometry(QtCore.QRect(180, 40, 62, 20))
self.label_19.setObjectName("label_19")
self.label_20 = QtWidgets.QLabel(self.groupBox_13)
self.label_20.setGeometry(QtCore.QRect(240, 60, 62, 20))
self.label_20.setObjectName("label_20")
self.marqueeBackwards = QtWidgets.QCheckBox(self.groupBox_13)
self.marqueeBackwards.setGeometry(QtCore.QRect(20, 260, 89, 26))
self.marqueeBackwards.setObjectName("marqueeBackwards")
self.presetModeWidget.addTab(self.marqueeTab, "")
self.coverMarqueeTab = QtWidgets.QWidget()
self.coverMarqueeTab.setObjectName("coverMarqueeTab")
self.groupBox_5 = QtWidgets.QGroupBox(self.coverMarqueeTab)
self.groupBox_5.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox_5.setObjectName("groupBox_5")
self.coverMarqueeList = QtWidgets.QListWidget(self.groupBox_5)
self.coverMarqueeList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.coverMarqueeList.setObjectName("coverMarqueeList")
self.coverMarqueeAdd = QtWidgets.QPushButton(self.groupBox_5)
self.coverMarqueeAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.coverMarqueeAdd.setObjectName("coverMarqueeAdd")
self.coverMarqueeDelete = QtWidgets.QPushButton(self.groupBox_5)
self.coverMarqueeDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.coverMarqueeDelete.setObjectName("coverMarqueeDelete")
self.groupBox_15 = QtWidgets.QGroupBox(self.coverMarqueeTab)
self.groupBox_15.setGeometry(QtCore.QRect(240, 0, 321, 361))
self.groupBox_15.setObjectName("groupBox_15")
self.coverMarqueeSpeed = QtWidgets.QSlider(self.groupBox_15)
self.coverMarqueeSpeed.setGeometry(QtCore.QRect(15, 70, 31, 160))
self.coverMarqueeSpeed.setMaximum(4)
self.coverMarqueeSpeed.setProperty("value", 2)
self.coverMarqueeSpeed.setOrientation(QtCore.Qt.Vertical)
self.coverMarqueeSpeed.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.coverMarqueeSpeed.setObjectName("coverMarqueeSpeed")
self.label_27 = QtWidgets.QLabel(self.groupBox_15)
self.label_27.setGeometry(QtCore.QRect(10, 40, 62, 20))
self.label_27.setObjectName("label_27")
self.label_28 = QtWidgets.QLabel(self.groupBox_15)
self.label_28.setGeometry(QtCore.QRect(70, 60, 62, 20))
self.label_28.setObjectName("label_28")
self.label_29 = QtWidgets.QLabel(self.groupBox_15)
self.label_29.setGeometry(QtCore.QRect(70, 210, 62, 20))
self.label_29.setObjectName("label_29")
self.coverMarqueeBackwards = QtWidgets.QCheckBox(self.groupBox_15)
self.coverMarqueeBackwards.setGeometry(QtCore.QRect(20, 260, 89, 26))
self.coverMarqueeBackwards.setObjectName("coverMarqueeBackwards")
self.presetModeWidget.addTab(self.coverMarqueeTab, "")
self.pulseTab = QtWidgets.QWidget()
self.pulseTab.setObjectName("pulseTab")
self.groupBox_6 = QtWidgets.QGroupBox(self.pulseTab)
self.groupBox_6.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox_6.setObjectName("groupBox_6")
self.pulseList = QtWidgets.QListWidget(self.groupBox_6)
self.pulseList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.pulseList.setObjectName("pulseList")
self.pulseAdd = QtWidgets.QPushButton(self.groupBox_6)
self.pulseAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.pulseAdd.setObjectName("pulseAdd")
self.pulseDelete = QtWidgets.QPushButton(self.groupBox_6)
self.pulseDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.pulseDelete.setObjectName("pulseDelete")
self.groupBox_16 = QtWidgets.QGroupBox(self.pulseTab)
self.groupBox_16.setGeometry(QtCore.QRect(240, 0, 321, 361))
self.groupBox_16.setObjectName("groupBox_16")
self.pulseSpeed = QtWidgets.QSlider(self.groupBox_16)
self.pulseSpeed.setGeometry(QtCore.QRect(15, 70, 31, 160))
self.pulseSpeed.setMaximum(4)
self.pulseSpeed.setProperty("value", 2)
self.pulseSpeed.setOrientation(QtCore.Qt.Vertical)
self.pulseSpeed.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.pulseSpeed.setObjectName("pulseSpeed")
self.label_33 = QtWidgets.QLabel(self.groupBox_16)
self.label_33.setGeometry(QtCore.QRect(10, 40, 62, 20))
self.label_33.setObjectName("label_33")
self.label_34 = QtWidgets.QLabel(self.groupBox_16)
self.label_34.setGeometry(QtCore.QRect(70, 60, 62, 20))
self.label_34.setObjectName("label_34")
self.label_35 = QtWidgets.QLabel(self.groupBox_16)
self.label_35.setGeometry(QtCore.QRect(70, 210, 62, 20))
self.label_35.setObjectName("label_35")
self.presetModeWidget.addTab(self.pulseTab, "")
self.spectrumTab = QtWidgets.QWidget()
self.spectrumTab.setObjectName("spectrumTab")
self.groupBox_17 = QtWidgets.QGroupBox(self.spectrumTab)
self.groupBox_17.setGeometry(QtCore.QRect(0, 0, 321, 361))
self.groupBox_17.setObjectName("groupBox_17")
self.spectrumSpeed = QtWidgets.QSlider(self.groupBox_17)
self.spectrumSpeed.setGeometry(QtCore.QRect(15, 70, 31, 160))
self.spectrumSpeed.setMaximum(4)
self.spectrumSpeed.setProperty("value", 2)
self.spectrumSpeed.setOrientation(QtCore.Qt.Vertical)
self.spectrumSpeed.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.spectrumSpeed.setObjectName("spectrumSpeed")
self.label_39 = QtWidgets.QLabel(self.groupBox_17)
self.label_39.setGeometry(QtCore.QRect(10, 40, 62, 20))
self.label_39.setObjectName("label_39")
self.label_40 = QtWidgets.QLabel(self.groupBox_17)
self.label_40.setGeometry(QtCore.QRect(70, 60, 62, 20))
self.label_40.setObjectName("label_40")
self.label_41 = QtWidgets.QLabel(self.groupBox_17)
self.label_41.setGeometry(QtCore.QRect(70, 210, 62, 20))
self.label_41.setObjectName("label_41")
self.spectrumBackwards = QtWidgets.QCheckBox(self.groupBox_17)
self.spectrumBackwards.setGeometry(QtCore.QRect(20, 260, 89, 26))
self.spectrumBackwards.setObjectName("spectrumBackwards")
self.presetModeWidget.addTab(self.spectrumTab, "")
self.alternatingTab = QtWidgets.QWidget()
self.alternatingTab.setObjectName("alternatingTab")
self.groupBox_7 = QtWidgets.QGroupBox(self.alternatingTab)
self.groupBox_7.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox_7.setObjectName("groupBox_7")
self.alternatingList = QtWidgets.QListWidget(self.groupBox_7)
self.alternatingList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.alternatingList.setObjectName("alternatingList")
self.alternatingAdd = QtWidgets.QPushButton(self.groupBox_7)
self.alternatingAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.alternatingAdd.setObjectName("alternatingAdd")
self.alternatingDelete = QtWidgets.QPushButton(self.groupBox_7)
self.alternatingDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.alternatingDelete.setObjectName("alternatingDelete")
self.groupBox_18 = QtWidgets.QGroupBox(self.alternatingTab)
self.groupBox_18.setGeometry(QtCore.QRect(240, 0, 321, 361))
self.groupBox_18.setObjectName("groupBox_18")
self.alternatingSpeed = QtWidgets.QSlider(self.groupBox_18)
self.alternatingSpeed.setGeometry(QtCore.QRect(15, 70, 31, 160))
self.alternatingSpeed.setMaximum(4)
self.alternatingSpeed.setProperty("value", 2)
self.alternatingSpeed.setOrientation(QtCore.Qt.Vertical)
self.alternatingSpeed.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.alternatingSpeed.setObjectName("alternatingSpeed")
self.label_45 = QtWidgets.QLabel(self.groupBox_18)
self.label_45.setGeometry(QtCore.QRect(10, 40, 62, 20))
self.label_45.setObjectName("label_45")
self.label_46 = QtWidgets.QLabel(self.groupBox_18)
self.label_46.setGeometry(QtCore.QRect(70, 60, 62, 20))
self.label_46.setObjectName("label_46")
self.label_47 = QtWidgets.QLabel(self.groupBox_18)
self.label_47.setGeometry(QtCore.QRect(70, 210, 62, 20))
self.label_47.setObjectName("label_47")
self.alternatingSize = QtWidgets.QSlider(self.groupBox_18)
self.alternatingSize.setGeometry(QtCore.QRect(185, 70, 31, 160))
self.alternatingSize.setMaximum(3)
self.alternatingSize.setProperty("value", 2)
self.alternatingSize.setOrientation(QtCore.Qt.Vertical)
self.alternatingSize.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.alternatingSize.setObjectName("alternatingSize")
self.label_48 = QtWidgets.QLabel(self.groupBox_18)
self.label_48.setGeometry(QtCore.QRect(240, 210, 62, 20))
self.label_48.setObjectName("label_48")
self.label_49 = QtWidgets.QLabel(self.groupBox_18)
self.label_49.setGeometry(QtCore.QRect(180, 40, 62, 20))
self.label_49.setObjectName("label_49")
self.label_50 = QtWidgets.QLabel(self.groupBox_18)
self.label_50.setGeometry(QtCore.QRect(240, 60, 62, 20))
self.label_50.setObjectName("label_50")
self.alternatingBackwards = QtWidgets.QCheckBox(self.groupBox_18)
self.alternatingBackwards.setGeometry(QtCore.QRect(20, 260, 89, 26))
self.alternatingBackwards.setObjectName("alternatingBackwards")
self.alternatingMoving = QtWidgets.QCheckBox(self.groupBox_18)
self.alternatingMoving.setGeometry(QtCore.QRect(20, 290, 89, 26))
self.alternatingMoving.setObjectName("alternatingMoving")
self.presetModeWidget.addTab(self.alternatingTab, "")
self.candleTab = QtWidgets.QWidget()
self.candleTab.setObjectName("candleTab")
self.groupBox_8 = QtWidgets.QGroupBox(self.candleTab)
self.groupBox_8.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox_8.setObjectName("groupBox_8")
self.candleList = QtWidgets.QListWidget(self.groupBox_8)
self.candleList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.candleList.setObjectName("candleList")
self.candleAdd = QtWidgets.QPushButton(self.groupBox_8)
self.candleAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.candleAdd.setObjectName("candleAdd")
self.candleDelete = QtWidgets.QPushButton(self.groupBox_8)
self.candleDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.candleDelete.setObjectName("candleDelete")
self.presetModeWidget.addTab(self.candleTab, "")
self.wingsTab = QtWidgets.QWidget()
self.wingsTab.setObjectName("wingsTab")
self.groupBox_9 = QtWidgets.QGroupBox(self.wingsTab)
self.groupBox_9.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox_9.setObjectName("groupBox_9")
self.wingsList = QtWidgets.QListWidget(self.groupBox_9)
self.wingsList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.wingsList.setObjectName("wingsList")
self.wingsAdd = QtWidgets.QPushButton(self.groupBox_9)
self.wingsAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.wingsAdd.setObjectName("wingsAdd")
self.wingsDelete = QtWidgets.QPushButton(self.groupBox_9)
self.wingsDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.wingsDelete.setObjectName("wingsDelete")
self.groupBox_20 = QtWidgets.QGroupBox(self.wingsTab)
self.groupBox_20.setGeometry(QtCore.QRect(240, 0, 321, 361))
self.groupBox_20.setObjectName("groupBox_20")
self.wingsSpeed = QtWidgets.QSlider(self.groupBox_20)
self.wingsSpeed.setGeometry(QtCore.QRect(15, 70, 31, 160))
self.wingsSpeed.setMaximum(4)
self.wingsSpeed.setProperty("value", 2)
self.wingsSpeed.setOrientation(QtCore.Qt.Vertical)
self.wingsSpeed.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.wingsSpeed.setObjectName("wingsSpeed")
self.label_57 = QtWidgets.QLabel(self.groupBox_20)
self.label_57.setGeometry(QtCore.QRect(10, 40, 62, 20))
self.label_57.setObjectName("label_57")
self.label_58 = QtWidgets.QLabel(self.groupBox_20)
self.label_58.setGeometry(QtCore.QRect(70, 60, 62, 20))
self.label_58.setObjectName("label_58")
self.label_59 = QtWidgets.QLabel(self.groupBox_20)
self.label_59.setGeometry(QtCore.QRect(70, 210, 62, 20))
self.label_59.setObjectName("label_59")
self.presetModeWidget.addTab(self.wingsTab, "")
self.audioLevelTab = QtWidgets.QWidget()
self.audioLevelTab.setObjectName("audioLevelTab")
self.groupBox_21 = QtWidgets.QGroupBox(self.audioLevelTab)
self.groupBox_21.setGeometry(QtCore.QRect(240, 0, 321, 361))
self.groupBox_21.setObjectName("groupBox_21")
self.label_60 = QtWidgets.QLabel(self.groupBox_21)
self.label_60.setGeometry(QtCore.QRect(10, 30, 62, 20))
self.label_60.setObjectName("label_60")
self.label_61 = QtWidgets.QLabel(self.groupBox_21)
self.label_61.setGeometry(QtCore.QRect(10, 80, 81, 20))
self.label_61.setObjectName("label_61")
self.audioLevelTolerance = QtWidgets.QDoubleSpinBox(self.groupBox_21)
self.audioLevelTolerance.setGeometry(QtCore.QRect(10, 50, 68, 23))
self.audioLevelTolerance.setDecimals(1)
self.audioLevelTolerance.setProperty("value", 1.0)
self.audioLevelTolerance.setObjectName("audioLevelTolerance")
self.audioLevelSmooth = QtWidgets.QDoubleSpinBox(self.groupBox_21)
self.audioLevelSmooth.setGeometry(QtCore.QRect(10, 100, 68, 23))
self.audioLevelSmooth.setDecimals(0)
self.audioLevelSmooth.setProperty("value", 3.0)
self.audioLevelSmooth.setObjectName("audioLevelSmooth")
self.groupBox_10 = QtWidgets.QGroupBox(self.audioLevelTab)
self.groupBox_10.setGeometry(QtCore.QRect(0, 0, 231, 361))
self.groupBox_10.setObjectName("groupBox_10")
self.audioLevelList = QtWidgets.QListWidget(self.groupBox_10)
self.audioLevelList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.audioLevelList.setObjectName("audioLevelList")
self.audioLevelAdd = QtWidgets.QPushButton(self.groupBox_10)
self.audioLevelAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.audioLevelAdd.setObjectName("audioLevelAdd")
self.audioLevelDelete = QtWidgets.QPushButton(self.groupBox_10)
self.audioLevelDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.audioLevelDelete.setObjectName("audioLevelDelete")
self.presetModeWidget.addTab(self.audioLevelTab, "")
self.customTab = QtWidgets.QWidget()
self.customTab.setObjectName("customTab")
self.groupBox_22 = QtWidgets.QGroupBox(self.customTab)
self.groupBox_22.setGeometry(QtCore.QRect(630, 0, 321, 371))
self.groupBox_22.setObjectName("groupBox_22")
self.customSpeed = QtWidgets.QSlider(self.groupBox_22)
self.customSpeed.setGeometry(QtCore.QRect(15, 70, 31, 160))
self.customSpeed.setMaximum(4)
self.customSpeed.setProperty("value", 2)
self.customSpeed.setOrientation(QtCore.Qt.Vertical)
self.customSpeed.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.customSpeed.setObjectName("customSpeed")
self.label_62 = QtWidgets.QLabel(self.groupBox_22)
self.label_62.setGeometry(QtCore.QRect(10, 40, 62, 20))
self.label_62.setObjectName("label_62")
self.label_63 = QtWidgets.QLabel(self.groupBox_22)
self.label_63.setGeometry(QtCore.QRect(70, 60, 62, 20))
self.label_63.setObjectName("label_63")
self.label_64 = QtWidgets.QLabel(self.groupBox_22)
self.label_64.setGeometry(QtCore.QRect(70, 210, 62, 20))
self.label_64.setObjectName("label_64")
self.customMode = QtWidgets.QComboBox(self.groupBox_22)
self.customMode.setGeometry(QtCore.QRect(190, 70, 86, 25))
self.customMode.setObjectName("customMode")
self.customMode.addItem("")
self.customMode.addItem("")
self.customMode.addItem("")
self.label_65 = QtWidgets.QLabel(self.groupBox_22)
self.label_65.setGeometry(QtCore.QRect(190, 40, 62, 20))
self.label_65.setObjectName("label_65")
self.groupBox_19 = QtWidgets.QGroupBox(self.customTab)
self.groupBox_19.setGeometry(QtCore.QRect(0, 0, 611, 371))
self.groupBox_19.setObjectName("groupBox_19")
self.customTable = QtWidgets.QTableWidget(self.groupBox_19)
self.customTable.setGeometry(QtCore.QRect(10, 30, 471, 331))
self.customTable.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.customTable.setDragDropOverwriteMode(False)
self.customTable.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.customTable.setRowCount(40)
self.customTable.setColumnCount(2)
self.customTable.setObjectName("customTable")
item = QtWidgets.QTableWidgetItem()
self.customTable.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.customTable.setHorizontalHeaderItem(1, item)
self.customTable.verticalHeader().setVisible(False)
self.customEdit = QtWidgets.QPushButton(self.groupBox_19)
self.customEdit.setGeometry(QtCore.QRect(500, 40, 83, 28))
self.customEdit.setObjectName("customEdit")
self.presetModeWidget.addTab(self.customTab, "")
self.profileTab = QtWidgets.QWidget()
self.profileTab.setObjectName("profileTab")
self.groupBox_14 = QtWidgets.QGroupBox(self.profileTab)
self.groupBox_14.setGeometry(QtCore.QRect(0, 0, 421, 361))
self.groupBox_14.setObjectName("groupBox_14")
self.profileList = QtWidgets.QListWidget(self.groupBox_14)
self.profileList.setGeometry(QtCore.QRect(10, 50, 111, 291))
self.profileList.setObjectName("profileList")
self.profileAdd = QtWidgets.QPushButton(self.groupBox_14)
self.profileAdd.setGeometry(QtCore.QRect(130, 50, 83, 28))
self.profileAdd.setObjectName("profileAdd")
self.profileDelete = QtWidgets.QPushButton(self.groupBox_14)
self.profileDelete.setGeometry(QtCore.QRect(130, 90, 83, 28))
self.profileDelete.setObjectName("profileDelete")
self.profileRefresh = QtWidgets.QPushButton(self.groupBox_14)
self.profileRefresh.setGeometry(QtCore.QRect(130, 130, 83, 28))
self.profileRefresh.setObjectName("profileRefresh")
self.profileName = QtWidgets.QLineEdit(self.groupBox_14)
self.profileName.setGeometry(QtCore.QRect(300, 50, 113, 28))
self.profileName.setObjectName("profileName")
self.label_3 = QtWidgets.QLabel(self.groupBox_14)
self.label_3.setGeometry(QtCore.QRect(220, 50, 62, 20))
self.label_3.setObjectName("label_3")
self.presetModeWidget.addTab(self.profileTab, "")
self.animatedTab = QtWidgets.QWidget()
self.animatedTab.setObjectName("animatedTab")
self.groupBox_23 = QtWidgets.QGroupBox(self.animatedTab)
self.groupBox_23.setGeometry(QtCore.QRect(0, 0, 721, 371))
self.groupBox_23.setObjectName("groupBox_23")
self.animatedTable = QtWidgets.QTableWidget(self.groupBox_23)
self.animatedTable.setGeometry(QtCore.QRect(230, 30, 361, 331))
self.animatedTable.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.animatedTable.setDragDropOverwriteMode(False)
self.animatedTable.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.animatedTable.setRowCount(40)
self.animatedTable.setColumnCount(2)
self.animatedTable.setObjectName("animatedTable")
item = QtWidgets.QTableWidgetItem()
self.animatedTable.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.animatedTable.setHorizontalHeaderItem(1, item)
self.animatedTable.verticalHeader().setVisible(False)
self.animatedEdit = QtWidgets.QPushButton(self.groupBox_23)
self.animatedEdit.setGeometry(QtCore.QRect(610, 40, 83, 28))
self.animatedEdit.setObjectName("animatedEdit")
self.animatedList = QtWidgets.QListWidget(self.groupBox_23)
self.animatedList.setGeometry(QtCore.QRect(10, 40, 111, 291))
self.animatedList.setObjectName("animatedList")
self.animatedDelete = QtWidgets.QPushButton(self.groupBox_23)
self.animatedDelete.setGeometry(QtCore.QRect(130, 80, 83, 28))
self.animatedDelete.setObjectName("animatedDelete")
self.animatedAdd = QtWidgets.QPushButton(self.groupBox_23)
self.animatedAdd.setGeometry(QtCore.QRect(130, 40, 83, 28))
self.animatedAdd.setObjectName("animatedAdd")
self.animatedRoundName = QtWidgets.QLineEdit(self.groupBox_23)
self.animatedRoundName.setGeometry(QtCore.QRect(130, 120, 81, 25))
self.animatedRoundName.setObjectName("animatedRoundName")
self.groupBox_24 = QtWidgets.QGroupBox(self.animatedTab)
self.groupBox_24.setGeometry(QtCore.QRect(740, 0, 331, 371))
self.groupBox_24.setObjectName("groupBox_24")
self.label_66 = QtWidgets.QLabel(self.groupBox_24)
self.label_66.setGeometry(QtCore.QRect(10, 40, 201, 20))
self.label_66.setObjectName("label_66")
self.animatedSpeed = QtWidgets.QDoubleSpinBox(self.groupBox_24)
self.animatedSpeed.setGeometry(QtCore.QRect(10, 70, 68, 26))
self.animatedSpeed.setDecimals(0)
self.animatedSpeed.setMinimum(15.0)
self.animatedSpeed.setMaximum(5000.0)
self.animatedSpeed.setSingleStep(10.0)
self.animatedSpeed.setProperty("value", 50.0)
self.animatedSpeed.setObjectName("animatedSpeed")
self.label_21 = QtWidgets.QLabel(self.groupBox_24)
self.label_21.setGeometry(QtCore.QRect(40, 130, 261, 71))
self.label_21.setWordWrap(True)
self.label_21.setObjectName("label_21")
self.presetModeWidget.addTab(self.animatedTab, "")
self.modeWidget.addTab(self.presetTab, "")
self.timesTab = QtWidgets.QWidget()
self.timesTab.setObjectName("timesTab")
self.label_7 = QtWidgets.QLabel(self.timesTab)
self.label_7.setGeometry(QtCore.QRect(30, 20, 461, 17))
self.label_7.setObjectName("label_7")
self.offTime = QtWidgets.QLineEdit(self.timesTab)
self.offTime.setGeometry(QtCore.QRect(30, 40, 113, 25))
self.offTime.setObjectName("offTime")
self.onTime = QtWidgets.QLineEdit(self.timesTab)
self.onTime.setGeometry(QtCore.QRect(30, 100, 113, 25))
self.onTime.setObjectName("onTime")
self.label_8 = QtWidgets.QLabel(self.timesTab)
self.label_8.setGeometry(QtCore.QRect(30, 80, 461, 17))
self.label_8.setObjectName("label_8")
self.label_12 = QtWidgets.QLabel(self.timesTab)
self.label_12.setGeometry(QtCore.QRect(160, 50, 131, 17))
self.label_12.setObjectName("label_12")
self.label_13 = QtWidgets.QLabel(self.timesTab)
self.label_13.setGeometry(QtCore.QRect(160, 110, 131, 17))
self.label_13.setObjectName("label_13")
self.label_14 = QtWidgets.QLabel(self.timesTab)
self.label_14.setGeometry(QtCore.QRect(30, 140, 341, 111))
font = QtGui.QFont()
font.setPointSize(11)
self.label_14.setFont(font)
self.label_14.setWordWrap(True)
self.label_14.setObjectName("label_14")
self.timeSave = QtWidgets.QPushButton(self.timesTab)
self.timeSave.setGeometry(QtCore.QRect(40, 290, 82, 25))
self.timeSave.setObjectName("timeSave")
self.modeWidget.addTab(self.timesTab, "")
self.applyBtn = QtWidgets.QPushButton(self.centralwidget)
self.applyBtn.setGeometry(QtCore.QRect(490, 530, 101, 41))
self.applyBtn.setObjectName("applyBtn")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(940, 20, 62, 20))
self.label.setObjectName("label")
self.portTxt = QtWidgets.QLineEdit(self.centralwidget)
self.portTxt.setGeometry(QtCore.QRect(1000, 20, 113, 28))
self.portTxt.setObjectName("portTxt")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(180, -10, 741, 91))
font = QtGui.QFont()
font.setPointSize(13)
self.label_6.setFont(font)
self.label_6.setWordWrap(True)
self.label_6.setObjectName("label_6")
self.unitLEDBtn = QtWidgets.QPushButton(self.centralwidget)
self.unitLEDBtn.setGeometry(QtCore.QRect(840, 50, 121, 21))
self.unitLEDBtn.setObjectName("unitLEDBtn")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.modeWidget.setCurrentIndex(0)
self.presetModeWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.channel1Check, self.channel2Check)
MainWindow.setTabOrder(self.channel2Check, self.modeWidget)
    def retranslateUi(self, MainWindow):
        """Install all user-visible strings on the widgets built by setupUi.

        Auto-generated by pyuic5 from the Qt Designer .ui file; every string
        is routed through QCoreApplication.translate so the UI can be
        localized. Do not edit by hand -- regenerate from the .ui file.
        """
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "hue_plus"))
        self.channel1Check.setText(_translate("MainWindow", "Channel 1"))
        self.channel2Check.setText(_translate("MainWindow", "Channel 2"))
        # Fixed tab
        self.groupBox.setTitle(_translate("MainWindow", "Colors"))
        self.fixedAdd.setText(_translate("MainWindow", "Add color"))
        self.fixedDelete.setText(_translate("MainWindow", "Delete color"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.fixedTab), _translate("MainWindow", "Fixed"))
        # Breathing tab
        self.groupBox_2.setTitle(_translate("MainWindow", "Colors"))
        self.breathingAdd.setText(_translate("MainWindow", "Add color"))
        self.breathingDelete.setText(_translate("MainWindow", "Delete color"))
        self.groupBox_11.setTitle(_translate("MainWindow", "Other"))
        self.label_2.setText(_translate("MainWindow", "Speed"))
        self.label_4.setText(_translate("MainWindow", "Fastest"))
        self.label_5.setText(_translate("MainWindow", "Slowest"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.breathingTab), _translate("MainWindow", "Breathing"))
        # Fading tab
        self.groupBox_3.setTitle(_translate("MainWindow", "Colors"))
        self.fadingAdd.setText(_translate("MainWindow", "Add color"))
        self.fadingDelete.setText(_translate("MainWindow", "Delete color"))
        self.groupBox_12.setTitle(_translate("MainWindow", "Other"))
        self.label_9.setText(_translate("MainWindow", "Speed"))
        self.label_10.setText(_translate("MainWindow", "Fastest"))
        self.label_11.setText(_translate("MainWindow", "Slowest"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.fadingTab), _translate("MainWindow", "Fading"))
        # Marquee tab
        self.groupBox_4.setTitle(_translate("MainWindow", "Colors"))
        self.marqueeAdd.setText(_translate("MainWindow", "Add color"))
        self.marqueeDelete.setText(_translate("MainWindow", "Delete color"))
        self.groupBox_13.setTitle(_translate("MainWindow", "Other"))
        self.label_15.setText(_translate("MainWindow", "Speed"))
        self.label_16.setText(_translate("MainWindow", "Fastest"))
        self.label_17.setText(_translate("MainWindow", "Slowest"))
        self.label_18.setText(_translate("MainWindow", "Smaller"))
        self.label_19.setText(_translate("MainWindow", "Size"))
        self.label_20.setText(_translate("MainWindow", "Larger"))
        self.marqueeBackwards.setText(_translate("MainWindow", "Backwards"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.marqueeTab), _translate("MainWindow", "Marquee"))
        # Covering Marquee tab
        self.groupBox_5.setTitle(_translate("MainWindow", "Colors"))
        self.coverMarqueeAdd.setText(_translate("MainWindow", "Add color"))
        self.coverMarqueeDelete.setText(_translate("MainWindow", "Delete color"))
        self.groupBox_15.setTitle(_translate("MainWindow", "Other"))
        self.label_27.setText(_translate("MainWindow", "Speed"))
        self.label_28.setText(_translate("MainWindow", "Fastest"))
        self.label_29.setText(_translate("MainWindow", "Slowest"))
        self.coverMarqueeBackwards.setText(_translate("MainWindow", "Backwards"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.coverMarqueeTab), _translate("MainWindow", "Covering Marquee"))
        # Pulse tab
        self.groupBox_6.setTitle(_translate("MainWindow", "Colors"))
        self.pulseAdd.setText(_translate("MainWindow", "Add color"))
        self.pulseDelete.setText(_translate("MainWindow", "Delete color"))
        self.groupBox_16.setTitle(_translate("MainWindow", "Other"))
        self.label_33.setText(_translate("MainWindow", "Speed"))
        self.label_34.setText(_translate("MainWindow", "Fastest"))
        self.label_35.setText(_translate("MainWindow", "Slowest"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.pulseTab), _translate("MainWindow", "Pulse"))
        # Spectrum tab
        self.groupBox_17.setTitle(_translate("MainWindow", "Other"))
        self.label_39.setText(_translate("MainWindow", "Speed"))
        self.label_40.setText(_translate("MainWindow", "Fastest"))
        self.label_41.setText(_translate("MainWindow", "Slowest"))
        self.spectrumBackwards.setText(_translate("MainWindow", "Backwards"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.spectrumTab), _translate("MainWindow", "Spectrum"))
        # Alternating tab
        self.groupBox_7.setTitle(_translate("MainWindow", "Colors"))
        self.alternatingAdd.setText(_translate("MainWindow", "Add color"))
        self.alternatingDelete.setText(_translate("MainWindow", "Delete color"))
        self.groupBox_18.setTitle(_translate("MainWindow", "Other"))
        self.label_45.setText(_translate("MainWindow", "Speed"))
        self.label_46.setText(_translate("MainWindow", "Fastest"))
        self.label_47.setText(_translate("MainWindow", "Slowest"))
        self.label_48.setText(_translate("MainWindow", "Smaller"))
        self.label_49.setText(_translate("MainWindow", "Size"))
        self.label_50.setText(_translate("MainWindow", "Larger"))
        self.alternatingBackwards.setText(_translate("MainWindow", "Backwards"))
        self.alternatingMoving.setText(_translate("MainWindow", "Moving"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.alternatingTab), _translate("MainWindow", "Alternating"))
        # Candle tab
        self.groupBox_8.setTitle(_translate("MainWindow", "Colors"))
        self.candleAdd.setText(_translate("MainWindow", "Add color"))
        self.candleDelete.setText(_translate("MainWindow", "Delete color"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.candleTab), _translate("MainWindow", "Candle"))
        # Wings tab
        self.groupBox_9.setTitle(_translate("MainWindow", "Colors"))
        self.wingsAdd.setText(_translate("MainWindow", "Add color"))
        self.wingsDelete.setText(_translate("MainWindow", "Delete color"))
        self.groupBox_20.setTitle(_translate("MainWindow", "Other"))
        self.label_57.setText(_translate("MainWindow", "Speed"))
        self.label_58.setText(_translate("MainWindow", "Fastest"))
        self.label_59.setText(_translate("MainWindow", "Slowest"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.wingsTab), _translate("MainWindow", "Wings"))
        # Audio Level tab
        self.groupBox_21.setTitle(_translate("MainWindow", "Other"))
        self.label_60.setText(_translate("MainWindow", "Tolerance"))
        self.label_61.setText(_translate("MainWindow", "Smoothness"))
        self.groupBox_10.setTitle(_translate("MainWindow", "Colors"))
        self.audioLevelAdd.setText(_translate("MainWindow", "Add color"))
        self.audioLevelDelete.setText(_translate("MainWindow", "Delete color"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.audioLevelTab), _translate("MainWindow", "Audio Level"))
        # Custom tab
        self.groupBox_22.setTitle(_translate("MainWindow", "Other"))
        self.label_62.setText(_translate("MainWindow", "Speed"))
        self.label_63.setText(_translate("MainWindow", "Fastest"))
        self.label_64.setText(_translate("MainWindow", "Slowest"))
        self.customMode.setItemText(0, _translate("MainWindow", "Fixed"))
        self.customMode.setItemText(1, _translate("MainWindow", "Breathing"))
        self.customMode.setItemText(2, _translate("MainWindow", "Wave"))
        self.label_65.setText(_translate("MainWindow", "Mode"))
        self.groupBox_19.setTitle(_translate("MainWindow", "Colors"))
        item = self.customTable.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "LED #"))
        item = self.customTable.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "Colors"))
        self.customEdit.setText(_translate("MainWindow", "Edit Color"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.customTab), _translate("MainWindow", "Custom"))
        # Profiles tab
        self.groupBox_14.setTitle(_translate("MainWindow", "Profiles"))
        self.profileAdd.setText(_translate("MainWindow", "Add profile"))
        self.profileDelete.setText(_translate("MainWindow", "Delete profile"))
        self.profileRefresh.setText(_translate("MainWindow", "Refresh"))
        self.profileName.setText(_translate("MainWindow", "profile1"))
        self.label_3.setText(_translate("MainWindow", "Name:"))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.profileTab), _translate("MainWindow", "Profiles"))
        # Custom Animated tab
        self.groupBox_23.setTitle(_translate("MainWindow", "Colors"))
        item = self.animatedTable.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "LED #"))
        item = self.animatedTable.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "Colors"))
        self.animatedEdit.setText(_translate("MainWindow", "Edit Color"))
        self.animatedDelete.setText(_translate("MainWindow", "Delete round"))
        self.animatedAdd.setText(_translate("MainWindow", "Add round"))
        self.animatedRoundName.setText(_translate("MainWindow", "round1"))
        self.groupBox_24.setTitle(_translate("MainWindow", "Other"))
        self.label_66.setText(_translate("MainWindow", "Speed between refresh, in ms"))
        self.label_21.setText(_translate("MainWindow", "To use, simply set a custom pattern for each round."))
        self.presetModeWidget.setTabText(self.presetModeWidget.indexOf(self.animatedTab), _translate("MainWindow", "Custom Animated"))
        self.modeWidget.setTabText(self.modeWidget.indexOf(self.presetTab), _translate("MainWindow", "Preset"))
        # Times tab
        self.label_7.setText(_translate("MainWindow", "The time to turn off the lights (in 24 hour time, separated by a colon)"))
        self.offTime.setText(_translate("MainWindow", "00:00"))
        self.onTime.setText(_translate("MainWindow", "00:00"))
        self.label_8.setText(_translate("MainWindow", "The time to turn on the lights (in 24 hour time, separated by a colon)"))
        self.label_12.setText(_translate("MainWindow", "00:00 means none"))
        self.label_13.setText(_translate("MainWindow", "00:00 means none"))
        self.label_14.setText(_translate("MainWindow", "This looks for a profile called previous and uses that. If that profile does not exist, the time will not work."))
        self.timeSave.setText(_translate("MainWindow", "Save"))
        self.modeWidget.setTabText(self.modeWidget.indexOf(self.timesTab), _translate("MainWindow", "Times"))
        # Window-level controls
        self.applyBtn.setText(_translate("MainWindow", "Apply"))
        self.label.setText(_translate("MainWindow", "Port:"))
        self.portTxt.setText(_translate("MainWindow", "/dev/ttyACM0"))
        self.label_6.setText(_translate("MainWindow", "Now with support for turning on and off at specific times, audio on Windows, a way to make your own modes, and more!"))
        self.unitLEDBtn.setText(_translate("MainWindow", "Toggle Unit LED"))
| kusti8/hue-plus | hue_plus/hue_gui.py | Python | gpl-3.0 | 45,771 |
# Standard library
import configparser
import csv
import distutils.core
import distutils.util  # used by the config parsing below (strtobool); imported explicitly instead of relying on distutils.core side effects
import itertools
import json
import os
import re
import sys
import time
import urllib.parse
import urllib.request
from glob import glob

# Third-party
import photohash
import praw
import requests
import tweepy
from gfycat.client import GfycatClient
from imgurpython import ImgurClient
from PIL import Image
# Location of the configuration file (parsed at startup in the __main__ block below)
CONFIG_FILE = 'config.ini'
def strip_title(title):
    """Return *title* unchanged if it fits in a tweet, else truncate it.

    Titles of 280 or more characters are cut to 276 characters followed by
    a three-character ellipsis (279 characters total).
    """
    return title if len(title) < 280 else title[:276] + '...'
def save_file(img_url, file_path):
    """Download *img_url* to *file_path*.

    Returns *file_path* on success (HTTP 200) or None on any other status.
    The existing file at *file_path* is overwritten if present.
    """
    resp = requests.get(img_url, stream=True)
    try:
        if resp.status_code == 200:
            with open(file_path, 'wb') as image_file:
                # Stream in reasonably sized chunks instead of the tiny
                # default produced by iterating the response directly.
                for chunk in resp.iter_content(chunk_size=8192):
                    image_file.write(chunk)
            # Return the path of the image, which is always the same since we just overwrite images
            return file_path
        else:
            print('[EROR] File failed to download. Status code: ' + str(resp.status_code))
            return
    finally:
        # BUG FIX: streamed responses hold the connection open until closed;
        # the original never closed it, leaking sockets on the error path.
        resp.close()
def get_media(img_url, post_id):
    """Download the media behind *img_url* for Reddit post *post_id*.

    Supports direct Reddit uploads (i.redd.it / i.reddituploads.com),
    Imgur images and galleries, Gfycat, and Giphy links. Returns the local
    file path on success, or None when the link is unsupported or the
    download/validation fails.
    """
    if any(s in img_url for s in ('i.redd.it', 'i.reddituploads.com')):
        file_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
        file_extension = os.path.splitext(img_url)[-1].lower()
        # Fix for issue with i.reddituploads.com links not having a file extension in the URL
        if not file_extension:
            file_extension += '.jpg'
            file_name += '.jpg'
            img_url += '.jpg'
        # Grab the GIF versions of .GIFV links
        # When Tweepy adds support for video uploads, we can use grab the MP4 versions
        if (file_extension == '.gifv'):
            file_extension = file_extension.replace('.gifv', '.gif')
            file_name = file_name.replace('.gifv', '.gif')
            img_url = img_url.replace('.gifv', '.gif')
        # Download the file
        file_path = IMAGE_DIR + '/' + file_name
        print('[ OK ] Downloading file at URL ' + img_url + ' to ' + file_path + ', file type identified as ' + file_extension)
        img = save_file(img_url, file_path)
        return img
    elif ('imgur.com' in img_url): # Imgur
        try:
            client = ImgurClient(IMGUR_CLIENT, IMGUR_CLIENT_SECRET)
        except BaseException as e:
            print ('[EROR] Error while authenticating with Imgur:', str(e))
            return
        # Working demo of regex: https://regex101.com/r/G29uGl/2
        regex = r"(?:.*)imgur\.com(?:\/gallery\/|\/a\/|\/)(.*?)(?:\/.*|\.|$)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Imgur image/gallery ID
            id = m.group(1)
            if any(s in img_url for s in ('/a/', '/gallery/')): # Gallery links
                images = client.get_album_images(id)
                # Only the first image in a gallery is used
                imgur_url = images[0].link
            else: # Single image
                imgur_url = client.get_image(id).link
            # If the URL is a GIFV link, change it to a GIF
            file_extension = os.path.splitext(imgur_url)[-1].lower()
            if (file_extension == '.gifv'):
                file_extension = file_extension.replace('.gifv', '.gif')
                # BUG FIX: the original stored the .gif URL in img_url but then
                # downloaded imgur_url, which still pointed at the .gifv, so the
                # saved file was never actually a GIF. Rewrite imgur_url itself.
                imgur_url = imgur_url.replace('.gifv', '.gif')
            # Download the image
            file_path = IMAGE_DIR + '/' + id + file_extension
            print('[ OK ] Downloading Imgur image at URL ' + imgur_url + ' to ' + file_path)
            imgur_file = save_file(imgur_url, file_path)
            # Imgur will sometimes return a single-frame thumbnail instead of a GIF, so we need to check for this
            if (file_extension == '.gif'):
                # Open the file using the Pillow library
                img = Image.open(imgur_file)
                # Get the MIME type
                mime = Image.MIME[img.format]
                if (mime == 'image/gif'):
                    # Image is indeed a GIF, so it can be posted
                    img.close()
                    return imgur_file
                else:
                    # Image is not actually a GIF, so don't post it
                    print('[EROR] Imgur has not processed a GIF version of this link, so it can not be posted')
                    img.close()
                    # Delete the image
                    try:
                        os.remove(imgur_file)
                    except BaseException as e:
                        print ('[EROR] Error while deleting media file:', str(e))
                    return
            else:
                return imgur_file
        else:
            print('[EROR] Could not identify Imgur image/gallery ID in this URL:', img_url)
            return
    elif ('gfycat.com' in img_url): # Gfycat
        gfycat_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
        client = GfycatClient()
        gfycat_info = client.query_gfy(gfycat_name)
        # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
        gfycat_url = gfycat_info['gfyItem']['max2mbGif']
        file_path = IMAGE_DIR + '/' + gfycat_name + '.gif'
        print('[ OK ] Downloading Gfycat at URL ' + gfycat_url + ' to ' + file_path)
        gfycat_file = save_file(gfycat_url, file_path)
        return gfycat_file
    elif ('giphy.com' in img_url): # Giphy
        # Working demo of regex: https://regex101.com/r/o8m1kA/2
        regex = r"https?://((?:.*)giphy\.com/media/|giphy.com/gifs/|i.giphy.com/)(.*-)?(\w+)(/|\n)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Giphy ID
            id = m.group(3)
            # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
            giphy_url = 'https://media.giphy.com/media/' + id + '/giphy-downsized.gif'
            file_path = IMAGE_DIR + '/' + id + '-downsized.gif'
            print('[ OK ] Downloading Giphy at URL ' + giphy_url + ' to ' + file_path)
            giphy_file = save_file(giphy_url, file_path)
            return giphy_file
        else:
            print('[EROR] Could not identify Giphy ID in this URL:', img_url)
            return
    else:
        print('[WARN] Post', post_id, 'doesn\'t point to an image/GIF:', img_url)
        return
def tweet_creator(subreddit_info):
    """Collect tweetable hot posts from *subreddit_info*.

    Returns a dict mapping each (possibly truncated) post title to
    [post id, media url, shortlink, author credit]. NSFW posts are skipped
    when disabled in the config.
    """
    post_dict = {}
    print ('[ OK ] Getting posts from Reddit')
    for submission in subreddit_info.hot(limit=POST_LIMIT):
        # BUG FIX: the original assigned a str to submission.author and then
        # set .name on that str, which raises AttributeError whenever the OP
        # has deleted their account. Use a local variable instead of mutating
        # the praw submission object.
        if submission.author is None:
            author_name = "a deleted user"
        else:
            author_name = "/u/" + submission.author.name
        if (submission.over_18 and NSFW_POSTS_ALLOWED is False):
            # Skip over NSFW posts if they are disabled in the config file
            print('[ OK ] Skipping', submission.id, 'because it is marked as NSFW')
            continue
        else:
            post_dict[strip_title(submission.title)] = [submission.id, submission.url, submission.shortlink, author_name]
    return post_dict
def setup_connection_reddit(subreddit):
    """Authenticate with Reddit and return a handle to *subreddit*."""
    print ('[ OK ] Setting up connection with Reddit...')
    reddit = praw.Reddit(
        user_agent='memebot',
        client_id=REDDIT_AGENT,
        client_secret=REDDIT_CLIENT_SECRET)
    return reddit.subreddit(subreddit)
def duplicate_check(id):
    """Return True if post *id* already appears in the cache CSV.

    Improvement over the original: returns as soon as a match is found
    instead of continuing to scan the rest of the file.
    """
    with open(CACHE_CSV, 'rt', newline='') as f:
        for row in csv.reader(f, delimiter=','):
            if id in row:
                return True
    return False
def hash_check(hash):
    """Return True if *hash* is empty or appears in the recent cache entries.

    Only the last REPOST_LIMIT lines of the cache file are consulted, so an
    image may be tweeted again once enough newer posts have been logged.
    """
    if not hash:
        # A hash that could not be computed is treated as "already seen" so
        # the bot never tweets an image it failed to fingerprint.
        return True
    with open(CACHE_CSV, 'rt', newline='') as f:
        recent_lines = f.readlines()[-REPOST_LIMIT:]
    return any(hash in line for line in recent_lines)
def log_post(id, hash, tweetID):
    """Append one row (post id, timestamp, image hash, tweet link) to the cache CSV."""
    timestamp = time.strftime("%d/%m/%Y") + ' ' + time.strftime("%H:%M:%S")
    with open(CACHE_CSV, 'a', newline='') as cache:
        csv.writer(cache, delimiter=',').writerow([id, timestamp, hash, tweetID])
def main():
    """Run one bot cycle: ensure the cache/media locations exist, then tweet new posts."""
    # Create the cache CSV with its header row on first run
    if not os.path.exists(CACHE_CSV):
        with open(CACHE_CSV, 'w', newline='') as cache:
            csv.writer(cache).writerow(['Post','Date and time','Image hash', 'Tweet link'])
        print ('[ OK ] ' + CACHE_CSV + ' file not found, created a new one')
    # Create the media download folder on first run
    if not os.path.exists(IMAGE_DIR):
        os.makedirs(IMAGE_DIR)
        print ('[ OK ] ' + IMAGE_DIR + ' folder not found, created a new one')
    # Fetch the subreddit's hot posts and tweet any that have not been posted yet
    tweeter(tweet_creator(setup_connection_reddit(SUBREDDIT_TO_MONITOR)))
def alt_tweeter(post_link, op, username, newestTweet):
    """Reply from the alternate account, crediting the Reddit OP with a link."""
    try:
        # Authenticate the alternate Twitter account
        alt_auth = tweepy.OAuthHandler(ALT_CONSUMER_KEY, ALT_CONSUMER_SECRET)
        alt_auth.set_access_token(ALT_ACCESS_TOKEN, ALT_ACCESS_TOKEN_SECRET)
        alt_api = tweepy.API(alt_auth)
        # Reply to the main account's newest tweet with attribution
        tweetText = '@' + username + ' Originally posted by ' + op + ' on Reddit: ' + post_link
        print('[ OK ] Posting this on alt Twitter account:', tweetText)
        alt_api.update_status(tweetText, newestTweet)
    except BaseException as e:
        print ('[EROR] Error while posting tweet on alt account:', str(e))
        return
def tweeter(post_dict):
    """Tweet every new post in *post_dict* with its media attached.

    For each entry (title -> [id, media url, shortlink, author]) the post is
    skipped if already logged, its media is downloaded, the image hash is
    checked for reposts, the tweet is posted and logged, an attribution reply
    is sent from the alt account, and the local media file is cleaned up.

    NOTE(review): api.me() and api.update_with_media() were removed in
    Tweepy 4.x -- this code presumably targets Tweepy 3.x; confirm the
    pinned dependency version.
    """
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_secret)
    api = tweepy.API(auth)
    for post in post_dict:
        # Grab post details from dictionary
        post_id = post_dict[post][0]
        if not duplicate_check(post_id): # Make sure post is not a duplicate
            file_path = get_media(post_dict[post][1], post_dict[post][0])
            post_link = post_dict[post][2]
            post_op = post_dict[post][3]
            # Make sure the post contains media (if it doesn't, then file_path would be blank)
            if (file_path):
                # Scan the image against previously-posted images
                try:
                    # NOTE: "hash" shadows the builtin here; kept for byte-compat.
                    hash = photohash.average_hash(file_path)
                    print ('[ OK ] Image hash check:', hash_check(hash))
                except:
                    # Set hash to an empty string if the check failed
                    # (hash_check("") returns True, so the post is treated as a repost)
                    hash = ""
                    print ('[WARN] Could not check image hash, skipping.')
                # Only make a tweet if the post has not already been posted (if repost protection is enabled)
                if ((REPOST_PROTECTION is True) and (hash_check(hash) is False)):
                    print ('[ OK ] Posting this on main twitter account:', post, file_path)
                    try:
                        # Post the tweet
                        api.update_with_media(filename=file_path, status=post)
                        # Log the tweet
                        username = api.me().screen_name
                        latestTweets = api.user_timeline(screen_name = username, count = 1, include_rts = False)
                        newestTweet = latestTweets[0].id_str
                        log_post(post_id, hash, 'https://twitter.com/' + username + '/status/' + newestTweet + '/')
                        # Post alt tweet
                        if ALT_ACCESS_TOKEN:
                            alt_tweeter(post_link, post_op, username, newestTweet)
                        else:
                            print('[WARN] No authentication info for alternate account in config.ini, skipping alt tweet.')
                        print('[ OK ] Sleeping for', DELAY_BETWEEN_TWEETS, 'seconds')
                        time.sleep(DELAY_BETWEEN_TWEETS)
                    except BaseException as e:
                        print ('[EROR] Error while posting tweet:', str(e))
                        # Log the post anyways
                        log_post(post_id, hash, 'Error while posting tweet: ' + str(e))
                else:
                    print ('[ OK ] Skipping', post_id, 'because it is a repost or Memebot previously failed to post it')
                    log_post(post_id, hash, 'Post was already tweeted or was identified as a repost')
                # Cleanup media file
                try:
                    os.remove(file_path)
                    print ('[ OK ] Deleted media file at ' + file_path)
                except BaseException as e:
                    print ('[EROR] Error while deleting media file:', str(e))
            else:
                print ('[ OK ] Ignoring', post_id, 'because there was not a media file downloaded')
        else:
            print ('[ OK ] Ignoring', post_id, 'because it was already posted')
if __name__ == '__main__':
    # Startup sequence: update check, config load, window title, then the
    # main loop which re-runs the bot forever with a delay between cycles.
    # Check for updates
    try:
        with urllib.request.urlopen("https://raw.githubusercontent.com/corbindavenport/memebot/update-check/current-version.txt") as url:
            s = url.read()
            new_version = s.decode("utf-8").rstrip()
            current_version = 3.0 # Current version of script
            if (current_version < float(new_version)):
                print('IMPORTANT: A new version of Memebot (' + str(new_version) + ') is available! (you have ' + str(current_version) + ')')
                print ('IMPORTANT: Get the latest update from here: https://github.com/corbindavenport/memebot/releases')
            else:
                print('[ OK ] You have the latest version of Memebot (' + str(current_version) + ')')
    except BaseException as e:
        print ('[EROR] Error while checking for updates:', str(e))
    # Make sure config file exists
    try:
        config = configparser.ConfigParser()
        config.read(CONFIG_FILE)
    except BaseException as e:
        print ('[EROR] Error while reading config file:', str(e))
        sys.exit()
    # Create variables from config file
    CACHE_CSV = config['BotSettings']['CacheFile']
    IMAGE_DIR = config['BotSettings']['MediaFolder']
    DELAY_BETWEEN_TWEETS = int(config['BotSettings']['DelayBetweenTweets'])
    POST_LIMIT = int(config['BotSettings']['PostLimit'])
    SUBREDDIT_TO_MONITOR = config['BotSettings']['SubredditToMonitor']
    # NOTE(review): distutils.util is referenced here but only distutils.core is
    # imported at the top of the file -- this works only because distutils.core
    # imports distutils.util as a side effect; import distutils.util explicitly.
    NSFW_POSTS_ALLOWED = bool(distutils.util.strtobool(config['BotSettings']['NSFWPostsAllowed']))
    REPOST_PROTECTION = bool(distutils.util.strtobool(config['RepostSettings']['RepostProtection']))
    REPOST_LIMIT = int(config['RepostSettings']['RepostLimit'])
    ACCESS_TOKEN = config['PrimaryTwitterKeys']['AccessToken']
    ACCESS_TOKEN_secret = config['PrimaryTwitterKeys']['AccessTokenSecret']
    CONSUMER_KEY = config['PrimaryTwitterKeys']['ConsumerKey']
    CONSUMER_SECRET = config['PrimaryTwitterKeys']['ConsumerSecret']
    ALT_ACCESS_TOKEN = config['AltTwitterKeys']['AccessToken']
    ALT_ACCESS_TOKEN_SECRET = config['AltTwitterKeys']['AccessTokenSecret']
    ALT_CONSUMER_KEY = config['AltTwitterKeys']['ConsumerKey']
    ALT_CONSUMER_SECRET = config['AltTwitterKeys']['ConsumerSecret']
    REDDIT_AGENT = config['Reddit']['Agent']
    REDDIT_CLIENT_SECRET = config['Reddit']['ClientSecret']
    IMGUR_CLIENT = config['Imgur']['ClientID']
    IMGUR_CLIENT_SECRET = config['Imgur']['ClientSecret']
    # Set the command line window title on Windows
    if os.name == 'nt':
        try:
            auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
            auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_secret)
            api = tweepy.API(auth)
            username = api.me().screen_name
            title = '@' + username + ' - Memebot'
        except:
            # Fall back to a generic title if Twitter authentication fails
            title = 'Memebot'
        os.system('title ' + title)
    # Run the main script
    while True:
        main()
        print('[ OK ] Sleeping for', DELAY_BETWEEN_TWEETS, 'seconds')
        time.sleep(DELAY_BETWEEN_TWEETS)
        print('[ OK ] Restarting main()...')
#------------------------------------------------------------------------------------------
#
# Copyright 2017 Robert Pengelly.
#
# This file is part of ppa-helper.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------------------------
# coding: utf-8
from __future__ import unicode_literals
import collections
import os
import shutil
import sys
# On Python 3 the os module handles text encoding natively, so the compat_*
# names can simply alias the stdlib functions.
if sys.version_info >= (3, 0):
    compat_getenv = os.getenv
    compat_expanduser = os.path.expanduser
    def compat_setenv(key, value, env=os.environ):
        # Plain assignment suffices: Python 3 environ is str-keyed.
        env[key] = value
else:
    # Environment variables should be decoded with filesystem encoding.
    # Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
    # NOTE(review): only compat_getenv gets a Python 2 fallback here;
    # compat_expanduser and compat_setenv are undefined on this branch --
    # confirm no Python 2 caller needs them.
    def compat_getenv(key, default=None):
        from .utils import get_filesystem_encoding
        env = os.getenv(key, default)
        if env:
            env = env.decode(get_filesystem_encoding())
        return env
# Python < 2.6.5 require kwargs to be bytes
# Probe at import time: because of the unicode_literals future import at the
# top of this file, 'x' below is a unicode keyword name, which raises
# TypeError on affected interpreters.
try:
    def _testfunc(x):
        pass
    _testfunc(**{'x': 0})
except TypeError:
    def compat_kwargs(kwargs):
        # Re-encode every keyword name as bytes for old Python 2 releases.
        return dict((bytes(k), v) for k, v in kwargs.items())
else:
    # Modern interpreters accept unicode keyword names; pass through unchanged.
    compat_kwargs = lambda kwargs: kwargs
if hasattr(shutil, 'get_terminal_size'):  # Python >= 3.3
    compat_get_terminal_size = shutil.get_terminal_size
else:
    _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])

    def compat_get_terminal_size(fallback=(80, 24)):
        """Backport of shutil.get_terminal_size() for Python < 3.3.

        Resolution order: the COLUMNS/LINES environment variables, then the
        output of ``stty size``, then the supplied *fallback* pair.
        Returns a ``terminal_size(columns, lines)`` namedtuple.
        """
        # Fix: 'subprocess' was never imported at module level, so the
        # ``stty`` fallback below raised NameError; import it locally on
        # this (rarely taken) path.
        import subprocess

        columns = compat_getenv('COLUMNS')
        if columns:
            columns = int(columns)
        else:
            columns = None
        lines = compat_getenv('LINES')
        if lines:
            lines = int(lines)
        else:
            lines = None

        if columns is None or lines is None or columns <= 0 or lines <= 0:
            try:
                sp = subprocess.Popen(
                    ['stty', 'size'],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, err = sp.communicate()
                _lines, _columns = map(int, out.split())
            except Exception:
                # No tty / stty unavailable: fall back to the caller's pair.
                _columns, _lines = _terminal_size(*fallback)
            if columns is None or columns <= 0:
                columns = _columns
            if lines is None or lines <= 0:
                lines = _lines
        return _terminal_size(columns, lines)
# archive_test.py - unit tests for archive.py
# Authors:
# * Erich Blume <[email protected]>
#
# Copyright 2011 Erich Blume <[email protected]>
#
# This file is part of Tiger Lily.
#
# Tiger Lily is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tiger Lily is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tiger Lily. If not, see <http://www.gnu.org/licenses/>.
#
"""This module provides unit tests for the ``tigerlily.utility.archive``
module.
As with all unit test modules, the tests it contains can be executed in many
ways, but most easily by going to the project root dir and executing
``python3 setup.py nosetests``.
"""
import unittest
import os
import tigerlily.utility.archive as ar
from tigerlily.sequences import parseFASTA
class ArchiveTests(unittest.TestCase):
    """Test harness for ``tigerlily.utility.archive.Archive`` class.

    """

    def setUp(self):
        """archive.py: Create the testing environment"""
        self.test_dir = os.path.join(os.path.dirname(__file__), 'test_archives')
        base_name = os.path.join(self.test_dir, 'test_fasta_archive')
        self.targz = base_name + '.tar.gz'
        self.tarbz2 = base_name + '.tar.bz2'
        self.tar = base_name + '.tar'
        self.zip = base_name + '.zip'

    def test_targz(self):
        "archive.py: Test .tar.gz archive support"
        self._check_archive(ar.Archive(filepath=self.targz))

    def test_tarbz2(self):
        "archive.py: Test .tar.bz2 archive support"
        self._check_archive(ar.Archive(filepath=self.tarbz2))

    def test_tar(self):
        "archive.py: Test .tar archive support"
        self._check_archive(ar.Archive(filepath=self.tar))

    def test_zip(self):
        "archive.py: Test .zip archive support"
        self._check_archive(ar.Archive(filepath=self.zip))

    def _check_archive(self, arch):
        "handler for testing an Archive object regardless of format"
        self.assertEqual(4, len(arch.getnames()))
        self.assertEqual(3, sum(1 for _ in arch.getfasta()))
        self.assertEqual(1, sum(1 for _ in arch.getnofasta()))

        # Not strictly this unit's responsibility, but cheap end-to-end
        # insurance: every FASTA member must parse into plausible records.
        for fasta_fileobj in arch.getfasta():
            for fasta_seq in parseFASTA(fasta_fileobj):
                self.assertGreater(len(fasta_seq.sequence), 0)
                self.assertTrue(fasta_seq.identifier.startswith('seq'))
| eblume/Tiger-Lily | tigerlily/utility/archive_test.py | Python | gpl-3.0 | 3,252 |
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._roberts.
Roberts similarity
"""
from typing import Any, Optional
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Roberts']
class Roberts(_TokenDistance):
    r"""Roberts similarity.

    For two multisets X and Y drawn from an alphabet S, Roberts similarity
    :cite:`Roberts:1986` is

        .. math::

            sim_{Roberts}(X, Y) =
            \frac{\Big[\sum_{i \in S} (X_i + Y_i) \cdot
            \frac{min(X_i, Y_i)}{max(X_i, Y_i)}\Big]}
            {\sum_{i \in S} (X_i + Y_i)}

    .. versionadded:: 0.4.0
    """

    def __init__(
        self, tokenizer: Optional[_Tokenizer] = None, **kwargs: Any
    ) -> None:
        """Initialize Roberts instance.

        Parameters
        ----------
        tokenizer : _Tokenizer
            A tokenizer instance from the :py:mod:`abydos.tokenizer` package
        **kwargs
            Arbitrary keyword arguments

        Other Parameters
        ----------------
        qval : int
            The length of each q-gram. Using this parameter and tokenizer=None
            will cause the instance to use the QGram tokenizer with this
            q value.

        .. versionadded:: 0.4.0
        """
        super(Roberts, self).__init__(tokenizer=tokenizer, **kwargs)

    def sim(self, src: str, tar: str) -> float:
        """Return the Roberts similarity of two strings.

        Parameters
        ----------
        src : str
            Source string (or QGrams/Counter objects) for comparison
        tar : str
            Target string (or QGrams/Counter objects) for comparison

        Returns
        -------
        float
            Roberts similarity

        Examples
        --------
        >>> cmp = Roberts()
        >>> cmp.sim('cat', 'hat')
        0.5
        >>> cmp.sim('Niall', 'Neil')
        0.36363636363636365
        >>> cmp.sim('aluminum', 'Catalan')
        0.11764705882352941
        >>> cmp.sim('ATCG', 'TAGC')
        0.0

        .. versionadded:: 0.4.0
        """
        if src == tar:
            return 1.0

        self._tokenize(src, tar)

        weighted_overlap = 0.0
        total_weight = 0.0
        for token in self._total().keys():
            src_count = self._src_tokens[token]
            tar_count = self._tar_tokens[token]
            weight = src_count + tar_count
            # Each token contributes its combined count, scaled by the
            # ratio of the smaller to the larger per-string count.
            weighted_overlap += (
                weight * min(src_count, tar_count) / max(src_count, tar_count)
            )
            total_weight += weight
        return weighted_overlap / total_weight
if __name__ == '__main__':
    # Run the doctest examples embedded in the docstrings above.
    import doctest
    doctest.testmod()
| chrislit/abydos | abydos/distance/_roberts.py | Python | gpl-3.0 | 3,233 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: turn ``Graph.slug`` into an indexed SlugField
    and ``Graph.name`` into a plain (un-indexed) CharField.

    NOTE: migrations are frozen history -- once applied anywhere, neither the
    operations nor the ``models`` snapshot below should be edited.
    """

    def forwards(self, orm):
        """Apply the schema change."""
        # Changing field 'Graph.slug'
        db.alter_column('muparse_graph', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=128, null=True))
        # Adding index on 'Graph', fields ['slug']
        db.create_index('muparse_graph', ['slug'])
        # Changing field 'Graph.name'
        db.alter_column('muparse_graph', 'name', self.gf('django.db.models.fields.CharField')(max_length=255))
        # Removing index on 'Graph', fields ['name']
        db.delete_index('muparse_graph', ['name'])

    def backwards(self, orm):
        """Revert the schema change (mirror of :meth:`forwards`)."""
        # Adding index on 'Graph', fields ['name']
        db.create_index('muparse_graph', ['name'])
        # Removing index on 'Graph', fields ['slug']
        db.delete_index('muparse_graph', ['slug'])
        # Changing field 'Graph.slug'
        db.alter_column('muparse_graph', 'slug', self.gf('django.db.models.fields.CharField')(max_length=128, null=True))
        # Changing field 'Graph.name'
        db.alter_column('muparse_graph', 'name', self.gf('django.db.models.fields.SlugField')(max_length=255))

    # Frozen ORM snapshot used by South to build the 'orm' object above.
    models = {
        'muparse.graph': {
            'Meta': {'ordering': "['name']", 'object_name': 'Graph'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['muparse.GraphCategory']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'muparse.graphcategory': {
            'Meta': {'ordering': "['name']", 'object_name': 'GraphCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'muparse.node': {
            'Meta': {'ordering': "['name']", 'object_name': 'Node'},
            'graphs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['muparse.Graph']", 'null': 'True', 'through': "orm['muparse.NodeGraphs']", 'blank': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['muparse.NodeGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        'muparse.nodegraphs': {
            'Meta': {'object_name': 'NodeGraphs'},
            'baseurl': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'graph': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['muparse.Graph']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['muparse.Node']"}),
            'pageurl': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'muparse.nodegroup': {
            'Meta': {'ordering': "['name']", 'object_name': 'NodeGroup'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        'muparse.savedsearch': {
            'Meta': {'object_name': 'SavedSearch'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'display_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'graphs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['muparse.NodeGraphs']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['muparse']
# Standard Library
from builtins import str
import os
import zipfile
from urllib.parse import quote_plus
from urllib.request import urlopen
# Third Party Stuff
from django import template
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import Q
# Spoken Tutorial Stuff
from creation.models import *
from creation.views import (
is_administrator,
is_contenteditor,
is_contributor,
is_domainreviewer,
is_external_contributor,
is_internal_contributor,
is_qualityreviewer,
is_videoreviewer,
is_language_manager
)
from spoken.forms import TutorialSearchForm
register = template.Library()
def format_component_title(name):
    """Turn an underscore_separated component name into a display title."""
    words = name.split('_')
    return ' '.join(words).capitalize()
def get_url_name(name):
    """Percent-encode *name* for use in a URL (spaces become '+')."""
    encoded = quote_plus(name)
    return encoded
def get_zip_content(path):
    """Return the list of member names inside the zip archive at *path*.

    Returns ``False`` when the archive cannot be opened or read (missing
    file, not a zip, corrupt archive) so template code can test the result
    truthily, matching the original contract.
    """
    try:
        # Fix: use a context manager so the underlying file handle is closed
        # deterministically (the original left it open until GC).
        with zipfile.ZipFile(path, 'r') as zf:
            return zf.namelist()
    except Exception:
        # Deliberate best-effort: any failure simply reports "no content".
        return False
def is_script_available(path):
    """Return True when an HTTP GET of *path* answers with status 200.

    Bug fix: the original referenced the undefined name ``script_path``
    (raising NameError inside the try, after which ``e.code`` raised an
    uncaught AttributeError); it now checks the *path* argument it receives
    and tolerates errors that carry no HTTP status code.
    """
    try:
        code = urlopen(path).code
    except Exception as e:
        # urllib.error.HTTPError exposes the status via .code; any other
        # failure (network error, bad URL) counts as unavailable.
        code = getattr(e, 'code', 0)
    if int(code) == 200:
        return True
    return False
def get_review_status_list(key):
    """Map a numeric review-status code (0-6) to its human-readable label."""
    labels = (
        'Pending',
        'Waiting for Admin Review',
        'Waiting for Domain Review',
        'Waiting for Quality Review',
        'Accepted',
        'Need Improvement',
        'Not Required',
    )
    return labels[key]
def get_review_status_class(key):
    """Map a numeric review-status code (0-6) to its Bootstrap CSS class."""
    css_classes = (
        'danger',
        'active',
        'warning',
        'info',
        'success',
        'danger',
        'success',
    )
    return css_classes[key]
def get_review_status_symbol(key):
    """Map a numeric review-status code (0-6) to its Font-Awesome icon classes."""
    icons = (
        'fa fa-1 fa-minus-circle review-pending-upload',
        'fa fa-1 fa-check-circle review-admin-review',
        'fa fa-1 fa-check-circle review-domain-review',
        'fa fa-1 fa-check-circle review-quality-review',
        'fa fa-1 fa-check-circle review-accepted',
        'fa fa-1 fa-times-circle review-pending-upload',
        'fa fa-1 fa-ban review-accepted',
    )
    return icons[key]
def get_username(key):
    # Resolve a User primary key to its username.
    # Raises User.DoesNotExist when no user has this id.
    user = User.objects.get(pk = key)
    return user.username
def get_last_video_upload_time(key):
    # Return the timestamp of the most recent contributor activity for the
    # given TutorialResource, falling back to ``key.updated``.
    rec = None
    try:
        rec = ContributorLog.objects.filter(tutorial_resource_id = key.id).order_by('-created')[0]
        tmpdt = key.updated
        # NOTE(review): ``rec`` is a single model instance here (because of
        # the ``[0]`` above), so iterating it below is suspicious -- the
        # resulting TypeError is swallowed by the bare ``except`` and the
        # function returns ``key.updated``. Verify whether the intent was to
        # iterate a queryset and return the newest ``created`` timestamp.
        for tmp in rec:
            tmpdt = rec.created
        return tmpdt
    except:
        # Best-effort fallback: no log entries (or the issue noted above).
        return key.updated
def get_component_name(comp):
    """Map a tutorial-component code (1-6) to its display name.

    Unknown codes yield an empty string, matching the original
    try/except-and-pass behaviour.
    """
    comps = {
        1: 'Outline',
        2: 'Script',
        3: 'Video',
        4: 'Slides',
        5: 'Codefiles',
        6: 'Assignment',
    }
    # dict.get() replaces the original try/except-KeyError-pass idiom;
    # ''.title() is '' so the unknown-code result is unchanged.
    return comps.get(comp, '').title()
def get_missing_component_reply(mcid):
    """Render the replies to a missing-component report as an HTML snippet.

    Returns '' when there are no replies; otherwise a '<br /><b>Replies:</b>'
    header followed by one <p> per reply.

    SECURITY NOTE: ``reply_message`` and ``username`` are interpolated into
    HTML without escaping; if this output is rendered with ``|safe`` it is an
    XSS vector. Consider django.utils.html.escape for untrusted input.
    """
    rows = TutorialMissingComponentReply.objects.filter(missing_component_id = mcid)
    # str.join avoids quadratic string concatenation in the loop.
    parts = [
        '<p>' + row.reply_message + '<b> -' + row.user.username + '</b></p>'
        for row in rows
    ]
    replies = ''.join(parts)
    if replies:
        replies = '<br /><b>Replies:</b>' + replies
    return replies
def formatismp4(path):
    '''
    ** Registered to be used in jinja template **
    Return True when *path* ends with an ``mp4`` or ``mov`` extension.
    (Fix: the original docstring mentioned only mp4, although mov has
    always been accepted as well.)
    '''
    # str.endswith with a tuple checks both suffixes in one call and is
    # equivalent to the original pair of path[-3:] comparisons.
    return path.endswith(('mp4', 'mov'))
def instruction_sheet(foss, lang):
    """Return the media URL of the instruction-sheet PDF for *foss*.

    Prefers the requested language and falls back to English; returns
    False when neither file exists on disk.
    """
    rel_prefix = 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Instruction-Sheet-'
    candidates = []
    if lang.name != 'English':
        candidates.append(lang.name)
    candidates.append('English')
    for language_name in candidates:
        rel_path = rel_prefix + language_name + '.pdf'
        if os.path.isfile(settings.MEDIA_ROOT + rel_path):
            return settings.MEDIA_URL + rel_path
    return False
def installation_sheet(foss, lang):
    """Return the media URL of the installation-sheet PDF for *foss*.

    Prefers the requested language and falls back to English; returns
    False when neither file exists on disk.
    """
    rel_prefix = 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Installation-Sheet-'
    candidates = []
    if lang.name != 'English':
        candidates.append(lang.name)
    candidates.append('English')
    for language_name in candidates:
        rel_path = rel_prefix + language_name + '.pdf'
        if os.path.isfile(settings.MEDIA_ROOT + rel_path):
            return settings.MEDIA_URL + rel_path
    return False
def brochure(foss, lang):
    """Return the media URL of the brochure PDF for *foss*.

    Prefers the requested language and falls back to English; returns
    False when neither file exists on disk.
    """
    rel_prefix = 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Brochure-'
    candidates = []
    if lang.name != 'English':
        candidates.append(lang.name)
    candidates.append('English')
    for language_name in candidates:
        rel_path = rel_prefix + language_name + '.pdf'
        if os.path.isfile(settings.MEDIA_ROOT + rel_path):
            return settings.MEDIA_URL + rel_path
    return False
def get_thumb_path(row, append_str):
    """Build the media URL for a tutorial thumbnail image."""
    filename = row.tutorial.replace(' ', '-') + '-' + append_str + '.png'
    return '%svideos/%s/%s/%s' % (settings.MEDIA_URL, row.foss_id, row.id, filename)
def get_srt_path(tr):
    """Build HTML <track> caption tags for a tutorial's SRT files.

    Always tries the English SRT; when the resource language is not English
    and a native-language SRT exists, appends a second track for it.

    Fix: removed a stray debug ``print`` of the native SRT path.
    NOTE(review): the native-language track is also emitted with
    srclang="en" -- presumably it should carry the native language code;
    confirm before changing.
    """
    rel = 'videos/%s/%s/%s' % (
        tr.tutorial_detail.foss_id,
        tr.tutorial_detail_id,
        tr.tutorial_detail.tutorial.replace(' ', '-'),
    )
    data = ''
    if os.path.isfile(settings.MEDIA_ROOT + rel + '-English.srt'):
        data = '<track kind="captions" src="' + settings.MEDIA_URL + rel + '-English.srt' + '" srclang="en" label="English"></track>'
    if tr.language.name != 'English':
        if os.path.isfile(settings.MEDIA_ROOT + rel + '-' + tr.language.name + '.srt'):
            data += '<track kind="captions" src="' + settings.MEDIA_URL + rel + '-' + tr.language.name + '.srt' + '" srclang="en" label="' + tr.language.name + '"></track>'
    return data
def get_video_visits(tr):
    """Increment and persist the hit counter of a tutorial resource,
    returning the new count."""
    tr.hit_count += 1
    tr.save()
    return tr.hit_count
def get_prerequisite(tr, td):
    """Return the 'foss/tutorial/language' URL fragment for the prerequisite
    tutorial *td*, preferring the language of *tr* and falling back to
    English; None when no published resource exists.

    Fixes: removed leftover debug ``print`` statements; narrowed the bare
    ``except:`` (which also swallowed SystemExit/KeyboardInterrupt) to
    ``except Exception``.
    """
    published = Q(status = 1) | Q(status = 2)
    try:
        tr_rec = TutorialResource.objects.get(published, tutorial_detail = td, language_id = tr.language_id)
        return get_url_name(td.foss.foss) + '/' + get_url_name(td.tutorial) + '/' + tr_rec.language.name
    except Exception:
        if tr.language.name != 'English':
            try:
                # Only need existence of a published English resource here.
                TutorialResource.objects.get(published, tutorial_detail = td, language__name = 'English')
                return get_url_name(td.foss.foss) + '/' + get_url_name(td.tutorial) + '/English'
            except Exception:
                return None
    return None
def get_prerequisite_from_td(td, lang):
    """Return the id of a published TutorialResource for tutorial *td* in
    language *lang*, falling back to English; None when neither exists.

    Fix: the bare ``except:`` clauses (which also swallow SystemExit and
    KeyboardInterrupt) are narrowed to ``except Exception``.
    """
    published = Q(status = 1) | Q(status = 2)
    try:
        return TutorialResource.objects.get(published, tutorial_detail = td, language_id = lang.id).id
    except Exception:
        pass
    if lang.name != 'English':
        try:
            return TutorialResource.objects.get(published, tutorial_detail = td, language__name = 'English').id
        except Exception:
            pass
    return None
def get_timed_script(script_path, timed_script_path):
    """Return the URL of the timed script if it is reachable (HTTP 200).

    Tries ``timed_script_path`` (or ``script_path + '-timed'`` when no timed
    path is given) first, then a variant with spaces/underscores replaced by
    hyphens. Returns '' when neither URL answers with status 200.

    Fix: removed leftover debug ``print`` statements.
    """
    if timed_script_path:
        timed_script = settings.SCRIPT_URL + timed_script_path
    else:
        timed_script = settings.SCRIPT_URL + script_path + '-timed'
    code = 0
    try:
        code = urlopen(timed_script).code
    except Exception:
        # Retry with the hyphenated naming convention used by older scripts.
        timed_script = settings.SCRIPT_URL + \
            script_path.replace(' ', '-').replace('_', '-') + '-timed'
        try:
            code = urlopen(timed_script).code
        except Exception:
            code = 0
    if int(code) == 200:
        return timed_script
    return ''
def tutorialsearch():
    # Inclusion-tag context provider: supplies an unbound TutorialSearchForm
    # to the search-form template (registered below via
    # register.inclusion_tag).
    context = {
        'form': TutorialSearchForm()
    }
    return context
def get_mp4_video(tr):
    """Return the relative media path of the tutorial's .mp4 rendition,
    or False when no such file exists on disk."""
    stem = tr.video[:-4]  # strip the 4-character extension (e.g. '.ogv')
    rel_path = 'videos/%s/%s/%s.mp4' % (
        tr.tutorial_detail.foss_id, tr.tutorial_detail_id, stem)
    if os.path.isfile(settings.MEDIA_ROOT + rel_path):
        return rel_path
    return False
# ---------------------------------------------------------------------------
# Template tag / filter registration. The inclusion tag renders the tutorial
# search form; every helper above is exposed to templates as a filter.
# ---------------------------------------------------------------------------
register.inclusion_tag('spoken/templates/tutorial_search_form.html')(tutorialsearch)
#register.filter('tutorialsearch', tutorialsearch)
register.filter('get_timed_script', get_timed_script)
register.filter('formatismp4', formatismp4)
register.filter('get_prerequisite_from_td', get_prerequisite_from_td)
register.filter('get_prerequisite', get_prerequisite)
register.filter('get_video_visits', get_video_visits)
register.filter('get_srt_path', get_srt_path)
register.filter('get_thumb_path', get_thumb_path)
register.filter('get_missing_component_reply', get_missing_component_reply)
register.filter('get_component_name', get_component_name)
register.filter('get_url_name', get_url_name)
register.filter('get_zip_content', get_zip_content)
# Role predicates imported from creation.views, re-exposed as filters.
register.filter('get_contributor', is_contributor)
register.filter('get_internal_contributor', is_internal_contributor)
register.filter('get_external_contributor', is_external_contributor)
register.filter('get_videoreviewer', is_videoreviewer)
register.filter('get_domainreviewer', is_domainreviewer)
register.filter('get_qualityreviewer', is_qualityreviewer)
register.filter('get_administrator', is_administrator)
register.filter('get_last_video_upload_time', get_last_video_upload_time)
register.filter('get_review_status_list', get_review_status_list)
register.filter('get_review_status_symbol', get_review_status_symbol)
register.filter('get_review_status_class', get_review_status_class)
register.filter('get_username', get_username)
register.filter('instruction_sheet', instruction_sheet)
register.filter('installation_sheet', installation_sheet)
register.filter('brochure', brochure)
register.filter('get_contenteditor', is_contenteditor)
register.filter('format_component_title', format_component_title)
register.filter('get_mp4_video', get_mp4_video)
register.filter('get_language_manager',is_language_manager)
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# The 'sysconfig' module requires Makefile and pyconfig.h files from
# Python installation. 'sysconfig' parses these files to get some
# information from them.
# TODO Verify that bundling Makefile and pyconfig.h is still required for Python 3.
import sysconfig
import os
from PyInstaller.utils.hooks import relpath_to_config_or_make
# Locate pyconfig.h (always present) and the Makefile (POSIX builds only).
_CONFIG_H = sysconfig.get_config_h_filename()
if hasattr(sysconfig, 'get_makefile_filename'):
    # sysconfig.get_makefile_filename is missing in Python < 2.7.9
    _MAKEFILE = sysconfig.get_makefile_filename()
else:
    _MAKEFILE = sysconfig._get_makefile_filename()
# Bundle the header at the relative location the frozen app will look for it.
datas = [(_CONFIG_H, relpath_to_config_or_make(_CONFIG_H))]
# The Makefile does not exist on all platforms, eg. on Windows
if os.path.exists(_MAKEFILE):
    datas.append((_MAKEFILE, relpath_to_config_or_make(_MAKEFILE)))
| ijat/Hotspot-PUTRA-Auto-login | PyInstaller-3.2/PyInstaller/hooks/hook-sysconfig.py | Python | gpl-3.0 | 1,238 |
#!/usr/bin/env python
"""
crate_anon/nlp_manager/number.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal ([email protected]).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**Number conversion functions.**
"""
from typing import Optional
def to_float(s: str) -> Optional[float]:
    """
    Convert a string to a float, or return ``None``.

    Normalization before parsing (note: not internationalized):

    - commas (thousands separators) are removed
    - Unicode minus (U+2212) and en dash (U+2013) become an ASCII hyphen
    """
    if not s:
        return None
    normalized = (
        s.replace(',', '')
         .replace('−', '-')  # Unicode minus
         .replace('–', '-')  # en dash
    )
    try:
        return float(normalized)
    except (TypeError, ValueError):
        return None
def to_pos_float(s: str) -> Optional[float]:
    """
    Convert a string to a positive float via :func:`to_float` and
    :func:`abs`. Returns ``None`` on failure.
    """
    value = to_float(s)
    return None if value is None else abs(value)
| RudolfCardinal/crate | crate_anon/nlp_manager/number.py | Python | gpl-3.0 | 1,874 |
"""!
@brief Cluster analysis algorithm: Expectation-Maximization Algorithm for Gaussian Mixture Model.
@details Implementation based on paper @cite article::ema::1.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import numpy
import random
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.cluster.kmeans import kmeans
from pyclustering.utils import pi, calculate_ellipse_description, euclidean_distance_square
from enum import IntEnum
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import patches
def gaussian(data, mean, covariance):
    """!
    @brief Evaluates the (multivariate) Gaussian density with the given mean
            and variance/covariance at every point of 'data'.

    @param[in] data (list): Points at which the density is evaluated.
    @param[in] mean (float|numpy.array): Mean vector (mathematical expectation).
    @param[in] covariance (float|numpy.array): Variance (1-D data) or covariance matrix.

    @return (list) Density value for each point in 'data'.

    """
    dimension = float(len(data[0]))

    # For 1-D data the "matrix pseudo-inverse" degenerates to a scalar
    # reciprocal of the variance.
    if dimension != 1.0:
        inv_variance = numpy.linalg.pinv(covariance)
    else:
        inv_variance = 1.0 / covariance

    divider = (pi * 2.0) ** (dimension / 2.0) * numpy.sqrt(numpy.linalg.norm(covariance))
    coefficient = 1.0 / divider if divider != 0.0 else float('inf')

    densities = []
    for point in data:
        delta = point - mean
        exponent = -0.5 * delta.dot(inv_variance).dot(numpy.transpose(delta))
        densities.append(coefficient * numpy.exp(exponent))

    return densities
class ema_init_type(IntEnum):
    """!
    @brief Enumeration of initialization types for Expectation-Maximization algorithm.

    """

    ## Means are randomly taken from input dataset and variance or covariance is calculated based on
    ## spherical data that belongs to the chosen means.
    RANDOM_INITIALIZATION = 0

    ## Two step initialization. The first is calculation of initial centers using K-Means++ method.
    ## The second is K-Means clustering using obtained centers in the first step. Obtained clusters
    ## and its centers are used for calculation of variance (covariance in case of multi-dimensional)
    ## data. This is the default strategy used by ema_initializer.initialize().
    KMEANS_INITIALIZATION = 1
class ema_initializer():
    """!
    @brief Provides services for preparing initial means and covariances for Expectation-Maximization algorithm.
    @details Initialization strategy is defined by enumerator 'ema_init_type': random initialization and
              kmeans with kmeans++ initialization. Here an example of initialization using kmeans strategy:

    @code
        from pyclustering.utils import read_sample
        from pyclustering.samples.definitions import FAMOUS_SAMPLES
        from pyclustering.cluster.ema import ema_initializer

        sample = read_sample(FAMOUS_SAMPLES.SAMPLE_OLD_FAITHFUL)
        amount_clusters = 2

        initial_means, initial_covariance = ema_initializer(sample, amount_clusters).initialize()
        print(initial_means)
        print(initial_covariance)
    @endcode

    """

    ## Upper bound on re-draws when a randomly chosen mean duplicates one
    ## that was already selected (random strategy only).
    __MAX_GENERATION_ATTEMPTS = 10

    def __init__(self, sample, amount):
        """!
        @brief Constructs EM initializer.

        @param[in] sample (list): Data that will be used by the EM algorithm.
        @param[in] amount (uint): Amount of clusters that should be allocated by the EM algorithm.

        """
        self.__sample = sample
        self.__amount = amount

    def initialize(self, init_type = ema_init_type.KMEANS_INITIALIZATION):
        """!
        @brief Calculates initial parameters for EM algorithm: means and covariances using
                specified strategy.

        @param[in] init_type (ema_init_type): Strategy for initialization.

        @return (float|list, float|numpy.array) Initial means and variance (covariance matrix in case multi-dimensional data).

        """
        if init_type == ema_init_type.KMEANS_INITIALIZATION:
            return self.__initialize_kmeans()
        elif init_type == ema_init_type.RANDOM_INITIALIZATION:
            return self.__initialize_random()
        # Unsupported strategy value.
        raise NameError("Unknown type of EM algorithm initialization is specified.")

    def __calculate_initial_clusters(self, centers):
        """!
        @brief Calculate Euclidean distance to each point from the each cluster.
        @brief Nearest points are captured by according clusters and as a result clusters are updated.

        @return (list) updated clusters as list of clusters. Each cluster contains indexes of objects from data.

        """
        clusters = [[] for _ in range(len(centers))]
        for index_point in range(len(self.__sample)):
            index_optim, dist_optim = -1, 0.0
            for index in range(len(centers)):
                dist = euclidean_distance_square(self.__sample[index_point], centers[index])
                # '(index == 0)' seeds the running minimum with the first center.
                if (dist < dist_optim) or (index == 0):
                    index_optim, dist_optim = index, dist
            clusters[index_optim].append(index_point)
        return clusters

    def __calculate_initial_covariances(self, initial_clusters):
        """!
        @brief Calculate a covariance matrix for each initial cluster.
        @details Singleton (or empty) clusters get a small random diagonal-free
                  perturbation matrix instead, since numpy.cov needs >= 2 points.

        @return (list) Covariance matrix (numpy.array) per cluster.

        """
        covariances = []
        for initial_cluster in initial_clusters:
            if len(initial_cluster) > 1:
                cluster_sample = [self.__sample[index_point] for index_point in initial_cluster]
                covariances.append(numpy.cov(cluster_sample, rowvar=False))
            else:
                dimension = len(self.__sample[0])
                covariances.append(numpy.zeros((dimension, dimension)) + random.random() / 10.0)
        return covariances

    def __initialize_random(self):
        """!
        @brief Pick initial means randomly from the sample and derive covariances
                from the clusters they induce.

        @return (list, list) Initial means and covariance matrices.

        """
        initial_means = []

        for _ in range(self.__amount):
            # NOTE(review): random.randint is inclusive on both ends, so
            # 'randint(0, len(sample)) - 1' yields -1..len-1; index -1 maps to
            # the last element, giving it a doubled selection probability.
            # Verify whether 'randint(0, len(sample) - 1)' was intended.
            mean = self.__sample[ random.randint(0, len(self.__sample)) - 1 ]
            attempts = 0
            while (mean in initial_means) and (attempts < ema_initializer.__MAX_GENERATION_ATTEMPTS):
                mean = self.__sample[ random.randint(0, len(self.__sample)) - 1 ]
                attempts += 1

            if attempts == ema_initializer.__MAX_GENERATION_ATTEMPTS:
                # Could not find an unused sample point: jitter the duplicate by up to +/-10%.
                mean = [ value + (random.random() - 0.5) * value * 0.2 for value in mean ]

            initial_means.append(mean)

        initial_clusters = self.__calculate_initial_clusters(initial_means)
        initial_covariance = self.__calculate_initial_covariances(initial_clusters)

        return initial_means, initial_covariance

    def __initialize_kmeans(self):
        """!
        @brief Run K-Means (seeded by K-Means++) and derive means and
                covariances from the resulting clusters.

        @return (list, list) Initial means and covariance matrices.

        """
        initial_centers = kmeans_plusplus_initializer(self.__sample, self.__amount).initialize()
        kmeans_instance = kmeans(self.__sample, initial_centers, ccore = True)
        kmeans_instance.process()

        means = kmeans_instance.get_centers()

        covariances = []
        initial_clusters = kmeans_instance.get_clusters()
        for initial_cluster in initial_clusters:
            if len(initial_cluster) > 1:
                cluster_sample = [ self.__sample[index_point] for index_point in initial_cluster ]
                covariances.append(numpy.cov(cluster_sample, rowvar=False))
            else:
                # Singleton cluster: numpy.cov is undefined, use a small random perturbation.
                dimension = len(self.__sample[0])
                covariances.append(numpy.zeros((dimension, dimension)) + random.random() / 10.0)

        return means, covariances
class ema_observer:
    """!
    @brief Observer of EM algorithm for collecting algorithm state on each step.
    @details It can be used to obtain whole picture about clustering process of EM algorithm. Allocated clusters,
              means and covariances are stored in observer on each step. Here an example of usage:

    @code
        from pyclustering.cluster.ema import ema, ema_observer
        from pyclustering.utils import read_sample
        from pyclustering.samples.definitions import SIMPLE_SAMPLES

        # Read data from text file.
        sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)

        # Create EM observer.
        observer = ema_observer()

        # Create EM algorithm to allocated four clusters and pass observer to it.
        ema_instance = ema(sample, 4, observer=observer)

        # Run clustering process.
        ema_instance.process()

        # Print amount of steps that were done by the algorithm.
        print("EMA steps:", observer.get_iterations())

        # Print evolution of means and covariances.
        print("Means evolution:", observer.get_evolution_means())
        print("Covariances evolution:", observer.get_evolution_covariances())

        # Print evolution of clusters.
        print("Clusters evolution:", observer.get_evolution_clusters())

        # Print final clusters.
        print("Allocated clusters:", observer.get_evolution_clusters()[-1])
    @endcode

    """

    def __init__(self):
        """!
        @brief Initializes EM observer.

        """
        self.__mean_history = []
        self.__covariance_history = []
        self.__cluster_history = []

    def __len__(self):
        """!
        @return (uint) Amount of iterations that were done by the EM algorithm.

        """
        return len(self.__mean_history)

    def get_iterations(self):
        """!
        @return (uint) Amount of iterations that were done by the EM algorithm.

        """
        return len(self.__mean_history)

    def get_evolution_means(self):
        """!
        @return (list) Mean of each cluster on each step of clustering.

        """
        return self.__mean_history

    def get_evolution_covariances(self):
        """!
        @return (list) Covariance matrix (or variance in case of one-dimensional data) of each cluster on each step of clustering.

        """
        return self.__covariance_history

    def get_evolution_clusters(self):
        """!
        @return (list) Allocated clusters on each step of clustering.

        """
        return self.__cluster_history

    def notify(self, means, covariances, clusters):
        """!
        @brief Stores the means, covariances and clusters reported by the
                algorithm for the current step.

        @param[in] means (list): Mean of each cluster on current step.
        @param[in] covariances (list): Covariances of each cluster on current step.
        @param[in] clusters (list): Allocated cluster on current step.

        """
        self.__mean_history.append(means)
        self.__covariance_history.append(covariances)
        self.__cluster_history.append(clusters)
class ema_visualizer:
    """!
    @brief Visualizer of EM algorithm's results.
    @details Provides services for visualization of particular features of the algorithm, for example,
              in case of two-dimensional dataset it shows covariance ellipses.

    """

    @staticmethod
    def show_clusters(clusters, sample, covariances, means, figure=None, display=True):
        """!
        @brief Draws clusters and in case of two-dimensional dataset draws their ellipses.
        @details Allocated figure by this method should be closed using `close()` method of this visualizer.

        @param[in] clusters (list): Clusters that were allocated by the algorithm.
        @param[in] sample (list): Dataset that were used for clustering.
        @param[in] covariances (list): Covariances of the clusters.
        @param[in] means (list): Means of the clusters.
        @param[in] figure (figure): If 'None' then a new figure is created, otherwise specified figure is used
                    for visualization.
        @param[in] display (bool): If 'True' then figure will be shown by the method, otherwise it should be
                    shown manually using matplotlib function 'plt.show()'.

        @return (figure) Figure where clusters were drawn.

        """
        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, sample)

        if figure is None:
            figure = visualizer.show(display=False)
        else:
            visualizer.show(figure=figure, display=False)

        # Covariance ellipses are only meaningful for two-dimensional data.
        if len(sample[0]) == 2:
            ema_visualizer.__draw_ellipses(figure, visualizer, clusters, covariances, means)

        if display is True:
            plt.show()

        return figure

    @staticmethod
    def close(figure):
        """!
        @brief Closes figure object that was used or allocated by the visualizer.

        @param[in] figure (figure): Figure object that was used or allocated by the visualizer.

        """
        plt.close(figure)

    @staticmethod
    def animate_cluster_allocation(data, observer, animation_velocity = 75, movie_fps = 1, save_movie = None):
        """!
        @brief Animates clustering process that is performed by EM algorithm.

        @param[in] data (list): Dataset that is used for clustering.
        @param[in] observer (ema_observer): EM observer that was used for collection information about clustering process.
        @param[in] animation_velocity (uint): Interval between frames in milliseconds (for run-time animation only).
        @param[in] movie_fps (uint): Defines frames per second (for rendering movie only).
        @param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.

        """
        figure = plt.figure()

        def init_frame():
            # First animation frame is simply iteration 0 of the recorded history.
            return frame_generation(0)

        def frame_generation(index_iteration):
            # Redraw the whole figure for the requested iteration recorded by the observer.
            figure.clf()

            figure.suptitle("EM algorithm (iteration: " + str(index_iteration) +")", fontsize = 18, fontweight = 'bold')

            clusters = observer.get_evolution_clusters()[index_iteration]
            covariances = observer.get_evolution_covariances()[index_iteration]
            means = observer.get_evolution_means()[index_iteration]

            ema_visualizer.show_clusters(clusters, data, covariances, means, figure, False)
            figure.subplots_adjust(top=0.85)

            return [figure.gca()]

        iterations = len(observer)
        cluster_animation = animation.FuncAnimation(figure, frame_generation, iterations, interval = animation_velocity, init_func = init_frame, repeat_delay = 5000)

        if save_movie is not None:
            cluster_animation.save(save_movie, writer='ffmpeg', fps=movie_fps, bitrate=1500)
        else:
            plt.show()

        plt.close(figure)

    @staticmethod
    def __draw_ellipses(figure, visualizer, clusters, covariances, means):
        # Draw one covariance ellipse per cluster, colored like the cluster's points.
        ax = figure.get_axes()[0]

        for index in range(len(clusters)):
            angle, width, height = calculate_ellipse_description(covariances[index])
            color = visualizer.get_cluster_color(index, 0)

            ema_visualizer.__draw_ellipse(ax, means[index][0], means[index][1], angle, width, height, color)

    @staticmethod
    def __draw_ellipse(ax, x, y, angle, width, height, color):
        # Skip degenerate (zero-sized) ellipses entirely; otherwise mark the
        # center with an 'x' and add a translucent ellipse patch.
        if (width > 0.0) and (height > 0.0):
            ax.plot(x, y, color=color, marker='x', markersize=6)
            ellipse = patches.Ellipse((x, y), width, height, alpha=0.2, angle=-angle, linewidth=2, fill=True, zorder=2, color=color)
            ax.add_patch(ellipse)
class ema:
    """!
    @brief Expectation-Maximization clustering algorithm for Gaussian Mixture Model (GMM).
    @details The algorithm provides only clustering services (unsupervised learning).
              Here an example of data clustering process:
    @code
        from pyclustering.cluster.ema import ema, ema_visualizer
        from pyclustering.utils import read_sample
        from pyclustering.samples.definitions import FCPS_SAMPLES

        # Read data from text file.
        sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)

        # Create EM algorithm to allocate three clusters.
        ema_instance = ema(sample, 3)

        # Run clustering process.
        ema_instance.process()

        # Get clustering results.
        clusters = ema_instance.get_clusters()
        covariances = ema_instance.get_covariances()
        means = ema_instance.get_centers()

        # Visualize obtained clustering results.
        ema_visualizer.show_clusters(clusters, sample, covariances, means)
    @endcode

    Here is clustering results of the Expectation-Maximization clustering algorithm where popular sample 'OldFaithful' was used.
    Initial random means and covariances were used in the example. The first step is presented on the left side of the figure and
    final result (the last step) is on the right side:

    @image html ema_old_faithful_clustering.png

    @see ema_visualizer
    @see ema_observer

    """

    def __init__(self, data, amount_clusters, means=None, variances=None, observer=None, tolerance=0.00001, iterations=100):
        """!
        @brief Initializes Expectation-Maximization algorithm for cluster analysis.

        @param[in] data (list): Dataset that should be analysed and where each point (object) is represented by the list of coordinates.
        @param[in] amount_clusters (uint): Amount of clusters that should be allocated.
        @param[in] means (list): Initial means of clusters (amount of means should be equal to amount of clusters for allocation).
                    If this parameter is 'None' then K-Means algorithm with K-Means++ method will be used for initialization by default.
        @param[in] variances (list): Initial cluster variances (or covariances in case of multi-dimensional data). Amount of
                    covariances should be equal to amount of clusters that should be allocated. If this parameter is 'None' then
                    K-Means algorithm with K-Means++ method will be used for initialization by default.
        @param[in] observer (ema_observer): Observer for gathering information about clustering process.
        @param[in] tolerance (float): Defines stop condition of the algorithm (when difference between current and
                    previous log-likelihood estimation is less then 'tolerance' then clustering is over).
        @param[in] iterations (uint): Additional stop condition parameter that defines maximum number of steps that can be
                    performed by the algorithm during clustering process.

        """
        self.__data = numpy.array(data)
        self.__amount_clusters = amount_clusters
        self.__tolerance = tolerance
        self.__iterations = iterations
        self.__observer = observer

        self.__means = means
        self.__variances = variances

        self.__verify_arguments()

        # Fall back to K-Means(++) seeding if the caller did not provide a complete initial model.
        if (means is None) or (variances is None):
            self.__means, self.__variances = ema_initializer(data, amount_clusters).initialize(ema_init_type.KMEANS_INITIALIZATION)

            # The initializer may produce fewer clusters than requested.
            if len(self.__means) != amount_clusters:
                self.__amount_clusters = len(self.__means)

        # rc[k][i]: responsibility (membership probability) of point i in cluster k.
        self.__rc = [ [0.0] * len(self.__data) for _ in range(amount_clusters) ]
        # pic[k]: mixing coefficient (prior weight) of cluster k.
        self.__pic = [1.0] * amount_clusters
        self.__clusters = []
        # gaussians[k][i]: Gaussian density of point i under cluster k's model.
        self.__gaussians = [ [] for _ in range(amount_clusters) ]
        self.__stop = False

    def process(self):
        """!
        @brief Run clustering process of the algorithm.

        @return (ema) Returns itself (EMA instance).

        """
        # Sentinel likelihoods guarantee the first loop condition is satisfied.
        previous_likelihood = -200000
        current_likelihood = -100000

        current_iteration = 0
        # Iterate E/M steps until convergence (log-likelihood change below
        # tolerance), a degenerate model is detected, or the iteration cap.
        while(self.__stop is False) and (abs(previous_likelihood - current_likelihood) > self.__tolerance) and (current_iteration < self.__iterations):
            self.__expectation_step()
            self.__maximization_step()

            current_iteration += 1

            self.__extract_clusters()
            self.__notify()

            previous_likelihood = current_likelihood
            current_likelihood = self.__log_likelihood()
            self.__stop = self.__get_stop_condition()

        self.__normalize_probabilities()
        return self

    def get_clusters(self):
        """!
        @return (list) Allocated clusters where each cluster is represented by list of indexes of points from dataset,
                        for example, two cluster may have following representation [[0, 1, 4], [2, 3, 5, 6]].

        """
        return self.__clusters

    def get_centers(self):
        """!
        @return (list) Corresponding centers (means) of clusters.

        """
        return self.__means

    def get_covariances(self):
        """!
        @return (list) Corresponding variances (or covariances in case of multi-dimensional data) of clusters.

        """
        return self.__variances

    def get_probabilities(self):
        """!
        @brief Returns 2-dimensional list with belong probability of each object from data to cluster correspondingly,
                where that first index is for cluster and the second is for point.

        @code
            # Get belong probablities
            probabilities = ema_instance.get_probabilities();

            # Show probability of the fifth element in the first and in the second cluster
            index_point = 5;
            print("Probability in the first cluster:", probabilities[0][index_point]);
            print("Probability in the second cluster:", probabilities[1][index_point]);
        @endcode

        @return (list) 2-dimensional list with belong probability of each object from data to cluster.

        """
        return self.__rc

    def __erase_empty_clusters(self):
        # Drop clusters that ended up with no points, keeping all per-cluster
        # state (means, covariances, priors, densities, responsibilities) aligned.
        clusters, means, variances, pic, gaussians, rc = [], [], [], [], [], []
        for index_cluster in range(len(self.__clusters)):
            if len(self.__clusters[index_cluster]) > 0:
                clusters.append(self.__clusters[index_cluster])
                means.append(self.__means[index_cluster])
                variances.append(self.__variances[index_cluster])
                pic.append(self.__pic[index_cluster])
                gaussians.append(self.__gaussians[index_cluster])
                rc.append(self.__rc[index_cluster])

        if len(self.__clusters) != len(clusters):
            self.__clusters, self.__means, self.__variances, self.__pic = clusters, means, variances, pic
            self.__gaussians, self.__rc = gaussians, rc
            self.__amount_clusters = len(self.__clusters)

    def __notify(self):
        # Forward the current model state to the observer, if one was supplied.
        if self.__observer is not None:
            self.__observer.notify(self.__means, self.__variances, self.__clusters)

    def __extract_clusters(self):
        # Hard assignment: each point goes to the cluster with the highest responsibility.
        self.__clusters = [[] for _ in range(self.__amount_clusters)]
        for index_point in range(len(self.__data)):
            candidates = []
            for index_cluster in range(self.__amount_clusters):
                candidates.append((index_cluster, self.__rc[index_cluster][index_point]))

            index_winner = max(candidates, key=lambda candidate: candidate[1])[0]
            self.__clusters[index_winner].append(index_point)

        self.__erase_empty_clusters()

    def __log_likelihood(self):
        # Total log-likelihood of the data under the current mixture model.
        likelihood = 0.0

        for index_point in range(len(self.__data)):
            particle = 0.0
            for index_cluster in range(self.__amount_clusters):
                particle += self.__pic[index_cluster] * self.__gaussians[index_cluster][index_point]

            # Guard against log(0) for points with vanishing density.
            if particle > 0.0:
                likelihood += numpy.log(particle)

        return likelihood

    def __probabilities(self, index_cluster, index_point):
        # Responsibility of 'index_cluster' for 'index_point' (Bayes rule over
        # the weighted densities). Falls back to 1.0 on a degenerate divider.
        divider = 0.0
        for i in range(self.__amount_clusters):
            divider += self.__pic[i] * self.__gaussians[i][index_point]

        if (divider != 0.0) and (divider != float('inf')):
            return self.__pic[index_cluster] * self.__gaussians[index_cluster][index_point] / divider

        return 1.0

    def __expectation_step(self):
        # E-step: recompute per-cluster densities and responsibilities for every point.
        self.__gaussians = [ [] for _ in range(self.__amount_clusters) ]
        for index in range(self.__amount_clusters):
            self.__gaussians[index] = gaussian(self.__data, self.__means[index], self.__variances[index])

        self.__rc = [ [0.0] * len(self.__data) for _ in range(self.__amount_clusters) ]
        for index_cluster in range(self.__amount_clusters):
            for index_point in range(len(self.__data)):
                self.__rc[index_cluster][index_point] = self.__probabilities(index_cluster, index_point)

    def __maximization_step(self):
        # M-step: re-estimate priors, means and covariances from responsibilities.
        # Clusters with zero total responsibility are removed from the model.
        self.__pic = []
        self.__means = []
        self.__variances = []

        amount_impossible_clusters = 0

        for index_cluster in range(self.__amount_clusters):
            mc = numpy.sum(self.__rc[index_cluster])

            if mc == 0.0:
                amount_impossible_clusters += 1
                continue

            self.__pic.append( mc / len(self.__data) )
            self.__means.append( self.__update_mean(self.__rc[index_cluster], mc) )
            self.__variances.append( self.__update_covariance(self.__means[-1], self.__rc[index_cluster], mc) )

        self.__amount_clusters -= amount_impossible_clusters

    def __get_stop_condition(self):
        # Stop early if any covariance collapsed to zero (degenerate Gaussian).
        for covariance in self.__variances:
            if numpy.linalg.norm(covariance) == 0.0:
                return True

        return False

    def __update_covariance(self, means, rc, mc):
        # Weighted covariance: sum of rc-weighted outer products of deviations,
        # normalized by the cluster's total responsibility mc.
        covariance = 0.0
        for index_point in range(len(self.__data)):
            deviation = numpy.array([self.__data[index_point] - means])
            covariance += rc[index_point] * deviation.T.dot(deviation)

        covariance = covariance / mc
        return covariance

    def __update_mean(self, rc, mc):
        # Weighted mean: responsibility-weighted sum of points divided by mc.
        mean = 0.0
        for index_point in range(len(self.__data)):
            mean += rc[index_point] * self.__data[index_point]

        mean = mean / mc
        return mean

    def __normalize_probabilities(self):
        # Ensure each point's responsibilities sum to 1 (within tolerance).
        for index_point in range(len(self.__data)):
            probability = 0.0
            for index_cluster in range(len(self.__clusters)):
                probability += self.__rc[index_cluster][index_point]

            if abs(probability - 1.0) > 0.000001:
                self.__normalize_probability(index_point, probability)

    def __normalize_probability(self, index_point, probability):
        # Rescale one point's responsibilities so they sum to 1.
        if probability == 0.0:
            return

        normalization = 1.0 / probability

        for index_cluster in range(len(self.__clusters)):
            self.__rc[index_cluster][index_point] *= normalization

    def __verify_arguments(self):
        """!
        @brief Verify input parameters for the algorithm and throw exception in case of incorrectness.

        """
        if len(self.__data) == 0:
            raise ValueError("Input data is empty (size: '%d')." % len(self.__data))

        if self.__amount_clusters < 1:
            raise ValueError("Amount of clusters (current value '%d') should be greater or equal to 1." %
                             self.__amount_clusters)
| annoviko/pyclustering | pyclustering/cluster/ema.py | Python | gpl-3.0 | 28,795 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**constants.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines **Foundations** package default constants through the :class:`Constants` class.
**Others:**
"""
from __future__ import unicode_literals
import os
import platform
import foundations
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = ["Constants"]
class Constants():
    """
    Defines **Foundations** package default constants.
    """

    application_name = "Foundations"
    """
    :param application_name: Package Application name.
    :type application_name: unicode
    """

    major_version = "2"
    """
    :param major_version: Package major version.
    :type major_version: unicode
    """
    minor_version = "1"
    """
    :param minor_version: Package minor version.
    :type minor_version: unicode
    """
    change_version = "0"
    """
    :param change_version: Package change version.
    :type change_version: unicode
    """
    # Full "major.minor.change" version string derived from the parts above.
    version = ".".join((major_version, minor_version, change_version))
    """
    :param version: Package version.
    :type version: unicode
    """

    logger = "Foundations_Logger"
    """
    :param logger: Package logger name.
    :type logger: unicode
    """
    # Index into "verbosity_labels" below (3 -> "Info").
    verbosity_level = 3
    """
    :param verbosity_level: Default logging verbosity level.
    :type verbosity_level: int
    """
    verbosity_labels = ("Critical", "Error", "Warning", "Info", "Debug")
    """
    :param verbosity_labels: Logging verbosity labels.
    :type verbosity_labels: tuple
    """
    logging_default_formatter = "Default"
    """
    :param logging_default_formatter: Default logging formatter name.
    :type logging_default_formatter: unicode
    """
    logging_separators = "*" * 96
    """
    :param logging_separators: Logging separators.
    :type logging_separators: unicode
    """

    default_codec = "utf-8"
    """
    :param default_codec: Default codec.
    :type default_codec: unicode
    """
    codec_error = "ignore"
    """
    :param codec_error: Default codec error behavior.
    :type codec_error: unicode
    """

    application_directory = os.sep.join(("Foundations", ".".join((major_version, minor_version))))
    """
    :param application_directory: Package Application directory.
    :type application_directory: unicode
    """
    # Provider directory is platform dependent: a visible directory on
    # Windows / Mac Os X, a hidden (dot-prefixed) directory on Linux.
    if platform.system() == "Windows" or platform.system() == "Microsoft" or platform.system() == "Darwin":
        provider_directory = "HDRLabs"
        """
        :param provider_directory: Package provider directory.
        :type provider_directory: unicode
        """
    elif platform.system() == "Linux":
        provider_directory = ".HDRLabs"
        """
        :param provider_directory: Package provider directory.
        :type provider_directory: unicode
        """
    # NOTE(review): on any other platform "provider_directory" is left
    # undefined and accessing it raises AttributeError — confirm intended.

    null_object = "None"
    """
    :param null_object: Default null object string.
    :type null_object: unicode
    """
| KelSolaar/Foundations | foundations/globals/constants.py | Python | gpl-3.0 | 3,184 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016/11/23 16:15
# @Author : xycfree
# @Link : http://example.org
# @Version : $
import os | xycfree/py_spider | baidu/__init__.py | Python | gpl-3.0 | 154 |
# -*- coding: utf-8 -*-
#
# This file is part of bd808's stashbot application
# Copyright (C) 2015 Bryan Davis and contributors
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from .bot import Stashbot

# Public API of the package.
__all__ = (
    'Stashbot',
)

# Reference the re-exported name so static analyzers do not flag the
# import as unused; any() discards the result at negligible cost.
any((
    Stashbot,
))
| bd808/tools-stashbot | stashbot/__init__.py | Python | gpl-3.0 | 845 |
#!/usr/bin/env python
# Command-line helper that deletes (and optionally purges) a Galaxy history
# through the Galaxy REST API. Python 2 script.
import os, sys
# Make the sibling "common" module importable regardless of cwd.
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import delete

try:
    # Require at least the <key> and <url> positional arguments.
    assert sys.argv[2]
except IndexError:
    print 'usage: %s key url [purge (true/false)] ' % os.path.basename( sys.argv[0] )
    sys.exit( 1 )

try:
    # Optional third argument toggles permanent purging; when it is absent
    # the IndexError leaves "data" as an empty payload.
    data = {}
    data[ 'purge' ] = sys.argv[3]
except IndexError:
    pass

delete( sys.argv[1], sys.argv[2], data )
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/scripts/api/history_delete_history.py | Python | gpl-3.0 | 389 |
import rospy
import time
from collections import deque
class Publisher(object):
    """Buffering fan-out helper for ROS publishers.

    Messages are queued under an alias via append() and flushed in FIFO
    order to the matching registered publisher when publish() is called.
    Python 2 module (uses the print statement).
    """

    def __init__(self):
        # Maps alias -> ROS publisher object.
        self.publishers = {}
        # FIFO of (alias, msg) tuples awaiting publication.
        self.queue = deque()

    def add_publisher(self, alias, publisher):
        """Register a publisher under the given alias."""
        self.publishers[alias] = publisher

    def publish(self):
        """Drain the queue, sending each buffered message to its publisher."""
        while len(self.queue) > 0:
            alias, msg = self.queue.popleft()
            # Trace each outgoing message; raises KeyError if the alias was
            # never registered via add_publisher().
            print "publishing " + alias + ":" + str(msg)
            self.publishers[alias].publish(msg)

    def append(self, alias, msg):
        """Buffer a message for later publication under the given alias."""
        self.queue.append((alias, msg))
| jgrizou/robot_omniwheel | catkin_ws/src/roslego/scripts/publisher.py | Python | gpl-3.0 | 541 |
import os, h5py, numpy
from scipy.sparse import csc_matrix
import ml2h5.task
from ml2h5 import VERSION_MLDATA
from ml2h5.converter import ALLOWED_SEPERATORS
class BaseHandler(object):
    """Base handler class.

    It is the base for classes to handle different data formats.
    It implicitly handles HDF5.

    @cvar str_type: string type to be used for variable length strings in h5py
    @type str_type: numpy.dtype

    @ivar fname: name of file to handle
    @type fname: string
    @ivar seperator: seperator to seperate variables in examples
    @type seperator: string
    """
    # NOTE(review): h5py.new_vlen / numpy.str are legacy APIs — confirm the
    # pinned h5py/numpy versions still provide them.
    str_type = h5py.new_vlen(numpy.str)

    def __init__(self, fname, seperator=None, compression=None, merge=False):
        """
        @param fname: name of in-file
        @type fname: string
        @param seperator: seperator used to seperate examples
        @type seperator: string
        @param compression: compression filter passed through to h5py dataset creation
        @param merge: whether variables of the same type should be merged into matrices
        """
        self.fname = fname
        self.compression = compression
        self.set_seperator(seperator)
        self.merge = merge

    def set_seperator(self, seperator):
        """Set the seperator to seperate variables in examples.

        @param seperator: seperator to use
        @type seperator: string
        """
        if seperator in ALLOWED_SEPERATORS:
            self.seperator = seperator
        else:
            # NOTE(review): "_" (gettext) is assumed to be installed globally
            # at runtime — otherwise this raises NameError instead.
            raise AttributeError(_("Seperator '%s' not allowed!" % seperator))

    def warn(self, msg):
        """Print a warning message.

        @param msg: message to print
        @type msg: string
        """
        # NOTE(review): this early return makes the print below unreachable,
        # i.e. warnings are currently silenced — confirm this is intentional.
        return
        print('WARNING: ' + msg)

    def _convert_to_ndarray(self,path,val):
        """converts a attribut to a set of ndarrays depending on the datatype

        @param path: path of the attribute in the h5 file
        @type path: string
        @param val: data of the attribute
        @type val: csc_matrix/ndarray
        @rtype: list of (string,ndarray) tuples
        """
        A=val
        out=[]
        dt = h5py.special_dtype(vlen=str)
        if type(A)==csc_matrix: # sparse
            # A sparse matrix becomes three datasets: indices, indptr and data.
            out.append((path+'_indices', A.indices))
            out.append((path+'_indptr', A.indptr))
            out.append((path, A.data))
        elif type(A)==list and len(A)>0 and type(A[0])==str:
            # List of strings: store as a variable-length string dataset.
            out.append((path, numpy.array(A, dtype=dt)))
        else: # dense
            out.append((path, numpy.array(A)))
        return out

    def get_data_as_list(self,data):
        """ this needs to `transpose' the data """
        dl=[]
        group=self.get_data_group(data)

        # Determine per-variable lengths; all must agree so examples line up.
        lengths=dict()
        for o in data['ordering']:
            x=data[group][o]
            #if numpy.issubdtype(x.dtype, numpy.int):
            #    data[group][o]=x.astype(numpy.float64)
            try:
                lengths[o]=data[group][o].shape[1]
            except (AttributeError, IndexError):
                lengths[o]=len(data[group][o])
        l=set(lengths.values())
        assert(len(l)==1)
        l=l.pop()

        # Build one row (example) per column index across all variables.
        for i in range(l):
            line=[]
            for o in data['ordering']:
                try:
                    line.extend(data[group][o][:,i])
                except:
                    line.append(data[group][o][i])
            dl.append(line)
        return dl

    def get_name(self):
        """Get dataset name from non-HDF5 file

        @return: comment
        @rtype: string
        """
        # without str() it might barf
        return str(os.path.basename(self.fname).split('.')[0])

    def get_data_group(self, data):
        # Group name holding the actual data ('data' unless overridden).
        if data and 'group' in data:
            return data['group']
        else:
            return 'data'

    def get_descr_group(self, data):
        # Group name holding the description (data group name + '_descr').
        if data and 'group' in data:
            return data['group'] + '_descr'
        else:
            return 'data_descr'

    def get_datatype(self, values):
        """Get data type of given values.

        @param values: list of values to check
        @type values: list
        @return: data type to use for conversion
        @rtype: numpy.int32/numpy.double/self.str_type
        """
        # Widen progressively: int -> double -> string; a single
        # non-numeric value forces the whole column to string.
        dtype = None

        for v in values:
            if isinstance(v, int):
                dtype = numpy.int32
            elif isinstance(v, float):
                dtype = numpy.double
            else: # maybe int/double in string
                try:
                    tmp = int(v)
                    if not dtype: # a previous nan might set it to double
                        dtype = numpy.int32
                except ValueError:
                    try:
                        tmp = float(v)
                        dtype = numpy.double
                    except ValueError:
                        return self.str_type

        return dtype

    def read(self):
        """Get data and description in-memory

        Retrieve contents from file.

        @return: example names, ordering and the examples
        @rtype: dict of: list of names, list of ordering and dict of examples
        """
        # we want the exception handled elsewhere
        if not h5py.is_hdf5(self.fname):
            return

        h5 = h5py.File(self.fname, 'r')

        contents = {
            'name': h5.attrs['name'],
            'comment': h5.attrs['comment'],
            'mldata': h5.attrs['mldata'],
        }

        # Task files store a fixed set of fields; data files carry an
        # explicit ordering dataset.
        if contents['comment']=='Task file':
            contents['task']=dict()
            contents['ordering']=list()
            group='task'
            for field in ml2h5.task.task_data_fields:
                if field in h5[group]:
                    contents['ordering'].append(field)
        else:
            contents['data']=dict()
            contents['ordering']=h5['/data_descr/ordering'][...].tolist()
            group='data'
        contents['group']=group

        if '/%s_descr/names' % group in h5:
            contents['names']=h5['/%s_descr/names' % group][...].tolist()
        if '/%s_descr/types' % group in h5:
            contents['types'] = h5['/%s_descr/types' % group ][...]

        for name in contents['ordering']:
            vname='/%s/%s' % (group, name)
            sp_indices=vname+'_indices'
            sp_indptr=vname+'_indptr'

            # Presence of companion *_indices/*_indptr datasets marks a
            # sparse (CSC) variable; otherwise load as a dense array.
            if sp_indices in h5['/%s' % group] and sp_indptr in h5['/%s' % group]:
                contents[group][name] = csc_matrix((h5[vname], h5[sp_indices], h5[sp_indptr])
                )
            else:
                d = numpy.array(h5[vname],order='F')
                try:
                    # Unwrap variable-length records when present.
                    d=d['vlen']
                except:
                    pass
                contents[group][name] = d
        h5.close()
        return contents

    def read_data_as_array(self):
        """Read data from file, and return an array

        @return: an array with all data
        @rtype: numpy ndarray
        """
        contents = self.read()
        #group = self.get_data_group(data)
        data = contents['data']
        ordering = contents['ordering']
        if len(data[ordering[0]].shape)>1:
            num_examples = data[ordering[0]].shape[1]
        else:
            num_examples = len(data[ordering[0]])
        # Stack every variable row-wise, then transpose so each row is one example.
        data_array = numpy.zeros((0, num_examples))
        for cur_feat in ordering:
            data_array = numpy.vstack([data_array, data[cur_feat]])
        return data_array.T

    def _get_merged(self, data):
        """Merge given data where appropriate.

        String arrays are not merged, but all int and all double are merged
        into one matrix.

        @param data: data structure as returned by read()
        @type data: dict
        @return: merged data structure
        @rtype: dict
        """
        merged = {}
        ordering = []
        path = ''
        idx = 0
        # Tracks the type of the merge run in progress ('int', 'double' or None).
        merging = None
        group = self.get_data_group(data)

        for name in data['ordering']:
            val = data[group][name]

            # Sparse matrices and their companion datasets are never merged.
            if type(val) == csc_matrix:
                merging = None
                path = name
                merged[path] = val
                ordering.append(path)
                continue

            if name.endswith('_indices') or name.endswith('_indptr'):
                merging = None
                path = name
                merged[path] = val
                continue

            if len(val) < 1: continue

            t = type(val[0])
            if t in [numpy.int32, numpy.int64]:
                # Consecutive int variables accumulate under one 'intN' path.
                if merging == 'int':
                    merged[path].append(val)
                else:
                    merging = 'int'
                    path = 'int' + str(idx)
                    ordering.append(path)
                    merged[path] = [val]
                    idx += 1
            elif t == numpy.double:
                # Consecutive double variables accumulate under one 'doubleN' path.
                if merging == 'double':
                    merged[path].append(val)
                else:
                    merging = 'double'
                    path = 'double' + str(idx)
                    ordering.append(path)
                    merged[path] = [val]
                    idx += 1
            else: # string or matrix
                merging = None
                if name.find('/') != -1: # / sep belongs to hdf5 path
                    path = name.replace('/', '+')
                    data['ordering'][data['ordering'].index(name)] = path
                else:
                    path = name
                ordering.append(path)
                merged[path] = val

        data[group] = {}
        for k in merged:
            if len(merged[k])==1:
                merged[k] = merged[k][0]
            data[group][k] = numpy.array(merged[k])
        data['ordering'] = ordering

        return data

    def write(self, data):
        """Write given data to HDF5 file.

        @param data: data to write to HDF5 file.
        @type data: dict of lists
        """
        # we want the exception handled elsewhere
        h5 = h5py.File(self.fname, 'w')

        h5.attrs['name'] = data['name']
        h5.attrs['mldata'] = VERSION_MLDATA
        h5.attrs['comment'] = data['comment']

        data_group = self.get_data_group(data)
        descr_group = self.get_descr_group(data)

        try:
            group = h5.create_group('/%s' % data_group)
            for path, val in data[data_group].items():
                # Sparse values expand into several datasets per variable.
                for path, val in self._convert_to_ndarray(path,val):
                    group.create_dataset(path, data=val, compression=self.compression)

            group = h5.create_group('/%s' % descr_group)
            names = numpy.array(data['names']).astype(self.str_type)
            if names.size > 0: # simple 'if names' throws exception if array
                group.create_dataset('names', data=names, compression=self.compression)

            ordering = numpy.array(data['ordering']).astype(self.str_type)
            if ordering.size > 0:
                group.create_dataset('ordering', data=ordering, compression=self.compression)

            if 'types' in data:
                types = numpy.array(data['types']).astype(self.str_type)
                group.create_dataset('types', data=types, compression=self.compression)
        except: # just do some clean-up
            # Remove the partially written file so no corrupt HDF5 remains.
            h5.close()
            os.remove(self.fname)
            raise
        else:
            h5.close()
| open-machine-learning/mldata-utils | ml2h5/converter/basehandler.py | Python | gpl-3.0 | 11,241 |
# coding: utf-8
from __future__ import unicode_literals
import re
from hashlib import sha1
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
determine_ext,
float_or_none,
int_or_none,
unified_strdate,
)
class ProSiebenSat1BaseIE(InfoExtractor):
    def _extract_video_info(self, url, clip_id):
        """Query the ProSiebenSat.1 VAS API for a clip and build its format list.

        Performs three JSON requests: clip metadata, source listing, and one
        URL lookup per source. Each request is authenticated with a SHA1-based
        client_id derived from self._SALT / self._TOKEN / self._CLIENT_NAME.

        @param url: webpage URL, reused verbatim as the API 'client_location'
        @param clip_id: numeric clip identifier
        @return: dict with 'duration' and sorted 'formats'
        @raises ExtractorError: if the clip is DRM protected or unavailable
        """
        client_location = url

        video = self._download_json(
            'http://vas.sim-technik.de/vas/live/v2/videos',
            clip_id, 'Downloading videos JSON', query={
                'access_token': self._TOKEN,
                'client_location': client_location,
                'client_name': self._CLIENT_NAME,
                'ids': clip_id,
            })[0]

        if video.get('is_protected') is True:
            raise ExtractorError('This video is DRM protected.', expected=True)

        duration = float_or_none(video.get('duration'))
        source_ids = [compat_str(source['id']) for source in video['sources']]

        # client_id signature for the sources request (clip-level).
        client_id = self._SALT[:2] + sha1(''.join([clip_id, self._SALT, self._TOKEN, client_location, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()

        sources = self._download_json(
            'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources' % clip_id,
            clip_id, 'Downloading sources JSON', query={
                'access_token': self._TOKEN,
                'client_id': client_id,
                'client_location': client_location,
                'client_name': self._CLIENT_NAME,
            })
        server_id = sources['server_id']

        def fix_bitrate(bitrate):
            # API sometimes reports bits/s instead of kbit/s; scale down
            # values that are exact multiples of 1000.
            bitrate = int_or_none(bitrate)
            if not bitrate:
                return None
            return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate

        formats = []
        for source_id in source_ids:
            # client_id signature for the URL request (per-source, includes server_id).
            client_id = self._SALT[:2] + sha1(''.join([self._SALT, clip_id, self._TOKEN, server_id, client_location, source_id, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
            urls = self._download_json(
                'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url' % clip_id,
                clip_id, 'Downloading urls JSON', fatal=False, query={
                    'access_token': self._TOKEN,
                    'client_id': client_id,
                    'client_location': client_location,
                    'client_name': self._CLIENT_NAME,
                    'server_id': server_id,
                    'source_ids': source_id,
                })
            if not urls:
                continue
            if urls.get('status_code') != 0:
                raise ExtractorError('This video is unavailable', expected=True)
            urls_sources = urls['sources']
            if isinstance(urls_sources, dict):
                urls_sources = urls_sources.values()
            for source in urls_sources:
                source_url = source.get('url')
                if not source_url:
                    continue
                protocol = source.get('protocol')
                mimetype = source.get('mimetype')
                # Dispatch on container/protocol: HDS (f4m), HLS (m3u8),
                # DASH (mpd), then RTMP or plain HTTP progressive.
                if mimetype == 'application/f4m+xml' or 'f4mgenerator' in source_url or determine_ext(source_url) == 'f4m':
                    formats.extend(self._extract_f4m_formats(
                        source_url, clip_id, f4m_id='hds', fatal=False))
                elif mimetype == 'application/x-mpegURL':
                    formats.extend(self._extract_m3u8_formats(
                        source_url, clip_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls', fatal=False))
                elif mimetype == 'application/dash+xml':
                    formats.extend(self._extract_mpd_formats(
                        source_url, clip_id, mpd_id='dash', fatal=False))
                else:
                    tbr = fix_bitrate(source['bitrate'])

                    if protocol in ('rtmp', 'rtmpe'):
                        # Split an rtmp(e) URL into app and play path at the
                        # last 'mp4:' marker, as rtmpdump expects.
                        mobj = re.search(r'^(?P<url>rtmpe?://[^/]+)/(?P<path>.+)$', source_url)
                        if not mobj:
                            continue
                        path = mobj.group('path')
                        mp4colon_index = path.rfind('mp4:')
                        app = path[:mp4colon_index]
                        play_path = path[mp4colon_index:]
                        formats.append({
                            'url': '%s/%s' % (mobj.group('url'), app),
                            'app': app,
                            'play_path': play_path,
                            'player_url': 'http://livepassdl.conviva.com/hf/ver/2.79.0.17083/LivePassModuleMain.swf',
                            'page_url': 'http://www.prosieben.de',
                            'tbr': tbr,
                            'ext': 'flv',
                            'format_id': 'rtmp%s' % ('-%d' % tbr if tbr else ''),
                        })
                    else:
                        formats.append({
                            'url': source_url,
                            'tbr': tbr,
                            'format_id': 'http%s' % ('-%d' % tbr if tbr else ''),
                        })
        self._sort_formats(formats)

        return {
            'duration': duration,
            'formats': formats,
        }
class ProSiebenSat1IE(ProSiebenSat1BaseIE):
    """Extractor for clip and playlist pages on the ProSiebenSat.1 station
    sites (prosieben, sixx, sat1, kabeleins, ran, fem, 7tv, advopedia, ...).

    The clip/playlist id is not part of the URL; it is scraped from the page
    markup via the regex lists below, then resolved through the base class's
    ``_extract_video_info``.
    """
    IE_NAME = 'prosiebensat1'
    IE_DESC = 'ProSiebenSat.1 Digital'
    # Matches any path on the known station domains; the concrete id is
    # recovered later from the downloaded page, not from the URL itself.
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?
                        (?:
                            (?:
                                prosieben(?:maxx)?|sixx|sat1(?:gold)?|kabeleins(?:doku)?|the-voice-of-germany|7tv|advopedia
                            )\.(?:de|at|ch)|
                            ran\.de|fem\.com|advopedia\.de
                        )
                        /(?P<id>.+)
                    '''
    # Integration fixtures consumed by youtube-dl's test harness; each entry
    # pins the expected metadata for one real page.
    _TESTS = [
        {
            # Tests changes introduced in https://github.com/rg3/youtube-dl/pull/6242
            # in response to fixing https://github.com/rg3/youtube-dl/issues/6215:
            # - malformed f4m manifest support
            # - proper handling of URLs starting with `https?://` in 2.0 manifests
            # - recursive child f4m manifests extraction
            'url': 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge',
            'info_dict': {
                'id': '2104602',
                'ext': 'mp4',
                'title': 'Episode 18 - Staffel 2',
                'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
                'upload_date': '20131231',
                'duration': 5845.04,
            },
        },
        {
            'url': 'http://www.prosieben.de/videokatalog/Gesellschaft/Leben/Trends/video-Lady-Umstyling-f%C3%BCr-Audrina-Rebekka-Audrina-Fergen-billig-aussehen-Battal-Modica-700544.html',
            'info_dict': {
                'id': '2570327',
                'ext': 'mp4',
                'title': 'Lady-Umstyling für Audrina',
                'description': 'md5:4c16d0c17a3461a0d43ea4084e96319d',
                'upload_date': '20131014',
                'duration': 606.76,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Seems to be broken',
        },
        {
            'url': 'http://www.prosiebenmaxx.de/tv/experience/video/144-countdown-fuer-die-autowerkstatt-ganze-folge',
            'info_dict': {
                'id': '2429369',
                'ext': 'mp4',
                'title': 'Countdown für die Autowerkstatt',
                'description': 'md5:809fc051a457b5d8666013bc40698817',
                'upload_date': '20140223',
                'duration': 2595.04,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'This video is unavailable',
        },
        {
            'url': 'http://www.sixx.de/stars-style/video/sexy-laufen-in-ugg-boots-clip',
            'info_dict': {
                'id': '2904997',
                'ext': 'mp4',
                'title': 'Sexy laufen in Ugg Boots',
                'description': 'md5:edf42b8bd5bc4e5da4db4222c5acb7d6',
                'upload_date': '20140122',
                'duration': 245.32,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'This video is unavailable',
        },
        {
            'url': 'http://www.sat1.de/film/der-ruecktritt/video/im-interview-kai-wiesinger-clip',
            'info_dict': {
                'id': '2906572',
                'ext': 'mp4',
                'title': 'Im Interview: Kai Wiesinger',
                'description': 'md5:e4e5370652ec63b95023e914190b4eb9',
                'upload_date': '20140203',
                'duration': 522.56,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'This video is unavailable',
        },
        {
            'url': 'http://www.kabeleins.de/tv/rosins-restaurants/videos/jagd-auf-fertigkost-im-elsthal-teil-2-ganze-folge',
            'info_dict': {
                'id': '2992323',
                'ext': 'mp4',
                'title': 'Jagd auf Fertigkost im Elsthal - Teil 2',
                'description': 'md5:2669cde3febe9bce13904f701e774eb6',
                'upload_date': '20141014',
                'duration': 2410.44,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'This video is unavailable',
        },
        {
            'url': 'http://www.ran.de/fussball/bundesliga/video/schalke-toennies-moechte-raul-zurueck-ganze-folge',
            'info_dict': {
                'id': '3004256',
                'ext': 'mp4',
                'title': 'Schalke: Tönnies möchte Raul zurück',
                'description': 'md5:4b5b271d9bcde223b54390754c8ece3f',
                'upload_date': '20140226',
                'duration': 228.96,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'This video is unavailable',
        },
        {
            'url': 'http://www.the-voice-of-germany.de/video/31-andreas-kuemmert-rocket-man-clip',
            'info_dict': {
                'id': '2572814',
                'ext': 'mp4',
                'title': 'Andreas Kümmert: Rocket Man',
                'description': 'md5:6ddb02b0781c6adf778afea606652e38',
                'upload_date': '20131017',
                'duration': 469.88,
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.fem.com/wellness/videos/wellness-video-clip-kurztripps-zum-valentinstag.html',
            'info_dict': {
                'id': '2156342',
                'ext': 'mp4',
                'title': 'Kurztrips zum Valentinstag',
                'description': 'Romantischer Kurztrip zum Valentinstag? Nina Heinemann verrät, was sich hier wirklich lohnt.',
                'duration': 307.24,
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.prosieben.de/tv/joko-gegen-klaas/videos/playlists/episode-8-ganze-folge-playlist',
            'info_dict': {
                'id': '439664',
                'title': 'Episode 8 - Ganze Folge - Playlist',
                'description': 'md5:63b8963e71f481782aeea877658dec84',
            },
            'playlist_count': 2,
            'skip': 'This video is unavailable',
        },
        {
            'url': 'http://www.7tv.de/circus-halligalli/615-best-of-circus-halligalli-ganze-folge',
            'info_dict': {
                'id': '4187506',
                'ext': 'mp4',
                'title': 'Best of Circus HalliGalli',
                'description': 'md5:8849752efd90b9772c9db6fdf87fb9e9',
                'upload_date': '20151229',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            # geo restricted to Germany
            'url': 'http://www.kabeleinsdoku.de/tv/mayday-alarm-im-cockpit/video/102-notlandung-im-hudson-river-ganze-folge',
            'only_matching': True,
        },
        {
            # geo restricted to Germany
            'url': 'http://www.sat1gold.de/tv/edel-starck/video/11-staffel-1-episode-1-partner-wider-willen-ganze-folge',
            'only_matching': True,
        },
        {
            'url': 'http://www.sat1gold.de/tv/edel-starck/playlist/die-gesamte-1-staffel',
            'only_matching': True,
        },
        {
            'url': 'http://www.advopedia.de/videos/lenssen-klaert-auf/lenssen-klaert-auf-folge-8-staffel-3-feiertage-und-freie-tage',
            'only_matching': True,
        },
    ]
    # Access credentials used by the base class when signing API requests.
    _TOKEN = 'prosieben'
    _SALT = '01!8d8F_)r9]4s[qeuXfP%'
    _CLIENT_NAME = 'kolibri-2.0.19-splec4'
    # The page markup differs between station sites; each list below holds
    # alternative patterns that are tried in order until one matches.
    _CLIPID_REGEXES = [
        r'"clip_id"\s*:\s+"(\d+)"',
        r'clipid: "(\d+)"',
        r'clip[iI]d=(\d+)',
        r'clip[iI]d\s*=\s*["\'](\d+)',
        r"'itemImageUrl'\s*:\s*'/dynamic/thumbnails/full/\d+/(\d+)",
    ]
    _TITLE_REGEXES = [
        r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
        r'<header class="clearfix">\s*<h3>(.+?)</h3>',
        r'<!-- start video -->\s*<h1>(.+?)</h1>',
        r'<h1 class="att-name">\s*(.+?)</h1>',
        r'<header class="module_header">\s*<h2>([^<]+)</h2>\s*</header>',
        r'<h2 class="video-title" itemprop="name">\s*(.+?)</h2>',
        r'<div[^>]+id="veeseoTitle"[^>]*>(.+?)</div>',
    ]
    _DESCRIPTION_REGEXES = [
        r'<p itemprop="description">\s*(.+?)</p>',
        r'<div class="videoDecription">\s*<p><strong>Beschreibung</strong>: (.+?)</p>',
        r'<div class="g-plusone" data-size="medium"></div>\s*</div>\s*</header>\s*(.+?)\s*<footer>',
        r'<p class="att-description">\s*(.+?)\s*</p>',
        r'<p class="video-description" itemprop="description">\s*(.+?)</p>',
        r'<div[^>]+id="veeseoDescription"[^>]*>(.+?)</div>',
    ]
    _UPLOAD_DATE_REGEXES = [
        r'<meta property="og:published_time" content="(.+?)">',
        r'<span>\s*(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}) \|\s*<span itemprop="duration"',
        r'<footer>\s*(\d{2}\.\d{2}\.\d{4}) \d{2}:\d{2} Uhr',
        r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
        r'(\d{2}\.\d{2}\.\d{4}) \| \d{2}:\d{2} Min<br/>',
    ]
    _PAGE_TYPE_REGEXES = [
        r'<meta name="page_type" content="([^"]+)">',
        r"'itemType'\s*:\s*'([^']*)'",
    ]
    _PLAYLIST_ID_REGEXES = [
        r'content[iI]d=(\d+)',
        r"'itemId'\s*:\s*'([^']*)'",
    ]
    _PLAYLIST_CLIP_REGEXES = [
        r'(?s)data-qvt=.+?<a href="([^"]+)"',
    ]
    def _extract_clip(self, url, webpage):
        """Scrape a single-clip page and return its info dict.

        Resolves the clip id and title from the markup, fetches the formats
        via the base class, then merges in the scraped metadata.
        """
        clip_id = self._html_search_regex(
            self._CLIPID_REGEXES, webpage, 'clip id')
        title = self._html_search_regex(self._TITLE_REGEXES, webpage, 'title')
        info = self._extract_video_info(url, clip_id)
        description = self._html_search_regex(
            self._DESCRIPTION_REGEXES, webpage, 'description', default=None)
        if description is None:
            # Fall back to the Open Graph description when no site-specific
            # pattern matched.
            description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        upload_date = unified_strdate(self._html_search_regex(
            self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
        info.update({
            'id': clip_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
        })
        return info
    def _extract_playlist(self, url, webpage):
        """Scrape a playlist page and return a playlist result.

        The clip list is embedded in the page as the ``contentResources``
        JavaScript variable; entries without an id are skipped.
        """
        playlist_id = self._html_search_regex(
            self._PLAYLIST_ID_REGEXES, webpage, 'playlist id')
        playlist = self._parse_json(
            self._search_regex(
                r'var\s+contentResources\s*=\s*(\[.+?\]);\s*</script',
                webpage, 'playlist'),
            playlist_id)
        entries = []
        for item in playlist:
            clip_id = item.get('id') or item.get('upc')
            if not clip_id:
                continue
            info = self._extract_video_info(url, clip_id)
            info.update({
                'id': clip_id,
                'title': item.get('title') or item.get('teaser', {}).get('headline'),
                'description': item.get('teaser', {}).get('description'),
                'thumbnail': item.get('poster'),
                'duration': float_or_none(item.get('duration')),
                'series': item.get('tvShowTitle'),
                'uploader': item.get('broadcastPublisher'),
            })
            entries.append(info)
        return self.playlist_result(entries, playlist_id)
    def _real_extract(self, url):
        """Entry point: dispatch to clip or playlist extraction based on the
        page type advertised in the downloaded markup (defaults to clip)."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        page_type = self._search_regex(
            self._PAGE_TYPE_REGEXES, webpage,
            'page type', default='clip').lower()
        if page_type == 'clip':
            return self._extract_clip(url, webpage)
        elif page_type == 'playlist':
            return self._extract_playlist(url, webpage)
        else:
            raise ExtractorError(
                'Unsupported page type %s' % page_type, expected=True)
| Dunkas12/BeepBoopBot | lib/youtube_dl/extractor/prosiebensat1.py | Python | gpl-3.0 | 17,726 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from random import *
import numpy
import pdb
import cPickle
import bz2
import sys
import pylab
import nupic.bindings.algorithms as algo
from nupic.bindings.math import GetNumpyDataType
# NOTE(review): this module-level name shadows the builtin ``type`` and the
# first assignment is immediately overwritten by the hard-coded string below,
# so GetNumpyDataType's result is discarded.  All functions in this script
# pass it as the numpy ``dtype``; renaming would require touching them all.
type = GetNumpyDataType('NTA_Real')
type = 'float32'
#--------------------------------------------------------------------------------
# Simple use case
#--------------------------------------------------------------------------------
def simple():
  """Demo: train a dense SVM on four noisy 2-D clusters and exercise the
  problem/model inspection, cross-validation and prediction APIs.

  Prints results to stdout; plotting code is kept but disabled via do_plot.
  """
  print "Simple"
  numpy.random.seed(42)
  n_dims = 2
  n_class = 4
  size = 200
  labels = numpy.random.random_integers(0, n_class-1, size)
  samples = numpy.zeros((size, n_dims), dtype=type)
  do_plot = False
  print "Generating data"
  # One cluster center per class; samples are scattered around 2*center.
  centers = numpy.array([[0,0],[0,1],[1,0],[1,1]])
  for i in range(0, size):
    t = 6.28 * numpy.random.random_sample()
    samples[i][0] = 2 * centers[labels[i]][0] + .5*numpy.random.random() * numpy.cos(t)
    samples[i][1] = 2 * centers[labels[i]][1] + .5*numpy.random.random() * numpy.sin(t)
  classifier = algo.svm_dense(0, n_dims, probability=True, seed=42)
  print "Adding sample vectors"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    classifier.add_sample(float(y), x)
  print "Displaying problem"
  problem = classifier.get_problem()
  print "Problem size:", problem.size()
  print "Problem dimensionality:", problem.n_dims()
  print "Problem samples:"
  # First column is the label, remaining columns are the sample vector.
  s = numpy.zeros((problem.size(), problem.n_dims()+1), dtype=type)
  problem.get_samples(s)
  print s
  if do_plot:
    pylab.ion()
    pylab.plot(s[s[:,0]==0,1], s[s[:,0]==0,2], '.', color='r')
    pylab.plot(s[s[:,0]==1,1], s[s[:,0]==1,2], '+', color='b')
    pylab.plot(s[s[:,0]==2,1], s[s[:,0]==2,2], '^', color='g')
    pylab.plot(s[s[:,0]==3,1], s[s[:,0]==3,2], 'v', color='g')
  print "Training"
  classifier.train(gamma = 1./3., C = 100, eps=1e-1)
  print "Displaying model"
  model = classifier.get_model()
  print "Number of support vectors:", model.size()
  print "Number of classes:", model.n_class()
  print "Number of dimensions: ", model.n_dims()
  print "Support vectors:"
  sv = numpy.zeros((model.size(), model.n_dims()), dtype=type)
  model.get_support_vectors(sv)
  print sv
  if do_plot:
    pylab.plot(sv[:,0], sv[:,1], 'o', color='g')
  print "Support vector coefficients:"
  svc = numpy.zeros((model.n_class()-1, model.size()), dtype=type)
  model.get_support_vector_coefficients(svc)
  print svc
  print "Hyperplanes (for linear kernel only):"
  h = model.get_hyperplanes()
  print h
  if do_plot:
    # Evaluate class-0 probability on a 10x10 grid over the sample range
    # and draw the decision contours.
    xmin = numpy.min(samples[:,0])
    xmax = numpy.max(samples[:,0])
    xstep = (xmax - xmin) / 10
    X = numpy.arange(xmin, xmax, xstep)
    ymin = numpy.min(samples[:,1])
    ymax = numpy.max(samples[:,1])
    ystep = (ymax - ymin) / 10
    Y = numpy.arange(ymin, ymax, ystep)
    points = numpy.zeros((len(X), len(Y)))
    for i,x in enumerate(X):
      for j,y in enumerate(Y):
        proba = numpy.zeros(model.n_class(), dtype=type)
        classifier.predict_probability(numpy.array([x,y]), proba)
        points[i,j] = proba[0]
    pylab.contour(X,Y,points)
  print "Cross-validation"
  print classifier.cross_validate(2, gamma = .5, C = 10, eps = 1e-3)
  print "Predicting"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    proba = numpy.zeros(model.n_class(), dtype=type)
    print x, ': real=', y,
    print 'p1=', classifier.predict(x),
    print 'p2=', classifier.predict_probability(x, proba),
    print 'proba=', proba
  print "Discarding problem"
  classifier.discard_problem()
  # Prediction must still work from the trained model alone after the
  # training samples have been discarded.
  print "Predicting after discarding the problem"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    proba = numpy.zeros(model.n_class(), dtype=type)
    print x, ': real=', y,
    print 'p1=', classifier.predict(x),
    print 'p2=', classifier.predict_probability(x, proba),
    print 'proba=', proba
#--------------------------------------------------------------------------------
# Persistence
#--------------------------------------------------------------------------------
def persistence():
  """Demo: verify that svm_dense and svm_01 classifiers survive a pickle
  round-trip, both before training (dense) and after training (0/1).

  Writes the pickle to a file named 'test' in the current directory.
  """
  print "Persistence"
  numpy.random.seed(42)
  n_dims = 2
  n_class = 12
  size = 100
  labels = numpy.random.random_integers(0, 256, size)
  samples = numpy.zeros((size, n_dims), dtype=type)
  print "Generating data"
  for i in range(0, size):
    t = 6.28 * numpy.random.random_sample()
    samples[i][0] = 2 * labels[i] + 1.5 * numpy.cos(t)
    samples[i][1] = 2 * labels[i] + 1.5 * numpy.sin(t)
  print "Creating dense classifier"
  classifier = algo.svm_dense(0, n_dims = n_dims, seed=42)
  print "Adding sample vectors to dense classifier"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    classifier.add_sample(float(y), x)
  print "Pickling dense classifier"
  # Round-trip through a file BEFORE training: the problem (samples) must
  # be preserved so training still works on the unpickled object.
  cPickle.dump(classifier, open('test', 'wb'))
  classifier = cPickle.load(open('test', 'rb'))
  print "Training dense classifier"
  classifier.train(gamma = 1, C = 10, eps=1e-1)
  print "Predicting with dense classifier"
  print classifier.predict(samples[0])
  print "Creating 0/1 classifier"
  classifier01 = algo.svm_01(n_dims = n_dims, seed=42)
  print "Adding sample vectors to 0/1 classifier"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    classifier01.add_sample(float(y), x)
  print "Training 0/1 classifier"
  classifier01.train(gamma = 1./3., C = 100, eps=1e-1)
  print "Pickling 0/1 classifier"
  # Round-trip AFTER training: the trained model must be preserved.
  cPickle.dump(classifier01, open('test', 'wb'))
  classifier01 = cPickle.load(open('test', 'rb'))
  print "Predicting with 0/1 classifier"
  print classifier01.predict(numpy.array(samples[0], dtype=type))
#--------------------------------------------------------------------------------
# Cross validation
#--------------------------------------------------------------------------------
def cross_validation():
  """Demo: cross-validate a tiny hand-written 3-D problem.

  NOTE(review): the bare ``return`` below makes the whole body unreachable,
  i.e. this section is currently disabled -- presumably on purpose, but
  worth confirming before relying on its output.
  """
  return
  print "Cross validation"
  numpy.random.seed(42)
  labels = [0, 1, 1, 2, 1, 2]
  samples = [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [1, 1, 0], [0, 1, 1]]
  classifier = algo.svm_dense(0, n_dims = 3, seed=42)
  print "Adding sample vectors"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    classifier.add_sample(float(y), x)
  cPickle.dump(classifier, open('test', 'wb'))
  classifier = cPickle.load(open('test', 'rb'))
  print "Training"
  classifier.train(gamma = 1./3., C = 100, eps=1e-1)
  print "Cross validation =",
  print classifier.cross_validate(3, gamma = .5, C = 10, eps = 1e-3)
#--------------------------------------------------------------------------------
# Run the three demo sections in order when this script is executed.
simple()
persistence()
cross_validation()
| tkaitchuck/nupic | examples/bindings/svm_how_to.py | Python | gpl-3.0 | 8,034 |
from cantilever_divingboard import *
# We need to scale the parameters before applying the optimization algorithm
# Normally there are about 20 orders of magnitude between the dimensions and
# the doping concentration, so this is a critical step
# Run the script
freq_min = 1e3
freq_max = 1e5
omega_min = 100e3
initial_guess = (50e-6, 1e-6, 1e-6,
30e-6, 1e-6, 1e-6, 500e-9, 5., 1e15)
constraints = ((30e-6, 100e-6), (500e-9, 20e-6), (1e-6, 10e-6),
(2e-6, 100e-6), (500e-9, 5e-6), (500e-9, 20e-6), (30e-9, 10e-6),
(1., 10.), (1e15, 4e19))
x = optimize_cantilever(initial_guess, constraints, freq_min, freq_max, omega_min)
c = cantilever_divingboard(freq_min, freq_max, x)
c.print_performance() | jcdoll/PiezoD | python/archive/lbfgs.py | Python | gpl-3.0 | 752 |
"""
Django settings for spa_movies project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Prefer a key supplied via the environment so the production secret does
# not have to live in version control; the previous hard-coded value is
# kept as the fallback so existing local setups keep working unchanged.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'ss!@cvdm$38bkbuk5hw!_csg(_@kfl3_)3vi$!@_2q(f!l1q!q')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts this site may be served from: the production domain plus loopback.
ALLOWED_HOSTS = ['davgibbs.pythonanywhere.com', '127.0.0.1']
# NOTE(review): redirects every plain-HTTP request to HTTPS.  This also
# applies to local runserver traffic on 127.0.0.1, which has no TLS --
# confirm that is intended given DEBUG is True here.
SECURE_SSL_REDIRECT = True
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'movies.apps.MoviesConfig',
    'rest_framework_swagger',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'spa_movies.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'spa_movies.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# User uploaded files "Media"
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Expire session after 3 hours
SESSION_COOKIE_AGE = 60 * 60 * 3
# Django REST framework: unauthenticated clients get read-only access.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticatedOrReadOnly',
    )
}
| davgibbs/movies-spa | apps/spa_movies/settings.py | Python | gpl-3.0 | 3,710 |
# Configs for mk-livestatus lookup scripts
# Nagios hosts to query and the TCP port the mk-livestatus socket listens on.
HOST = [ 'nagios', 'nagios1' ]
PORT = 6557
| skywalka/splunk-for-nagios | bin/mklivestatus.py | Python | gpl-3.0 | 87 |
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
class SendmywayCom(XFSHoster):
    """pyLoad hoster plugin for sendmyway.com (XFileSharing-based site).

    Purely declarative: all download logic lives in the XFSHoster base
    class; this subclass only supplies plugin metadata and site patterns.
    """
    __name__ = "SendmywayCom"
    __type__ = "hoster"
    __version__ = "0.04"
    # Download links carry a 12-character file id.
    __pattern__ = r'http://(?:www\.)?sendmyway\.com/\w{12}'
    __description__ = """SendMyWay hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "[email protected]")]
    HOSTER_DOMAIN = "sendmyway.com"
    # Regexes used by the base class to scrape name (group N) and size
    # (group S) from the file page.
    NAME_PATTERN = r'<p class="file-name" ><.*?>\s*(?P<N>.+)'
    SIZE_PATTERN = r'<small>\((?P<S>\d+) bytes\)</small>'
SIZE_PATTERN = r'<small>\((?P<S>\d+) bytes\)</small>'
# Module-level info-extraction hook derived from the plugin class.
getInfo = create_getInfo(SendmywayCom)
| sebdelsol/pyload | module/plugins/hoster/SendmywayCom.py | Python | gpl-3.0 | 619 |
from __future__ import unicode_literals
from frappe import _
# App metadata consumed by the Frappe framework.
app_name = "erpnext"
app_title = "ERPNext"
app_publisher = "Frappe Technologies Pvt. Ltd."
app_description = """ERP made simple"""
app_icon = "fa fa-th"
app_color = "#e74c3c"
app_email = "[email protected]"
app_license = "GNU General Public License (v3)"
source_link = "https://github.com/frappe/erpnext"
develop_version = '12.x.x-develop'
# error_report_email = "[email protected]"
# Bundled asset files included on every desk / website page.
app_include_js = "assets/js/erpnext.min.js"
app_include_css = "assets/css/erpnext.css"
web_include_js = "assets/js/erpnext-web.min.js"
web_include_css = "assets/css/erpnext-web.css"
# Extra client scripts attached to core doctypes.
doctype_js = {
	"Communication": "public/js/communication.js",
	"Event": "public/js/event.js"
}
welcome_email = "erpnext.setup.utils.welcome_email"
# setup wizard
setup_wizard_requires = "assets/erpnext/js/setup_wizard.js"
setup_wizard_stages = "erpnext.setup.setup_wizard.setup_wizard.get_setup_stages"
setup_wizard_test = "erpnext.setup.setup_wizard.test_setup_wizard.run_setup_wizard_test"
# Install lifecycle hooks (dotted paths to callables).
before_install = "erpnext.setup.install.check_setup_wizard_not_completed"
after_install = "erpnext.setup.install.after_install"
boot_session = "erpnext.startup.boot.boot_session"
notification_config = "erpnext.startup.notifications.get_notification_config"
get_help_messages = "erpnext.utilities.activation.get_help_messages"
get_user_progress_slides = "erpnext.utilities.user_progress.get_user_progress_slides"
update_and_get_user_progress = "erpnext.utilities.user_progress_utils.update_default_domain_actions_and_get_state"
on_session_creation = "erpnext.shopping_cart.utils.set_cart_count"
on_logout = "erpnext.shopping_cart.utils.clear_cart_count"
# Doctypes rendered with the tree view.
treeviews = ['Account', 'Cost Center', 'Warehouse', 'Item Group', 'Customer Group', 'Sales Person', 'Territory', 'Assessment Group']
# website
update_website_context = "erpnext.shopping_cart.utils.update_website_context"
my_account_context = "erpnext.shopping_cart.utils.update_my_account_context"
email_append_to = ["Job Applicant", "Lead", "Opportunity", "Issue"]
calendars = ["Task", "Work Order", "Leave Application", "Sales Order", "Holiday List", "Course Schedule"]
# Industry-domain setup modules selectable during setup.
domains = {
	'Agriculture': 'erpnext.domains.agriculture',
	'Distribution': 'erpnext.domains.distribution',
	'Education': 'erpnext.domains.education',
	'Healthcare': 'erpnext.domains.healthcare',
	'Hospitality': 'erpnext.domains.hospitality',
	'Manufacturing': 'erpnext.domains.manufacturing',
	'Non Profit': 'erpnext.domains.non_profit',
	'Retail': 'erpnext.domains.retail',
	'Services': 'erpnext.domains.services',
}
# Doctypes that generate their own website pages.
website_generators = ["Item Group", "Item", "BOM", "Sales Partner",
	"Job Opening", "Student Admission"]
website_context = {
	"favicon": "/assets/erpnext/images/favicon.png",
	"splash_image": "/assets/erpnext/images/erp-icon.svg"
}
# URL rewrites for the customer/supplier portal: each maps a friendly route
# to a list page (by doctype) or a detail template with default context.
website_route_rules = [
	{"from_route": "/orders", "to_route": "Sales Order"},
	{"from_route": "/orders/<path:name>", "to_route": "order",
		"defaults": {
			"doctype": "Sales Order",
			"parents": [{"label": _("Orders"), "route": "orders"}]
		}
	},
	{"from_route": "/invoices", "to_route": "Sales Invoice"},
	{"from_route": "/invoices/<path:name>", "to_route": "order",
		"defaults": {
			"doctype": "Sales Invoice",
			"parents": [{"label": _("Invoices"), "route": "invoices"}]
		}
	},
	{"from_route": "/supplier-quotations", "to_route": "Supplier Quotation"},
	{"from_route": "/supplier-quotations/<path:name>", "to_route": "order",
		"defaults": {
			"doctype": "Supplier Quotation",
			"parents": [{"label": _("Supplier Quotation"), "route": "supplier-quotations"}]
		}
	},
	{"from_route": "/quotations", "to_route": "Quotation"},
	{"from_route": "/quotations/<path:name>", "to_route": "order",
		"defaults": {
			"doctype": "Quotation",
			"parents": [{"label": _("Quotations"), "route": "quotations"}]
		}
	},
	{"from_route": "/shipments", "to_route": "Delivery Note"},
	{"from_route": "/shipments/<path:name>", "to_route": "order",
		"defaults": {
			"doctype": "Delivery Note",
			"parents": [{"label": _("Shipments"), "route": "shipments"}]
		}
	},
	{"from_route": "/rfq", "to_route": "Request for Quotation"},
	{"from_route": "/rfq/<path:name>", "to_route": "rfq",
		"defaults": {
			"doctype": "Request for Quotation",
			"parents": [{"label": _("Request for Quotation"), "route": "rfq"}]
		}
	},
	{"from_route": "/addresses", "to_route": "Address"},
	{"from_route": "/addresses/<path:name>", "to_route": "addresses",
		"defaults": {
			"doctype": "Address",
			"parents": [{"label": _("Addresses"), "route": "addresses"}]
		}
	},
	{"from_route": "/jobs", "to_route": "Job Opening"},
	{"from_route": "/admissions", "to_route": "Student Admission"},
	{"from_route": "/boms", "to_route": "BOM"},
	{"from_route": "/timesheets", "to_route": "Timesheet"},
]
# Sidebar items shown in the website portal for logged-in users.  Each entry
# maps a menu title to a portal route; "reference_doctype" drives the list
# view behind it and "role" (when present) restricts visibility.
# Fix: the identical "Timesheets" entry appeared twice (copy-paste
# duplication); the duplicate has been removed.
standard_portal_menu_items = [
	{"title": _("Personal Details"), "route": "/personal-details", "reference_doctype": "Patient", "role": "Patient"},
	{"title": _("Projects"), "route": "/project", "reference_doctype": "Project"},
	{"title": _("Request for Quotations"), "route": "/rfq", "reference_doctype": "Request for Quotation", "role": "Supplier"},
	{"title": _("Supplier Quotation"), "route": "/supplier-quotations", "reference_doctype": "Supplier Quotation", "role": "Supplier"},
	{"title": _("Quotations"), "route": "/quotations", "reference_doctype": "Quotation", "role":"Customer"},
	{"title": _("Orders"), "route": "/orders", "reference_doctype": "Sales Order", "role":"Customer"},
	{"title": _("Invoices"), "route": "/invoices", "reference_doctype": "Sales Invoice", "role":"Customer"},
	{"title": _("Shipments"), "route": "/shipments", "reference_doctype": "Delivery Note", "role":"Customer"},
	{"title": _("Issues"), "route": "/issues", "reference_doctype": "Issue", "role":"Customer"},
	{"title": _("Addresses"), "route": "/addresses", "reference_doctype": "Address"},
	{"title": _("Timesheets"), "route": "/timesheets", "reference_doctype": "Timesheet", "role":"Customer"},
	{"title": _("Lab Test"), "route": "/lab-test", "reference_doctype": "Lab Test", "role":"Patient"},
	{"title": _("Prescription"), "route": "/prescription", "reference_doctype": "Patient Encounter", "role":"Patient"},
	{"title": _("Patient Appointment"), "route": "/patient-appointments", "reference_doctype": "Patient Appointment", "role":"Patient"},
	{"title": _("Fees"), "route": "/fees", "reference_doctype": "Fees", "role":"Student"},
	{"title": _("Newsletter"), "route": "/newsletters", "reference_doctype": "Newsletter"},
	{"title": _("Admission"), "route": "/admissions", "reference_doctype": "Student Admission"},
	{"title": _("Certification"), "route": "/certification", "reference_doctype": "Certification Application"},
]
# Roles automatically granted to portal users created from these doctypes.
default_roles = [
	{'role': 'Customer', 'doctype':'Contact', 'email_field': 'email_id'},
	{'role': 'Supplier', 'doctype':'Contact', 'email_field': 'email_id'},
	{'role': 'Student', 'doctype':'Student', 'email_field': 'student_email_id'},
]
# Per-doctype permission checks for website/portal access.
has_website_permission = {
	"Sales Order": "erpnext.controllers.website_list_for_contact.has_website_permission",
	"Quotation": "erpnext.controllers.website_list_for_contact.has_website_permission",
	"Sales Invoice": "erpnext.controllers.website_list_for_contact.has_website_permission",
	"Supplier Quotation": "erpnext.controllers.website_list_for_contact.has_website_permission",
	"Delivery Note": "erpnext.controllers.website_list_for_contact.has_website_permission",
	"Issue": "erpnext.support.doctype.issue.issue.has_website_permission",
	"Timesheet": "erpnext.controllers.website_list_for_contact.has_website_permission",
	"Lab Test": "erpnext.healthcare.web_form.lab_test.lab_test.has_website_permission",
	"Patient Encounter": "erpnext.healthcare.web_form.prescription.prescription.has_website_permission",
	"Patient Appointment": "erpnext.healthcare.web_form.patient_appointments.patient_appointments.has_website_permission",
	"Patient": "erpnext.healthcare.web_form.personal_details.personal_details.has_website_permission"
}
dump_report_map = "erpnext.startup.report_data_map.data_map"
before_tests = "erpnext.setup.utils.before_tests"
# Custom link-field search queries for these doctypes.
standard_queries = {
	"Customer": "erpnext.selling.doctype.customer.customer.get_customer_list",
	"Healthcare Practitioner": "erpnext.healthcare.doctype.healthcare_practitioner.healthcare_practitioner.get_practitioner_list"
}
# Document lifecycle hooks: {doctype (or tuple of doctypes): {event: handler(s)}}.
doc_events = {
	"Stock Entry": {
		"on_submit": "erpnext.stock.doctype.material_request.material_request.update_completed_and_requested_qty",
		"on_cancel": "erpnext.stock.doctype.material_request.material_request.update_completed_and_requested_qty"
	},
	"User": {
		"after_insert": "frappe.contacts.doctype.contact.contact.update_contact",
		"validate": "erpnext.hr.doctype.employee.employee.validate_employee_role",
		"on_update": ["erpnext.hr.doctype.employee.employee.update_user_permissions",
			"erpnext.portal.utils.set_default_role"]
	},
	("Sales Taxes and Charges Template", 'Price List'): {
		"on_update": "erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings.validate_cart_settings"
	},
	"Website Settings": {
		"validate": "erpnext.portal.doctype.products_settings.products_settings.home_page_is_products"
	},
	"Sales Invoice": {
		"on_submit": ["erpnext.regional.france.utils.create_transaction_log", "erpnext.regional.italy.utils.sales_invoice_on_submit"],
		"on_cancel": "erpnext.regional.italy.utils.sales_invoice_on_cancel",
		"on_trash": "erpnext.regional.check_deletion_permission"
	},
	"Payment Entry": {
		"on_submit": ["erpnext.regional.france.utils.create_transaction_log", "erpnext.accounts.doctype.payment_request.payment_request.make_status_as_paid"],
		"on_trash": "erpnext.regional.check_deletion_permission"
	},
	'Address': {
		'validate': ['erpnext.regional.india.utils.validate_gstin_for_india', 'erpnext.regional.italy.utils.set_state_code']
	},
	('Sales Invoice', 'Purchase Invoice', 'Delivery Note'): {
		'validate': 'erpnext.regional.india.utils.set_place_of_supply'
	},
	"Contact":{
		"on_trash": "erpnext.support.doctype.issue.issue.update_issue"
	}
}
scheduler_events = {
"all": [
"erpnext.projects.doctype.project.project.project_status_update_reminder"
],
"hourly": [
'erpnext.hr.doctype.daily_work_summary_group.daily_work_summary_group.trigger_emails',
"erpnext.accounts.doctype.subscription.subscription.process_all",
"erpnext.erpnext_integrations.doctype.amazon_mws_settings.amazon_mws_settings.schedule_get_order_details",
"erpnext.projects.doctype.project.project.hourly_reminder",
"erpnext.projects.doctype.project.project.collect_project_status"
],
"daily": [
"erpnext.stock.reorder_item.reorder_item",
"erpnext.setup.doctype.email_digest.email_digest.send",
"erpnext.support.doctype.issue.issue.auto_close_tickets",
"erpnext.crm.doctype.opportunity.opportunity.auto_close_opportunity",
"erpnext.controllers.accounts_controller.update_invoice_status",
"erpnext.accounts.doctype.fiscal_year.fiscal_year.auto_create_fiscal_year",
"erpnext.hr.doctype.employee.employee.send_birthday_reminders",
"erpnext.projects.doctype.task.task.set_tasks_as_overdue",
"erpnext.assets.doctype.asset.depreciation.post_depreciation_entries",
"erpnext.hr.doctype.daily_work_summary_group.daily_work_summary_group.send_summary",
"erpnext.stock.doctype.serial_no.serial_no.update_maintenance_status",
"erpnext.buying.doctype.supplier_scorecard.supplier_scorecard.refresh_scorecards",
"erpnext.setup.doctype.company.company.cache_companies_monthly_sales_history",
"erpnext.assets.doctype.asset.asset.update_maintenance_status",
"erpnext.assets.doctype.asset.asset.make_post_gl_entry",
"erpnext.crm.doctype.contract.contract.update_status_for_contracts",
"erpnext.projects.doctype.project.project.update_project_sales_billing",
"erpnext.projects.doctype.project.project.send_project_status_email_to_users"
],
"daily_long": [
"erpnext.manufacturing.doctype.bom_update_tool.bom_update_tool.update_latest_price_in_all_boms"
],
"monthly": [
"erpnext.accounts.deferred_revenue.convert_deferred_revenue_to_income",
"erpnext.accounts.deferred_revenue.convert_deferred_expense_to_expense",
"erpnext.hr.utils.allocate_earned_leaves"
]
}
email_brand_image = "assets/erpnext/images/erpnext-logo.jpg"
default_mail_footer = """
<span>
Sent via
<a class="text-muted" href="https://erpnext.com?source=via_email_footer" target="_blank">
ERPNext
</a>
</span>
"""
get_translated_dict = {
("doctype", "Global Defaults"): "frappe.geo.country_info.get_translated_dict"
}
bot_parsers = [
'erpnext.utilities.bot.FindItemBot',
]
get_site_info = 'erpnext.utilities.get_site_info'
payment_gateway_enabled = "erpnext.accounts.utils.create_payment_gateway_account"
regional_overrides = {
'France': {
'erpnext.tests.test_regional.test_method': 'erpnext.regional.france.utils.test_method'
},
'India': {
'erpnext.tests.test_regional.test_method': 'erpnext.regional.india.utils.test_method',
'erpnext.controllers.taxes_and_totals.get_itemised_tax_breakup_header': 'erpnext.regional.india.utils.get_itemised_tax_breakup_header',
'erpnext.controllers.taxes_and_totals.get_itemised_tax_breakup_data': 'erpnext.regional.india.utils.get_itemised_tax_breakup_data',
'erpnext.accounts.party.get_regional_address_details': 'erpnext.regional.india.utils.get_regional_address_details',
'erpnext.hr.utils.calculate_annual_eligible_hra_exemption': 'erpnext.regional.india.utils.calculate_annual_eligible_hra_exemption',
'erpnext.hr.utils.calculate_hra_exemption_for_period': 'erpnext.regional.india.utils.calculate_hra_exemption_for_period'
},
'United Arab Emirates': {
'erpnext.controllers.taxes_and_totals.update_itemised_tax_data': 'erpnext.regional.united_arab_emirates.utils.update_itemised_tax_data'
},
'Saudi Arabia': {
'erpnext.controllers.taxes_and_totals.update_itemised_tax_data': 'erpnext.regional.united_arab_emirates.utils.update_itemised_tax_data'
},
'Italy': {
'erpnext.controllers.taxes_and_totals.update_itemised_tax_data': 'erpnext.regional.italy.utils.update_itemised_tax_data',
'erpnext.controllers.accounts_controller.validate_regional': 'erpnext.regional.italy.utils.sales_invoice_validate',
}
}
| ESS-LLP/erpnext-healthcare | erpnext/hooks.py | Python | gpl-3.0 | 14,269 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-30 22:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable ``json`` TextField to the ``pattern`` model."""
    dependencies = [
        ('pattern', '0014_pattern_editnumber'),
    ]
    operations = [
        migrations.AddField(
            model_name='pattern',
            name='json',
            # Nullable so existing rows need no default when the column is added.
            field=models.TextField(null=True),
        ),
    ]
| yaxu/patternlib | pattern/migrations/0015_pattern_json.py | Python | gpl-3.0 | 444 |
#!/usr/bin/env python
# import sys
# sys.path.insert(0, '../')
import pyarchey.pyarchey as py
# o = py.Output()
# print 'Distro Name Pretty Name'
# print '---------------------------'
# print o.readDistro('./slack.test')
# print o.readDistro('./arch.test')
# print o.readDistro('./raspbian.test')
def test_slack():
    """readDistro should parse the Slackware fixture into (name, pretty name)."""
    o = py.Output()
    assert o.readDistro('./test/slack.test') == ('Slackware', 'Slackware 14.1')
def test_arch():
    """readDistro should parse the Arch Linux fixture into (name, pretty name)."""
    o = py.Output()
    assert o.readDistro('./test/arch.test') == ('Arch Linux', 'Arch Linux')
def test_raspbian():
o = py.Output()
assert o.readDistro('./test/raspbian.test') == ('Raspbian', 'Raspbian 7 (wheezy)') | walchko/pyarchey | test.py | Python | gpl-3.0 | 646 |
"""__Main__."""
import sys
import os
import logging
import argparse
import traceback
import shelve
from datetime import datetime
from CONSTANTS import CONSTANTS
from settings.settings import load_config, load_core, load_remote, load_email
from settings.settings import load_html, load_sms
from core import read_structure, readStructureFromFile, updateStructure
from core import clean_video_db, syncDirTree, transferLongVersions
from core import executeToDoFile, build_html_report, umount
from core import check_and_correct_videos_errors, clean_remote
from core import get_new_file_ids_from_structure, mount, check_mkv_videos
from notifications import send_sms_notification, send_mail_report, send_mail_log
def get_args(argv=None):
    """Parse command-line options for pyHomeVM.

    Args:
        argv: Optional list of argument strings.  Defaults to None, in which
            case argparse falls back to sys.argv[1:] (normal CLI behaviour).
            The parameter is new and backward-compatible; it makes the parser
            testable without patching sys.argv.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='pyHomeVM')
    parser.add_argument('-c', '--config_file_path',
                        action='store',
                        default='settings/dev_config.cfg',
                        help='path to config file that is to be used.')
    parser.add_argument('-s', '--sms', help='Enables sms notifications',
                        action='store_true')
    parser.add_argument('-l', '--log', help='Enables log sending by e-mail',
                        action='store_true')
    parser.add_argument('-r', '--report',
                        help='Enables html report sending by e-mail',
                        action='store_true')
    parser.add_argument('-rem', '--remote',
                        help='Enables transfer of long versions to remote storage',
                        action='store_true')
    parser.add_argument('-b', '--backup',
                        help='Enables backup of first videos',
                        action='store_true')
    parser.add_argument('-stats',
                        help='Gets you statistics about your videos',
                        action='store_true')
    return parser.parse_args(argv)
def load_logger():
    """Create the module logger, writing DEBUG-level output to the configured log file."""
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler(CONSTANTS['log_file_path'])
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    log.addHandler(file_handler)
    return log
def main(argv=None):
    """Run one pyHomeVM pass: scan the local video tree, fix/transcode videos,
    sync to remote storage, and send the configured notifications.

    Uses a pidfile to guarantee a single running instance.  NOTE(review): this
    is Python 2 code (the ``file()`` builtin below does not exist in Python 3).
    """
    start_time = datetime.now()
    args = get_args()  # parse CLI flags
    logger = load_logger()  # file logger from load_logger()
    logger.info('PROGRAM STARTED')
    pid = str(os.getpid())
    pidfile = "/tmp/pyHomeVM.pid"
    config = load_config(args.config_file_path)  # load config file
    # Another instance is running: mail the log and bail out.
    if os.path.isfile(pidfile):
        logger.info('Program already running')
        html = load_html(config)
        email = load_email(config)
        send_mail_log(CONSTANTS['log_file_path'], email, html)
        sys.exit()
    file(pidfile, 'w').write(pid)
    (ffmpeg, local) = load_core(config)  # load core configs
    # NOTE(review): the four configs below are loaded unconditionally and then
    # loaded again under their flags — the flag-guarded loads are redundant.
    remote = load_remote(config)
    html = load_html(config)
    sms = load_sms(config)
    email = load_email(config)
    if(args.log):
        email = load_email(config)
    if(args.report):
        html = load_html(config)
    if(args.remote):
        remote = load_remote(config)
    if(args.sms):
        sms = load_sms(config)
    video_db = shelve.open(CONSTANTS['video_db_path'], writeback=True)
    # Load the previous scan's structure; treat any failure as a first run.
    try:
        if not os.path.exists(CONSTANTS['structure_file_path']):
            raise Exception("Directory structure definition file not found.")
        past_structure = readStructureFromFile(CONSTANTS)
    except Exception:
        logger.info(traceback.format_exc())
        logger.info('{} not found'.format(CONSTANTS['structure_file_path']))
        past_structure = {}  # Start as new
    new_structure = read_structure(local)
    video_ids = get_new_file_ids_from_structure(new_structure, video_db)
    check_and_correct_videos_errors(video_ids, video_db, local, ffmpeg)
    logger.info('Checked for errors and corrupted')
    # Diff past vs current tree; html_data feeds the e-mail report below.
    html_data = updateStructure(
        past_structure,
        read_structure(local),
        local,
        ffmpeg,
        remote,
        video_db)
    # Marker file remembering that an "remote unreachable" SMS was already sent.
    sms_sent_file = os.path.join(CONSTANTS['script_root_dir'], 'sms_sent')
    if(mount(remote)):
        logger.info('Mount succesfull')
        syncDirTree(local, remote)
        transferLongVersions(local, remote, video_db)
        if(os.path.isfile(CONSTANTS['todo_file_path'])):
            executeToDoFile(CONSTANTS['todo_file_path'], local, CONSTANTS)
        # Remote is reachable again: clear the SMS marker.
        if(os.path.exists(sms_sent_file)):
            os.remove(sms_sent_file)
            logger.info('sms_sent file has been deleted')
        clean_remote(remote)
        umount(remote)
    else:
        logger.info('Mount unssuccesfull')
        # Notify by SMS once (the marker file prevents repeats).
        if(not os.path.exists(sms_sent_file) and args.sms):
            send_sms_notification(sms)
            logger.info('Sms sent')
            with open(sms_sent_file, 'w') as sms_not:
                msg = 'SMS has been sent {}'.format(CONSTANTS['TODAY'])
                sms_not.write(msg)
                logger.info(msg)
    # Only send the report when something actually changed.
    if(args.report and (
            html_data['new'] != '' or
            html_data['modified'] != '' or
            html_data['deleted'] != '' or
            html_data['moved'] != '')):
        html_report = build_html_report(html_data, CONSTANTS, html)
        send_mail_report(html_report, email)
        logger.info('Mail report sent')
    if(args.log):
        send_mail_log(CONSTANTS['log_file_path'], email, html)
        logger.info('log file sent')
    clean_video_db(video_db)
    check_mkv_videos(local, video_db)
    logger.info('DB cleaned')
    video_db.close()
    logger.info('Script ran in {}'.format(datetime.now() - start_time))
    os.unlink(pidfile)
if __name__ == "__main__":
sys.exit(main())
| Hoohm/pyHomeVM | pyHomeVM/__main__.py | Python | gpl-3.0 | 5,792 |
# -*- coding: utf-8 -*-
import gensim, logging
class SemanticVector:
    """Thin wrapper around gensim Word2Vec trained on sentences supplied by a
    structure object (Python 2 code: uses print statements)."""
    model = ''  # holds the gensim Word2Vec model once built; '' until then
    def __init__(self, structure):
        # structure must expose prepare_list_of_words_in_sentences() -- see model_word2vec
        self.structure = structure
    def model_word2vec(self, min_count=15, window=15, size=100):
        """Train a CBOW word2vec model (sg=0) on the structure's sentences and return it."""
        print 'preparing sentences list'
        sentences = self.structure.prepare_list_of_words_in_sentences()
        print 'start modeling'
        self.model = gensim.models.Word2Vec(sentences, size=size, window=window, min_count=min_count, workers=4, sample=0.001, sg=0)
        return self.model
    def save_model(self, name):
        """Persist the trained model to disk under the given file name."""
        self.model.save(name)
    def load_model(self, name):
        """Replace self.model with a model previously saved under name."""
        self.model = gensim.models.Word2Vec.load(name)
| arashzamani/lstm_nlg_ver1 | language_parser/SemanticVector.py | Python | gpl-3.0 | 679 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5 import PE_SUBSYSTEM_ENUMERATION
from scap.model.oval_5.defs.EntityStateType import EntityStateType
logger = logging.getLogger(__name__)
class EntityStatePeSubsystemType(EntityStateType):
    """OVAL state entity whose allowed values are the PE subsystem names."""
    MODEL_MAP = {
    }
    def get_value_enum(self):
        """Return the enumeration of valid values for this entity."""
        return PE_SUBSYSTEM_ENUMERATION
| cjaymes/pyscap | src/scap/model/oval_5/defs/windows/EntityStatePeSubsystemType.py | Python | gpl-3.0 | 1,004 |
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
import scipy
import random
from gnuradio import gr, gr_unittest
import blocks_swig as blocks
import digital_swig as digital
import channels_swig as channels
from ofdm_txrx import ofdm_tx, ofdm_rx
from utils import tagged_streams
# Set this to true if you need to write out data
LOG_DEBUG_INFO=False
class ofdm_tx_fg (gr.top_block):
    """Helper flow graph: feeds one tagged byte packet into ofdm_tx and
    records the modulated complex samples in a vector sink."""
    def __init__(self, data, len_tag_key):
        gr.top_block.__init__(self, "ofdm_tx")
        # Wrap the payload as a single tagged-stream packet for the transmitter.
        tx_data, tags = tagged_streams.packets_to_vectors((data,), len_tag_key)
        src = blocks.vector_source_b(data, False, 1, tags)
        self.tx = ofdm_tx(packet_length_tag_key=len_tag_key, debug_log=LOG_DEBUG_INFO)
        self.sink = blocks.vector_sink_c()
        self.connect(src, self.tx, self.sink)
    def get_tx_samples(self):
        """Return the transmitted samples captured by the sink (after run())."""
        return self.sink.data()
class ofdm_rx_fg (gr.top_block):
    """Helper flow graph: runs samples (optionally through a channel model)
    into ofdm_rx and records the demodulated bytes."""
    def __init__(self, samples, len_tag_key, channel=None, prepend_zeros=100):
        gr.top_block.__init__(self, "ofdm_rx")
        # Leading zeros exercise the synchronisation; trailing zeros flush the receiver.
        if prepend_zeros:
            samples = (0,) * prepend_zeros + tuple(samples)
        src = blocks.vector_source_c(tuple(samples) + (0,) * 1000)
        self.rx = ofdm_rx(frame_length_tag_key=len_tag_key, debug_log=LOG_DEBUG_INFO)
        if channel is not None:
            self.connect(src, channel, self.rx)
        else:
            self.connect(src, self.rx)
        self.sink = blocks.vector_sink_b()
        self.connect(self.rx, self.sink)
    def get_rx_bytes(self):
        """Return the received payload bytes captured by the sink (after run())."""
        return self.sink.data()
class test_ofdm_txrx (gr_unittest.TestCase):
    """QA tests for the ofdm_tx / ofdm_rx hierarchical blocks."""
    def setUp (self):
        self.tb = gr.top_block ()
    def tearDown (self):
        self.tb = None
    def test_001_tx (self):
        """ Just make sure the Tx works in general """
        len_tag_key = 'frame_len'
        n_bytes = 52
        # Expected sample count per the frame layout used by ofdm_tx:
        # ceil((payload+4)/6) payload symbols plus 3 more, 80 samples each.
        n_samples_expected = (numpy.ceil(1.0 * (n_bytes + 4) / 6) + 3) * 80
        test_data = [random.randint(0, 255) for x in range(n_bytes)]
        # NOTE(review): tx_data, tags, src and tx below are never used -- the
        # actual transmit chain is built inside ofdm_tx_fg.
        tx_data, tags = tagged_streams.packets_to_vectors((test_data,), len_tag_key)
        src = blocks.vector_source_b(test_data, False, 1, tags)
        tx = ofdm_tx(packet_length_tag_key=len_tag_key)
        tx_fg = ofdm_tx_fg(test_data, len_tag_key)
        tx_fg.run()
        self.assertEqual(len(tx_fg.get_tx_samples()), n_samples_expected)
    def test_002_rx_only_noise(self):
        """ Run the RX with only noise, check it doesn't crash
        or return a burst. """
        len_tag_key = 'frame_len'
        samples = (0,) * 1000
        channel = channels.channel_model(0.1)
        rx_fg = ofdm_rx_fg(samples, len_tag_key, channel)
        rx_fg.run()
        self.assertEqual(len(rx_fg.get_rx_bytes()), 0)
    def test_003_tx1packet(self):
        """ Transmit one packet, with slight AWGN and slight frequency + timing offset.
        Check packet is received and no bit errors have occurred. """
        len_tag_key = 'frame_len'
        n_bytes = 21
        fft_len = 64
        test_data = tuple([random.randint(0, 255) for x in range(n_bytes)])
        # 1.0/fft_len is one sub-carrier, a fine freq offset stays below that
        freq_offset = 1.0 / fft_len * 0.7
        # NOTE(review): the channel model is disabled here, so the link is ideal.
        #channel = channels.channel_model(0.01, freq_offset)
        channel = None
        # Tx
        tx_fg = ofdm_tx_fg(test_data, len_tag_key)
        tx_fg.run()
        tx_samples = tx_fg.get_tx_samples()
        # Rx
        rx_fg = ofdm_rx_fg(tx_samples, len_tag_key, channel, prepend_zeros=100)
        rx_fg.run()
        rx_data = rx_fg.get_rx_bytes()
        # Both ends must agree on the sync words, and payload must be error-free.
        self.assertEqual(tuple(tx_fg.tx.sync_word1), tuple(rx_fg.rx.sync_word1))
        self.assertEqual(tuple(tx_fg.tx.sync_word2), tuple(rx_fg.rx.sync_word2))
        self.assertEqual(test_data, rx_data)
    def test_004_tx1packet_large_fO(self):
        """ Transmit one packet, with slight AWGN and large frequency offset.
        Check packet is received and no bit errors have occurred. """
        fft_len = 64
        len_tag_key = 'frame_len'
        n_bytes = 21
        test_data = tuple([random.randint(0, 255) for x in range(n_bytes)])
        #test_data = tuple([255 for x in range(n_bytes)])
        # 1.0/fft_len is one sub-carrier; 2.5 carriers exercises coarse correction
        frequency_offset = 1.0 / fft_len * 2.5
        channel = channels.channel_model(0.00001, frequency_offset)
        # Tx
        tx_fg = ofdm_tx_fg(test_data, len_tag_key)
        tx_fg.run()
        tx_samples = tx_fg.get_tx_samples()
        # Rx
        rx_fg = ofdm_rx_fg(tx_samples, len_tag_key, channel, prepend_zeros=100)
        rx_fg.run()
        rx_data = rx_fg.get_rx_bytes()
        self.assertEqual(test_data, rx_data)
if __name__ == '__main__':
gr_unittest.run(test_ofdm_txrx, "test_ofdm_txrx.xml")
| Gabotero/GNURadioNext | gr-digital/python/qa_ofdm_txrx.py | Python | gpl-3.0 | 5,471 |
# -*- coding: utf-8 -*-
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, UniqueConstraint
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///challenge.sqlite', echo=False)
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
class Endereco(Base):
    """Brazilian postal address record keyed by CEP (postal code)."""
    __tablename__ = "endereco"
    #id = Column(Integer, primary_key=True)
    logradouro = Column(String)  # street
    bairro = Column(String)      # neighbourhood
    cidade = Column(String)      # city
    estado = Column(String)      # state
    cep = Column(String, primary_key=True)  # postal code, used as the primary key
    # NOTE(review): a primary key is already unique; this constraint is redundant.
    __table_args__ = (UniqueConstraint('cep'),)
    def __repr__(self):
        return "{}".format(self.cep)
Base.metadata.create_all(engine)
| alfredocdmiranda/challenge-cep | models.py | Python | gpl-3.0 | 782 |
# xVector Engine Client
# Copyright (c) 2011 James Buchwald
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Contains code for nicely reporting errors to the user.
"""
import logging
import traceback
from PyQt4 import QtGui
from xVClient import ClientGlobals
mainlog = logging.getLogger("")
# Severity constants
FatalError = 1
"""Fatal error, forces termination of application."""
NormalError = 2
"""Normal error, this has impact but does not crash the program."""
WarningError = 3
"""Warning, this does not affect function but should cause concern."""
NoticeError = 4
"""General information."""
def ShowError(message, severity=NormalError, parent=None):
    """
    Displays an error message to the user and waits for a response.

    The icon and window title are chosen from the severity constant; any
    unknown severity falls back to a plain "Message" box with no icon.
    """
    decorations = {
        FatalError: (QtGui.QMessageBox.Critical, "Fatal Error"),
        NormalError: (QtGui.QMessageBox.Critical, "Error"),
        WarningError: (QtGui.QMessageBox.Warning, "Warning"),
        NoticeError: (QtGui.QMessageBox.Information, "Notice"),
    }
    icon, title = decorations.get(severity, (QtGui.QMessageBox.NoIcon, "Message"))
    dlg = QtGui.QMessageBox(parent)
    dlg.setText(message)
    dlg.setIcon(icon)
    dlg.setWindowTitle(title)
    dlg.exec_()
def ShowException(severity=NormalError, start_msg='An error has occurred!', parent=None):
    '''
    Displays the currently-handled exception in an error box.
    '''
    details = traceback.format_exc()
    ShowError("{0}\n\n{1}".format(start_msg, details), severity, parent)
class ErrorMessageHandler(logging.Handler):
    '''
    Logging handler that displays messages in Qt message boxes.
    '''
    def __init__(self, parent=None):
        '''
        Creates a new handler.

        @type parent: QtGui.QWidget
        @param parent: Parent widget for errors to be displayed under.
        '''
        super(ErrorMessageHandler, self).__init__()
        self.Parent = parent  # Parent widget for errors to be displayed under.

    def _ShowError(self, message):
        '''
        Shows a modal error message box and returns when it is dismissed.

        @type message: string
        @param message: Message to display.
        '''
        # (The original fetched ClientGlobals.Application here but never used
        # it; that dead lookup has been removed.)
        wnd = QtGui.QMessageBox(parent=self.Parent)
        wnd.setIcon(QtGui.QMessageBox.Critical)
        wnd.setWindowTitle("Error")
        wnd.setStandardButtons(QtGui.QMessageBox.Ok)
        wnd.setText(message)
        wnd.exec_()

    def emit(self, record):
        '''Display the formatted log record in a message box (logging.Handler API).'''
        self._ShowError(record.getMessage())
def ConfigureLogging(parent=None):
    '''
    Configures the logging mechanism to report errors as dialog boxes.

    @type parent: QtGui.QWidget
    @param parent: Parent widget for errors to be displayed under.
    '''
    fmt = logging.Formatter("%(message)s")
    # ERROR and above pop up a Qt message box.
    dialog_handler = ErrorMessageHandler(parent)
    dialog_handler.setFormatter(fmt)
    dialog_handler.setLevel(logging.ERROR)
    mainlog.addHandler(dialog_handler)
    # Lower-level messages go to stderr.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)
    console_handler.setLevel(logging.DEBUG)
    mainlog.addHandler(console_handler)
    # Let every level through to the handlers; they filter individually.
    mainlog.setLevel(logging.DEBUG)
| buchwj/xvector | client/xVClient/ErrorReporting.py | Python | gpl-3.0 | 4,145 |
#
# Test PM force parallelisation:
# check force does not depend on number of MPI nodes
import fs
import numpy as np
import h5py
import pm_setup
# read reference file with the expected particle IDs and forces
# $ python3 create_force_h5.py to create
# NOTE(review): 'file' shadows the builtin of the same name.
file = h5py.File('force_%s.h5' % fs.config_precision(), 'r')
ref_id = file['id'][:]
ref_force = file['f'][:]
file.close()
# compute PM force
fs.msg.set_loglevel(0)
particles = pm_setup.force()
particle_id = particles.id
particle_force = particles.force
# compare two forces -- only MPI node 0 runs the comparison
if fs.comm.this_node() == 0:
    assert(np.all(particle_id == ref_id))
    print('pm_force id OK')
    force_rms = np.std(ref_force)
    diff = particle_force - ref_force
    diff_rms = np.std(diff)
    print('pm_force rms error %e / %e' % (diff_rms, force_rms))
    diff_max = np.max(np.abs(diff))
    print('pm_force max error %e / %e' % (diff_max, force_rms))
    # Tolerances scale with the machine epsilon of the configured precision.
    eps = np.finfo(particle_force.dtype).eps
    assert(diff_rms < 20*eps)
    assert(diff_max < 1000*eps)
    print('pm_force OK')
| junkoda/fs2 | test/test_pm_force.py | Python | gpl-3.0 | 990 |
# encoding: utf-8
# main.py, copyright 2014 by Marko Čibej <[email protected]>
#
# This file is part of SvgMapper. Full sources and documentation
# are available here: https://github.com/tumbislav/SvgMapper
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
# Full licence is in the file LICENSE and at http://www.gnu.org/copyleft/gpl.html
__author__ = 'Marko Čibej'
import argparse
from svgmapper import *
from helper import logger
def main(config, resources=None, maps=None, simulate=False):
    """Run a full SvgMapper job.

    :param config: path of the main configuration file
    :param resources: optional additional resource file(s) to load
    :param maps: optional map names that replace the targets from the config
    :param simulate: when True, parse all configuration but do not run
    """
    logger.info('Starting job')
    with SvgMapper() as mapper:
        mapper.load_config(config, resources)
        if maps:
            mapper.replace_targets(maps)
        if not simulate:
            mapper.run()
    logger.info('Finished')
def parse_args(argv=None):
    """Parse SvgMapper's command-line arguments.

    Args:
        argv: Optional list of argument strings; defaults to None so argparse
            reads sys.argv[1:] (normal CLI use).  New, backward-compatible
            parameter added for testability.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Transform maps in SVG format in various ways.')
    parser.add_argument('config_file', help='The name of the configuration file')
    parser.add_argument('-r', '--resource', help='Additional resource file(s)',
                        action='append', metavar='resource_file')
    parser.add_argument('-m', '--map', help='Map(s) to run instead of those listed in config file', metavar='map_name')
    parser.add_argument('-v', '--verbosity', help='Set verbosity: 0=errors only, 1=warnings, 2=info, 3=debug',
                        # range(0, 4): the help text (and set_logging's 4-entry
                        # level table) allow 0-3; the original range(0, 3)
                        # wrongly rejected '-v 3' (debug).
                        type=int, choices=range(0, 4), dest='verbosity')
    parser.add_argument('-l', '--log', help='Output to named log file', metavar=('level(0-3)', 'logFile'), nargs=2)
    parser.add_argument('-s', '--simulate', help='Don\'t actually do anything, just parse all the configurations',
                        action='store_true')
    return parser.parse_args(argv)
def set_logging(the_log, verbosity):
    """Attach console (and optionally file) handlers to the module logger."""
    levels = (logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)
    logger.setLevel(logging.DEBUG)
    if the_log:
        # the_log is (level-index, filename) from the '-l' option.
        file_handler = logging.FileHandler(the_log[1], mode='w')
        file_handler.setLevel(levels[int(the_log[0])])
        file_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
        logger.addHandler(file_handler)
    console = logging.StreamHandler()
    # Default console verbosity is INFO when none was given.
    console.setLevel(levels[verbosity] if verbosity else levels[2])
    logger.addHandler(console)
| tumbislav/SvgMapper | src/main.py | Python | gpl-3.0 | 2,923 |
#!/usr/bin/env python
from distutils.core import setup
import os
import sys
def main():
    """Collect the data files under share/ and invoke distutils setup for caffeine."""
    share_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "share")
    # don't trash the users system icons!!
    excluded = ['index.theme', 'index.theme~']
    data_files = []
    for path, dirs, files in os.walk(share_dir):
        target_dir = path.replace(share_dir, "share", 1)
        payload = [os.path.join(path, name) for name in files
                   if name not in excluded]
        data_files.append((target_dir, payload))
    setup(name="caffeine",
          version="2.4.1",
          description="""A status bar application able to temporarily prevent
        the activation of both the screensaver and the "sleep" powersaving
        mode.""",
          author="The Caffeine Developers",
          author_email="[email protected]",
          url="https://launchpad.net/caffeine",
          packages=["caffeine"],
          data_files=data_files,
          scripts=[os.path.join("bin", "caffeine")]
          )
if __name__ == "__main__":
main()
| ashh87/caffeine | setup.py | Python | gpl-3.0 | 1,042 |
import numpy as np
import cv2
from scipy import interpolate
from random import randint
import IPython
from alan.rgbd.basic_imaging import cos,sin
from alan.synthetic.synthetic_util import rand_sign
from alan.core.points import Point
"""
generates rope using non-holonomic car model dynamics (moves with turn radius)
generates labels at ends of rope
parameters:
h, w of image matrix
l, w of rope
returns:
image matrix with rope drawn
[left label, right label]
"""
def get_rope_car(h = 420, w = 420, rope_l_pixels = 800 , rope_w_pixels = 8, pix_per_step = 10, steps_per_curve = 10, lo_turn_delta = 5, hi_turn_delta = 10):
    """Synthesize a rope image by driving a non-holonomic-car model.

    Returns (image, [left_label, right_label], status) where each label is
    [x, y, 0] and status is 1 on success or -1 when rejection sampling fails
    (an endpoint left the image or landed on the rope body).
    """
    #randomize start: [x, y, heading in degrees]
    init_pos = np.array([randint(0, w - 1), randint(0, h - 1), randint(0, 360)])
    all_positions = np.array([init_pos])
    #dependent parameter (use float division)
    num_curves = int(rope_l_pixels/(steps_per_curve * pix_per_step * 1.0))
    #point generation: each curve keeps a constant signed turn rate
    for c in range(num_curves):
        turn_delta = rand_sign() * randint(lo_turn_delta, hi_turn_delta)
        for s in range(steps_per_curve):
            curr_pos = all_positions[-1]
            # advance pix_per_step along the heading, then turn by turn_delta
            delta_pos = np.array([pix_per_step * cos(curr_pos[2]), pix_per_step * sin(curr_pos[2]), turn_delta])
            all_positions = np.append(all_positions, [curr_pos + delta_pos], axis = 0)
    #center the points (avoid leaving image bounds)
    mid_x_points = (min(all_positions[:,0]) + max(all_positions[:,0]))/2.0
    mid_y_points = (min(all_positions[:,1]) + max(all_positions[:,1]))/2.0
    for pos in all_positions:
        pos[0] -= (mid_x_points - w/2.0)
        pos[1] -= (mid_y_points - h/2.0)
    #draw rope as line segments between consecutive positions
    image = np.zeros((h, w))
    prev_pos = all_positions[0]
    for curr_pos in all_positions[1:]:
        cv2.line(image, (int(prev_pos[0]), int(prev_pos[1])), (int(curr_pos[0]), int(curr_pos[1])), 255, rope_w_pixels)
        prev_pos = curr_pos
    #get endpoint labels, sorted by x
    labels = [all_positions[0], all_positions[-1]]
    if labels[0][0] > labels[1][0]:
        labels = [labels[1], labels[0]]
    #labels = [[l[0], l[1], l[2] + 90] for l in labels]
    #Ignoring Rotation for Now (third label component forced to 0)
    labels = [[l[0], l[1], 0] for l in labels]
    #rejection sampling: discard samples with bad endpoints
    for num_label in range(2):
        c_label = labels[num_label]
        #case 1- endpoints not in image
        if check_bounds(c_label, [w, h]) == -1:
            return image, labels, -1
        #case 2- endpoint on top of other rope segment
        if check_overlap(c_label, [w, h], image, rope_w_pixels) == -1:
            return image, labels, -1
    return image, labels, 1
def check_bounds(label, bounds):
    """Return -1 if label's (x, y) lies within 5 pixels of the image border, else 0."""
    margin = 5
    inside = all(margin <= label[axis] <= bounds[axis] - 1 - margin
                 for axis in (0, 1))
    return 0 if inside else -1
def check_overlap(label, bounds, image, rope_w_pixels):
    """Return -1 if the endpoint at label appears to sit on a rope segment, else 0."""
    x_lo = int(max(0, label[0] - rope_w_pixels))
    y_lo = int(max(0, label[1] - rope_w_pixels))
    x_hi = int(min(bounds[0] - 1, label[0] + rope_w_pixels))
    y_hi = int(min(bounds[1] - 1, label[1] + rope_w_pixels))
    # Total whiteness (255 = rope) over the window around the endpoint,
    # normalized to a per-pixel fraction in [0, 1].
    white = sum(image[row][col] / 255.0
                for col in range(x_lo, x_hi)
                for row in range(y_lo, y_hi))
    # More than 60% white pixels means the endpoint is probably lying on rope.
    threshold = 0.6 * (y_hi - y_lo) * (x_hi - x_lo)
    return -1 if white > threshold else 0
| mdlaskey/DeepLfD | src/deep_lfd/synthetic/synthetic_rope.py | Python | gpl-3.0 | 3,594 |
__author__ = 'nicolas'
# coding=utf-8
from os.path import expanduser
from ordereddict import OrderedDict
from Bio import SwissProt
import time
import MySQLdb as mdb
"""
Fuck!
from ordereddict import OrderedDict
import MySQLdb as mdb
dicc = {}
dictdebug_empty = OrderedDict()
dictdebug = dictdebug_empty
dictdebug['hola'] = 'chau'
print(dictdebug.items())
print(dictdebug_empty.items())
dictdebug_empty.clear()
print(dictdebug_empty.items())
print(dictdebug.items())
"""
# Establecer el tiempo de inicio del script
start_time = time.time()
# Variables del script
database = "ptmdb"
tabla_cuentas = "sprot_count1"
tabla_ptms = "sprot_ptms1"
file_name = "uniprot_sprot.dat"
desde = 0
hasta = 542783 # Hay 542782 entradas de AC??
# Conectar a la base de datos
con = mdb.connect('localhost', 'nicolas', passwd="nicolaslfp", db=database)
cur = con.cursor()
cur.execute("SELECT VERSION()")
cur.execute("USE " + database)
print("USE ptmdb;")
# Abrir el .dat de uniprot
uniprot_file = expanduser("~") + '/QB9_Files/' + file_name
output_file = expanduser("~") + '/QB9-git/QB9/resources/output.txt'
def count_amino_acids_ext(seq):
    """Count occurrences of each letter A-Z in seq.

    Returns a fresh OrderedDict mapping letter -> count, using the
    module-level prot_dic as the key template.  The original implementation
    assigned ``prot_dic2 = prot_dic``, which merely aliased the shared
    template: every call mutated and returned the *same* dict object, so any
    stored result was silently clobbered by the next call.  Copying fixes
    that while keeping the per-call counts identical.
    """
    counts = prot_dic.copy()  # independent copy; OrderedDict.copy keeps key order
    for aa in counts:
        counts[aa] = seq.count(aa)
    return counts
# Armo un diccionario con los AAs que voy a contar
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
prot_dic = OrderedDict((k, 0) for k in abc)
# Interesting feature types
ptmrecords = ["MOD_RES", "LIPID", "CARBOHYD", "DISULFID", "CROSSLNK"]
# Non-experimental qualifiers for feature annotations
neqs = ["Probable", "Potential", "By similarity"] # Y "Experimental"
# Las categorías están en un diccionario con su type de mysql todo volar
categories = OrderedDict()
categories['AC'] = "varchar(30) NOT NULL" # accesion number
categories['FT'] = "varchar(30) NOT NULL"
categories['STATUS'] = "varchar(30) NOT NULL"
categories['PTM'] = "varchar(100) NOT NULL"
categories['FROM_RES'] = "varchar(10) NOT NULL"
categories['TO_RES'] = "varchar(10) NOT NULL"
categories['FROM_AA'] = "varchar(10) NOT NULL" # vamo a implementar el target directamente!!!! =D
categories['TO_AA'] = "varchar(10) NOT NULL"
categories['SQ'] = "text(45000) NOT NULL" # SQ SEQUENCE XXXX AA; XXXXX MW; XXXXXXXXXXXXXXXX CRC64;
categories['LENGTH'] = "varchar(200) NOT NULL" # SQ SEQUENCE XXXX AA; XXXXX MW; XXXXXXXXXXXXXXXX CRC64;
categories['ORG'] = "text(500) NOT NULL" # organism
categories['OC'] = "varchar(30) NOT NULL" # organism classification, vamos solo con el dominio
categories['OX'] = "varchar(200) NOT NULL" # taxonomic ID
categories['HO'] = "text(500)" # host organism
categories['inumber'] = "varchar(200) NOT NULL"
# categories['CC'] = "varchar(200)" # comments section, nos interesa el campo "PTM"
# categories['SQi'] = "varchar(200)" # SQ SEQUENCE XXXX AA; XXXXX MW; XXXXXXXXXXXXXXXX CRC64;
# Defino un diccionario modelo donde cargar los valores que voy a extraer de la lista
empty_data = OrderedDict()
for gato in categories: # usando las keys de categories y un valor por defecto todo vacío no es nulo ¿cómo hago?
empty_data[gato] = 'NOFT'
empty_data['FROM_RES'] = '?'
empty_data['TO_RES'] = '?'
empty_data['FROM_AA'] = '?'
empty_data['TO_AA'] = '?'
data = empty_data.copy() # este es el diccionario de registros vacío que voy a usar
print("DROP TABLE " + tabla_cuentas + ";")
print("DROP TABLE " + tabla_ptms + ";")
# Crear la tabla de cuentas
prot_dic_def_items = []
prot_dic_def = OrderedDict((k, 'SMALLINT') for k in abc)
for cat, value in prot_dic_def.items(): # concatenaciones key y valor
prot_dic_def_items.append(cat + ' ' + value) # guardadaes en la lista
table_def = ', '.join(prot_dic_def_items) # definicion de la tabla
print("CREATE TABLE IF NOT EXISTS "
+ tabla_cuentas
+ " (AC VARCHAR(30) UNIQUE, OC_ID VARCHAR(30), LENGTH MEDIUMINT,"
+ table_def
+ ") ENGINE=InnoDB;")
print("commit;")
# con.commit()
# Crear la tabla de ptms
table_def_items = [] # lista para concatenaciones de key y valor
for cat, value in categories.items(): # concatenaciones key y valor
table_def_items.append(cat + ' ' + value) # guardadaes en la lista
table_def_2 = ', '.join(table_def_items) # definicion de la tabla
print("CREATE TABLE IF NOT EXISTS " + tabla_ptms + " (" + table_def_2 + ") ENGINE=InnoDB;")
print("commit;")
# con.commit()
# Variables del loop
i = 0
j = 0
ptm = ''
out = []
listap = []
listaq = []
listar = []
olista = []
interes = []
with open(uniprot_file) as uniprot: # esto me abre y cierra el archivo al final
for record in SwissProt.parse(uniprot): # parseando los records de uniprot
i += 1
if i % 100 == 0:
print("commit;")
data = empty_data.copy() # en vez de vaciar el diccionario, le asigno el dafault sin enlazarlo al vacío
# Acá cargo los datos generales para las PTMs de una proteína/entrada de uniprot (instancias de entradas)
# tienen que cargarse en el orden de las columnas en la ptmdb y el del insert
# print(record.accessions[0])
data['AC'] = record.accessions[0] # solo el principal, el resto nose.
data['SQ'] = record.sequence
data['LENGTH'] = record.sequence_length # todo acá hay un problema? no entran las de mas de 999 residuos
data['ORG'] = record.organism # el bicho
data['OC'] = record.organism_classification[0] # el dominio del bicho
data['OX'] = record.taxonomy_id[0] # Id taxonomica del bicho
del olista[:]
if not record.host_organism:
data['HO'] = 'No host'
else:
for o in record.host_organism:
olista.append((o.split(";"))[0])
data['HO'] = ', '.join(olista) # y esto el host del virus ¿o parásito?
data['inumber'] = str(i) # solo para debuguear =) ver hasta donde llegó
# Generar y guardar el insert del #AA en la secuencia
del listaq[:]
contenido_aa = count_amino_acids_ext(record.sequence) # Guardo el dict con partes AA, #AA de la secuencia
for q in contenido_aa.itervalues():
listaq.append(str(q)) # y los pongo en una lista
sql_insert_values_q = ', '.join(listaq)
if i >= desde:
print("INSERT INTO " + tabla_cuentas + " VALUES ('"
+ record.accessions[0] + "', '"
+ record.organism_classification[0] + "', "
+ str(record.sequence_length)
+ ", " + sql_insert_values_q + ");")
# print("commit;")
# con.commit()
# Acá empiezo con los features, hay alguno interesante?
features = record.features # todo insertar los FTs en otra tabla junto con OC; OX, OR...?
del out[:]
del interes[:]
for a in range(0, len(features)): # guardar los campos "candidato" del FT en una lista llamada out
out.append(features[a][0])
interes = list(set(out).intersection(ptmrecords)) # armar un set con los interesantes y hacerlo lista interes
if interes: # si interes no está vacía, entonces hay algo para cargar
# todo evitar duplicados de secuencia, relacion via AC?
# ahora cargo cada PTM en data (subinstancias de entrada)
for feature in features: # iterar los features de la entrada
if feature[0] in interes: # si el titulo del FT interesa, proseguir ¡mejora un poco! =D
for tipo in interes: # iterear los tipos interesantes encontrados en el feature
if feature[0] in tipo: # si el feature evaluado interesante, cargar los datos en data[]
A = feature[1] # de el residuo tal (va a ser el mismo que el siguiente si está solo)
B = feature[2] # hacia el otro. OJO hay algunos desconocidos indicados con un "?"
C = feature[3] # este tiene la posta?
D = feature[4] # este aparece a veces? todo wtf?
# reiniciar FT, FROM y TO
data['FT'] = 'NOFT'
data['FROM_RES'] = '?'
data['TO_RES'] = '?'
data['FROM_AA'] = '?'
data['TO_AA'] = '?'
# Asignar FT
data['FT'] = feature[0]
data['FROM_RES'] = A
data['TO_RES'] = B
# reiniciar PTM y STATUS
ptm = ''
data['PTM'] = 'NOFT'
data['STATUS'] = "Experimental"
# Asignar STATUS y PTM
if C: # si C (el que tiene el nombre de la PTM y el STATUS) contiene algo
for neq in neqs: # iterar los STATUS posibles
if neq in C: # si C contiene el STATUS pirulo
data['STATUS'] = neq # asignar el valor a STATUS
C = C.replace('(' + neq + ")", '') # hay que sacar esta porquería
C = C.replace(neq, '')
# hay que sacar esta porquería si no aparece con paréntesis
break # esto corta con el loop más "cercano" en indentación
ptm = ((C.split(" /"))[0].split(';')[0]). \
rstrip(" ").rstrip(".").rstrip(" ")
# Obs: a veces las mods tienen identificadores estables que empiezan con "/"
# así que hay que sacarlo. y otas cosas después de un ";" CHAU.
# También hay CROSSLNKs con otras anotaciones, que los hace aparecer como únicas
# al contarlas, pero en realidad son casi iguales todo quizás ocurre con otras?
# Ver http://web.expasy.org/docs/userman.html#FT_line
# También le saco espacios y puntos al final.
# Odio esto del formato... todo no hay algo que lo haga mejor?
if tipo == 'DISULFID': # si el tipo es disulfuro, no hay mucho que decir.
ptm = "S-cysteinyl 3-(oxidosulfanyl)alanine (Cys-Cys)"
data['FROM_AA'] = 'C'
data['TO_AA'] = 'C'
else: # pero si no lo es, guardamos cosas normalmente.
# Asignar target residue
if A != '?':
data['FROM_AA'] = data['SQ'][int(data['FROM_RES'])-1]
else:
data['FROM_AA'] = '?'
if B != '?':
data['TO_AA'] = data['SQ'][int(data['TO_RES'])-1]
else:
data['TO_AA'] = '?'
if ptm.find("with") != -1: # si la ptm contiene la palabra "with" (caso crosslink)
ptm = ptm.split(" (with")[0].split(" (int")[0] # pero si la contiene, recortar
data['PTM'] = ptm
del listap[:]
for p in data.itervalues(): # itero los valores de los datos que fui cargando al dict.
listap.append(str(p).replace("'", "''")) # y los pongo en una lista
sql_insert_values_p = '\'' + \
'\', \''.join(listap) + \
'\''
# Que después uno como van en el INSERT
# El insert, en el que reemplazo ' por '', para escaparlas en sql
if i >= desde: # para hacerlo en partes
print(("INSERT INTO " + tabla_ptms + " VALUES (%r);"
% sql_insert_values_p).replace("-...", "").replace("\"", '').replace('.', ''))
# print("commit;")
# con.commit()
# unir los elementos de values con comas
else:
# Si, en cambio, la entrada no tiene FT insteresantes, solo cargo los datos generales y defaults
del listar[:]
for r in data.itervalues():
listar.append(str(r).replace("'", "''"))
sql_insert_values_r = '\'' + '\', \''.join(listar) + '\''
if i >= desde: # para hacerlo en partes
print(("INSERT INTO " + tabla_ptms + " VALUES (%r);"
% sql_insert_values_r).replace("\"", '').replace('.', ''))
# print("commit;")
# con.commit()
if i >= hasta: # segun uniprot el número de entradas de secuencias es 54247468
# print("\n")
# print(i)
break
# The sequence counts 60 amino acids per line, in groups of 10 amino acids, beginning in position 6 of the line.
# http://www.uniprot.org/manual/
# General Annotation: cofactores, mass spectrometry data, PTM (complementario al MOD_RES y otras PTMs..?)
# Sequence Annotation (Features): Sites (cleavage sites?), non-standard residue,
# MOD_RES (excluye lipidos, crosslinks y glycanos), lipidación, puente disulfuro, cross-link, glycosylation
# todo consider PE "protein existence", KW contiene "glycoprotein" qué otros?
# todo también dentro de FT
# output.close()
# print('\n')
# print(time.time() - start_time)
# """
| naikymen/QB9 | uniprot_parser/uniprot_parser_v01.py | Python | gpl-3.0 | 14,072 |
# -*- coding: utf-8 -*-
# © 2013-2016 Akretion (Alexis de Lattre <[email protected]>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError
class AccountInvoiceLine(models.Model):
    _inherit = 'account.invoice.line'

    start_date = fields.Date('Start Date')
    end_date = fields.Date('End Date')
    must_have_dates = fields.Boolean(
        related='product_id.must_have_dates', readonly=True)

    @api.multi
    @api.constrains('start_date', 'end_date')
    def _check_start_end_dates(self):
        """Ensure start/end dates always come as a complete, ordered pair."""
        for line in self:
            has_start = bool(line.start_date)
            has_end = bool(line.end_date)
            if has_start and not has_end:
                raise ValidationError(
                    _("Missing End Date for invoice line with "
                      "Description '%s'.")
                    % (line.name))
            if has_end and not has_start:
                raise ValidationError(
                    _("Missing Start Date for invoice line with "
                      "Description '%s'.")
                    % (line.name))
            if has_start and has_end and line.start_date > line.end_date:
                raise ValidationError(
                    _("Start Date should be before or be the same as "
                      "End Date for invoice line with Description '%s'.")
                    % (line.name))
            # Note: we deliberately do NOT require dates here for products
            # with must_have_dates=True, because that would block automatic
            # invoice generation/import. That requirement is enforced upon
            # invoice validation (see AccountInvoice.action_move_create).
class AccountInvoice(models.Model):
    _inherit = 'account.invoice'

    def inv_line_characteristic_hashcode(self, invoice_line):
        """Extend the grouping hashcode with start and end dates so the
        journal option "Group Invoice Lines" never merges lines covering
        different periods."""
        code = super(AccountInvoice, self).inv_line_characteristic_hashcode(
            invoice_line)
        start = invoice_line.get('start_date', 'False')
        end = invoice_line.get('end_date', 'False')
        return '%s-%s-%s' % (code, start, end)

    @api.model
    def line_get_convert(self, line, part):
        """Carry the dates over from invoice line values to move line values."""
        res = super(AccountInvoice, self).line_get_convert(line, part)
        res.update(
            start_date=line.get('start_date', False),
            end_date=line.get('end_date', False),
        )
        return res

    @api.model
    def invoice_line_move_line_get(self):
        """Copy the dates of each invoice line onto its move line dict."""
        res = super(AccountInvoice, self).invoice_line_move_line_get()
        line_model = self.env['account.invoice.line']
        for vals in res:
            invoice_line = line_model.browse(vals['invl_id'])
            vals['start_date'] = invoice_line.start_date
            vals['end_date'] = invoice_line.end_date
        return res

    @api.multi
    def action_move_create(self):
        """Check that products with must_have_dates=True have
        Start and End Dates before the move is created."""
        for invoice in self:
            for iline in invoice.invoice_line_ids:
                if not (iline.product_id and iline.product_id.must_have_dates):
                    continue
                if iline.start_date and iline.end_date:
                    continue
                raise UserError(_(
                    "Missing Start Date and End Date for invoice "
                    "line with Product '%s' which has the "
                    "property 'Must Have Start and End Dates'.")
                    % (iline.product_id.name))
        return super(AccountInvoice, self).action_move_create()
| stellaf/sales_rental | account_invoice_start_end_dates/models/account_invoice.py | Python | gpl-3.0 | 3,875 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable ``sorttimestamp`` column to ``agentex.DataPoint``."""

    dependencies = [
        ('agentex', '0014_remove_decision_datacollect'),
    ]

    operations = [
        migrations.AddField(
            model_name='datapoint',
            name='sorttimestamp',
            # null/blank so existing rows do not need a default value.
            field=models.DateTimeField(null=True, blank=True),
        ),
    ]
| tomasjames/citsciportal | app/agentex/migrations/0015_datapoint_sorttimestamp.py | Python | gpl-3.0 | 431 |
import numpy as np
import struct
import wave
from winsound import PlaySound, SND_FILENAME, SND_ASYNC
import matplotlib.pyplot as plt
CHUNK = 1 << 8
def play(filename):
    """Play *filename* asynchronously through the Windows winsound API."""
    PlaySound(filename, SND_FILENAME | SND_ASYNC)
fn = r"D:\b.wav"

# Read every frame of the WAV file; the context manager guarantees the file
# handle is closed (the previous version never closed it).
with wave.open(fn) as f:
    print(f.getparams())
    ch = f.getnchannels()
    sw = f.getsampwidth()
    n = f.getnframes()
    framerate = f.getframerate()
    data = bytearray()
    while len(data) < n * ch * sw:
        chunk = f.readframes(CHUNK)
        if not chunk:
            # Header promised more frames than the file holds; stop instead
            # of looping forever on empty reads at EOF.
            break
        data.extend(chunk)

# WAV PCM data is little-endian signed 16-bit; '<' makes that explicit on any
# host byte order. Unpack whatever was actually read, not the header count.
# NOTE(review): with multi-channel files the samples stay interleaved here,
# exactly as in the original code.
data = np.array(struct.unpack('<{n}h'.format(n=len(data) // 2), data))

w = np.fft.fft(data)
freqs = np.fft.fftfreq(len(w))
module = np.abs(w)
idmax = module.argmax()
# Dominant frequency in Hz.
print(abs(freqs[idmax]) * framerate)

plt.specgram(data)
plt.show()
| DavideCanton/Python3 | audio/freq.py | Python | gpl-3.0 | 663 |
import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import seq2seq
import numpy as np
class Model():
    """Character/scalar sequence RNN regression model (legacy TF 0.x graph API).

    Builds a multi-layer RNN (basic rnn / gru / lstm), a scalar output layer,
    a relative squared-error cost, and an Adam training op with gradient
    clipping. With ``infer=True`` the graph is built for single-step sampling.
    """
    def __init__(self, args, infer=False):
        # args: namespace with model, rnn_size, num_layers, batch_size,
        # seq_length and grad_clip. NOTE: args is mutated below
        # (batch_size/seq_length when infer, and vocab_size is forced to 1).
        self.args = args
        if infer:
            args.batch_size = 1
            args.seq_length = 1
        # Select the RNN cell implementation.
        if args.model == 'rnn':
            cell_fn = rnn_cell.BasicRNNCell
        elif args.model == 'gru':
            cell_fn = rnn_cell.GRUCell
        elif args.model == 'lstm':
            cell_fn = rnn_cell.BasicLSTMCell
        else:
            raise Exception("model type not supported: {}".format(args.model))
        cell = cell_fn(args.rnn_size)
        # Stack num_layers identical cells.
        self.cell = cell = rnn_cell.MultiRNNCell([cell] * args.num_layers)
        # Inputs are raw float scalars; targets are integers cast later.
        self.input_data = tf.placeholder(tf.float32, [args.batch_size, args.seq_length], name="input")
        self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length], name="targets")
        self.initial_state = cell.zero_state(args.batch_size, tf.float32)
        # Split the batch into per-timestep tensors for the decoder.
        inputs_data = tf.split(1, args.seq_length, self.input_data)
        args.vocab_size = 1
        with tf.variable_scope('rnnlm'):
            softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.vocab_size])
            softmax_b = tf.get_variable("softmax_b", [args.vocab_size])
            # with tf.device("/cpu:0"):
            # embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
            # inputs = tf.split(1, args.seq_length, tf.nn.embedding_lookup(embedding, self.input_data))
            #inputs = tf.split(1, args.seq_length, self.input_data)
            # inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
        #def loop(prev, _):
        # prev = tf.matmul(prev, softmax_w) + softmax_b
        # prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
        # return tf.nn.embedding_lookup(embedding, prev_symbol)
        #outputs, last_state = seq2seq.rnn_decoder(inputs, self.initial_state, cell, loop_function=loop if infer else None, scope='rnnlm')
        outputs, last_state = seq2seq.rnn_decoder(inputs_data, self.initial_state, cell)
        output = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_size])
        self.logits = tf.matmul(output, softmax_w) + softmax_b
        self.probs = tf.nn.softmax(self.logits)
        #loss = seq2seq.sequence_loss_by_example([self.logits],
        # [tf.reshape(self.targets, [-1])],
        # [tf.ones([args.batch_size * args.seq_length])],
        # args.vocab_size)
        # L2 regularisation on the output weights only.
        self.reg_cost = tf.reduce_sum(1e-1 * (tf.nn.l2_loss(softmax_w)))
        target = tf.cast(self.targets, tf.float32)
        self.target_vector = tf.reshape(target, [-1])
        # Relative squared error: (prediction / target)^2.
        # NOTE(review): this is minimised at prediction=0, not prediction=target;
        # presumably ((logits / target) - 1)^2 was intended -- confirm.
        loss = tf.pow(self.logits / self.target_vector, 2)
        self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length + self.reg_cost
        self.final_state = last_state
        # Learning rate is a variable so it can be assigned during training.
        self.lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                args.grad_clip)
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_op = optimizer.apply_gradients(zip(grads, tvars))

    def sample(self, sess, chars, vocab, num=200, prime='The ', sampling_type=1):
        """Generate ``num`` symbols, seeded with ``prime``.

        sampling_type: 0 = argmax, 1 = weighted sample (default),
        2 = weighted only after a space, argmax otherwise.
        """
        state = self.cell.zero_state(1, tf.float32).eval()
        # Warm up the RNN state on all but the last prime symbol.
        for char in prime[:-1]:
            x = np.zeros((1, 1))
            x[0, 0] = vocab[char]
            feed = {self.input_data: x, self.initial_state:state}
            [state] = sess.run([self.final_state], feed)
        def weighted_pick(weights):
            # Sample an index proportionally to its weight.
            t = np.cumsum(weights)
            s = np.sum(weights)
            return(int(np.searchsorted(t, np.random.rand(1)*s)))
        ret = prime
        char = prime[-1]
        for n in range(num):
            x = np.zeros((1, 1))
            x[0, 0] = vocab[char]
            feed = {self.input_data: x, self.initial_state:state}
            [probs, state] = sess.run([self.probs, self.final_state], feed)
            p = probs[0]
            if sampling_type == 0:
                sample = np.argmax(p)
            elif sampling_type == 2:
                if char == ' ':
                    sample = weighted_pick(p)
                else:
                    sample = np.argmax(p)
            else: # sampling_type == 1 default:
                sample = weighted_pick(p)
            pred = chars[sample]
            ret += pred
            char = pred
        return ret
| bottiger/Integer-Sequence-Learning | char/model.py | Python | gpl-3.0 | 4,473 |
# -*- coding: utf-8 -*-
import terrariumLogging
logger = terrariumLogging.logging.getLogger(__name__)
from pathlib import Path
import inspect
from importlib import import_module
import sys
import statistics
from hashlib import md5
from time import time, sleep
from operator import itemgetter
from func_timeout import func_timeout, FunctionTimedOut
import RPi.GPIO as GPIO
# pip install retry
from retry import retry
# For analog sensors
from gpiozero import MCP3008
# For I2C sensors
import smbus2
# Bluetooth sensors
from bluepy.btle import Scanner
from terrariumUtils import terrariumUtils, terrariumCache, classproperty
class terrariumSensorException(TypeError):
  '''Base exception: there is a problem with loading a hardware sensor.'''
  pass
class terrariumSensorUnknownHardwareException(terrariumSensorException):
  '''Raised when the requested hardware type matches no known sensor driver.'''
  pass
class terrariumSensorInvalidSensorTypeException(terrariumSensorException):
  '''Raised when the driver does not support the requested measurement type.'''
  pass
class terrariumSensorLoadingException(terrariumSensorException):
  '''Raised when the physical sensor device cannot be loaded/initialised.'''
  pass
class terrariumSensorUpdateException(terrariumSensorException):
  '''Raised when reading a new value from the sensor fails.'''
  pass
class terrariumSensor(object):
  """Polymorphic base class for all hardware sensors.

  The class scans its own package directory for ``*_sensor.py`` driver
  modules; calling ``terrariumSensor(...)`` then returns an instance of the
  concrete driver class matching the requested hardware type.
  """
  # Concrete drivers override these class attributes.
  HARDWARE = None
  TYPES = []
  NAME = None
  _CACHE_TIMEOUT = 30       # seconds a measurement stays valid in the cache
  _UPDATE_TIME_OUT = 10     # seconds before loading/updating is aborted
  @classproperty
  def available_hardware(__cls__):
    """Return a dict mapping HARDWARE id to driver class (cached forever)."""
    __CACHE_KEY = 'known_sensors'
    cache = terrariumCache()
    known_sensors = cache.get_data(__CACHE_KEY)
    if known_sensors is None:
      known_sensors = {}
      all_types = []
      # Start dynamically loading sensors (based on: https://www.bnmetrics.com/blog/dynamic-import-in-python3)
      for file in sorted(Path(__file__).parent.glob('*_sensor.py')):
        imported_module = import_module( '.' + file.stem, package='{}'.format(__name__))
        for i in dir(imported_module):
          attribute = getattr(imported_module, i)
          if inspect.isclass(attribute) and attribute != __cls__ and issubclass(attribute, __cls__):
            # Expose the driver class on this module and register it.
            setattr(sys.modules[__name__], file.stem, attribute)
            if attribute.HARDWARE is not None:
              known_sensors[attribute.HARDWARE] = attribute
              all_types += attribute.TYPES
      # Update sensors that do not have a known type. Those are remote and scripts sensors
      all_types = list(set(all_types))
      for hardware in known_sensors:
        if len(known_sensors[hardware].TYPES) == 0:
          known_sensors[hardware].TYPES = all_types
      cache.set_data(__CACHE_KEY,known_sensors,-1)
    return known_sensors
  # Return a list with type and names of supported switches
  @classproperty
  def available_sensors(__cls__):
    data = []
    all_types = ['conductivity'] # For now 'conductivity' is only available through script or remote
    for (hardware_type, sensor) in __cls__.available_hardware.items():
      if sensor.NAME is not None:
        data.append({'hardware' : hardware_type, 'name' : sensor.NAME, 'types' : sensor.TYPES})
        all_types += sensor.TYPES
    # Remote and script sensors can handle all the known types
    all_types = list(set(all_types))
    for sensor in data:
      if len(sensor['types']) == 0:
        sensor['types'] = all_types
    return sorted(data, key=itemgetter('name'))
  @classproperty
  def sensor_types(__cls__):
    """Return a sorted, de-duplicated list of every supported sensor type."""
    sensor_types = []
    for sensor in __cls__.available_sensors:
      sensor_types += sensor['types']
    return sorted(list(set(sensor_types)))
  # Return polymorph sensor....
  def __new__(cls, sensor_id, hardware_type, sensor_type, address, name = '', unit_value_callback = None, trigger_callback = None):
    known_sensors = terrariumSensor.available_hardware
    if hardware_type not in known_sensors:
      raise terrariumSensorUnknownHardwareException(f'Trying to load an unknown hardware device {hardware_type} at address {address} with name {name}')
    if sensor_type not in known_sensors[hardware_type].TYPES:
      raise terrariumSensorInvalidSensorTypeException(f'Hardware does not have a {sensor_type} sensor at address {address} with name {name}')
    return super(terrariumSensor, cls).__new__(known_sensors[hardware_type])
  def __init__(self, id, _, sensor_type, address, name = '', unit_value_callback = None, trigger_callback = None):
    """Store the device description and load the hardware driver."""
    self._device = {'id' : None,
                    'name' : None,
                    'address' : None,
                    'type' : sensor_type, # Readonly property
                    'device' : None,
                    'cache_key' : None,
                    'power_mngt' : None,
                    'erratic_errors' : 0,
                    'last_update' : 0,
                    'value' : None}
    self._sensor_cache = terrariumCache()
    self.__unit_value_callback = unit_value_callback
    self.__trigger_callback = trigger_callback
    # Set the properties
    self.id = id
    self.name = name
    self.address = address
    # Load hardware can update the address value that is used for making a unique ID when not set
    self.load_hardware()
    # REMINDER: We do not take a measurement at this point. That is up to the developer to explicit request an update.
  def __power_management(self, on):
    # Some kind of 'power management' with the last gpio pin number :) https://raspberrypi.stackexchange.com/questions/68123/preventing-corrosion-on-yl-69
    if self._device['power_mngt'] is not None:
      logger.debug(f'Sensor {self} has power management enabled')
      if on:
        logger.debug('Enable power to the sensor {self} now.')
        GPIO.output(self._device['power_mngt'], GPIO.HIGH)
        sleep(1)
      else:
        logger.debug('Close power to the sensor {self} now.')
        GPIO.output(self._device['power_mngt'], GPIO.LOW)
  @property
  def __sensor_cache_key(self):
    # Cache key identifies the physical device (hardware + address).
    if self._device['cache_key'] is None:
      self._device['cache_key'] = md5(f'{self.HARDWARE}{self.address}'.encode()).hexdigest()
    return self._device['cache_key']
  @property
  def id(self):
    # Lazily derived from hardware + address + type when not set explicitly.
    if self._device['id'] is None:
      self._device['id'] = md5(f'{self.HARDWARE}{self.address}{self.type}'.encode()).hexdigest()
    return self._device['id']
  @id.setter
  def id(self, value):
    if value is not None:
      self._device['id'] = value.strip()
  @property
  def hardware(self):
    return self.HARDWARE
  @property
  def name(self):
    return self._device['name']
  @name.setter
  def name(self, value):
    if '' != value.strip():
      self._device['name'] = value.strip()
  @property
  def address(self):
    return self._device['address']
  @property
  def _address(self):
    # Split the comma separated address into its non-empty parts.
    address = [ part.strip() for part in self.address.split(',') if '' != part.strip()]
    return address
  @address.setter
  def address(self, value):
    value = terrariumUtils.clean_address(value)
    if value is not None and '' != value:
      self._device['address'] = value
  # Readonly property
  @property
  def device(self):
    return self._device['device']
  # Readonly property
  @property
  def sensor_type(self):
    return self._device['type']
  # Readonly property
  @property
  def type(self):
    return self._device['type']
  @property
  def value(self):
    return self._device['value']
  @property
  def last_update(self):
    return self._device['last_update']
  @property
  def erratic(self):
    return self._device['erratic_errors']
  @erratic.setter
  def erratic(self, value):
    self._device['erratic_errors'] = value
  def get_hardware_state(self):
    # Optional hook for drivers; the base implementation reports nothing.
    pass
  @retry(terrariumSensorLoadingException, tries=3, delay=0.5, max_delay=2, logger=logger)
  def load_hardware(self, reload = False):
    """Create (or fetch from cache) the underlying hardware device object.

    Raises terrariumSensorLoadingException on timeout or driver failure;
    the @retry decorator retries up to 3 times.
    """
    # Get hardware cache key based on the combination of hardware and address
    hardware_cache_key = md5(f'HW-{self.HARDWARE}-{self.address}'.encode()).hexdigest()
    # Load hardware device from cache
    hardware = self._sensor_cache.get_data(hardware_cache_key)
    if reload or hardware is None:
      # Could not find valid hardware cache. So create a new hardware device
      try:
        hardware = func_timeout(self._UPDATE_TIME_OUT, self._load_hardware)
        if hardware is not None:
          # Store the hardware in the cache for unlimited of time
          self._sensor_cache.set_data(hardware_cache_key,hardware,-1)
        else:
          # Raise error that hard is not loaded with an unknown message :(
          raise terrariumSensorLoadingException(f'Unable to load sensor {self}: Did not return a device.')
      except FunctionTimedOut:
        # What ever fails... does not matter, as the data is still None and will raise a terrariumSensorUpdateException and trigger the retry
        raise terrariumSensorLoadingException(f'Unable to load sensor {self}: timed out ({self._UPDATE_TIME_OUT} seconds) during loading.')
      except Exception as ex:
        raise terrariumSensorLoadingException(f'Unable to load sensor {self}: {ex}')
    self._device['device'] = hardware
    # Check for power management features and enable it if set
    if self._device['power_mngt'] is not None:
      GPIO.setup(self._device['power_mngt'], GPIO.OUT)
  # When we get Runtime errors retry up to 3 times
  @retry(terrariumSensorUpdateException, tries=3, delay=0.5, max_delay=2, logger=logger)
  def get_data(self):
    """Read raw data from the driver with power management and a timeout."""
    data = None
    self.__power_management(True)
    try:
      data = func_timeout(self._UPDATE_TIME_OUT, self._get_data)
    except FunctionTimedOut:
      # What ever fails... does not matter, as the data is still None and will raise a terrariumSensorUpdateException and trigger the retry
      logger.error(f'Sensor {self} timed out after {self._UPDATE_TIME_OUT} seconds during updating...')
    except Exception as ex:
      logger.error(f'Sensor {self} has exception: {ex}')
    self.__power_management(False)
    if data is None:
      raise terrariumSensorUpdateException(f'Invalid reading from sensor {self}')
    return data
  def update(self, force = False):
    """Return the current value for this sensor type, refreshing if needed.

    Uses the shared measurement cache so multiple sensor objects on the same
    physical device trigger only one hardware read per _CACHE_TIMEOUT.
    """
    if self._device['device'] is None:
      raise terrariumSensorLoadingException(f'Sensor {self} is not loaded! Can not update!')
    starttime = time()
    data = self._sensor_cache.get_data(self.__sensor_cache_key)
    if (data is None or force) and self._sensor_cache.set_running(self.__sensor_cache_key):
      logger.debug(f'Start getting new data from sensor {self}')
      try:
        data = self.get_data()
        self._sensor_cache.set_data(self.__sensor_cache_key,data, self._CACHE_TIMEOUT)
      except Exception as ex:
        logger.error(f'Error updating sensor {self}. Check your hardware! {ex}')
      self._sensor_cache.clear_running(self.__sensor_cache_key)
    current = None if data is None or self.sensor_type not in data else data[self.sensor_type]
    if current is None:
      # Invalid reading: drop the cache entry so the next call retries.
      self._sensor_cache.clear_data(self.__sensor_cache_key)
    else:
      self._device['last_update'] = int(starttime)
      self._device['value'] = current
    return current
  def stop(self):
    """Release the power management GPIO pin, if one was configured."""
    if self._device['power_mngt'] is not None:
      GPIO.cleanup(self._device['power_mngt'])
  def __repr__(self):
    return f'{self.NAME} {self.type} named \'{self.name}\' at address \'{self.address}\''
  # Auto discovery of known and connected sensors
  @staticmethod
  def scan_sensors(unit_value_callback = None, trigger_callback = None, **kwargs):
    """Yield sensors found by every driver that implements _scan_sensors."""
    for (hardware_type,sensor_device) in terrariumSensor.available_hardware.items():
      try:
        for sensor in sensor_device._scan_sensors(unit_value_callback, trigger_callback, **kwargs):
          yield sensor
      except AttributeError as ex:
        # Scanning not supported, just ignore
        pass
class terrariumAnalogSensor(terrariumSensor):
  """Base class for analog sensors read through an MCP3008 A/D converter."""
  HARDWARE = None
  TYPES = []
  NAME = None

  __AMOUNT_OF_MEASUREMENTS = 5

  def _load_hardware(self):
    """Create the MCP3008 device; address is 'channel[,spi_device]'."""
    parts = self._address
    spi_device = 0
    if len(parts) > 1 and int(parts[1]) >= 0:
      spi_device = int(parts[1])
    return MCP3008(channel=int(parts[0]), device=spi_device)

  def _get_data(self):
    """Return the averaged analog voltage of the device.

    Takes several readings (0.2s apart), drops the lowest and highest,
    and averages the rest to smooth out noise.
    """
    readings = []
    for _ in range(self.__AMOUNT_OF_MEASUREMENTS):
      raw = self.device.value
      if terrariumUtils.is_float(raw):
        readings.append(float(raw))
      sleep(0.2)
    return statistics.mean(sorted(readings)[1:-1])
class terrariumI2CSensor(terrariumSensor):
  """Base class for sensors on the I2C bus (address format: 'addr[,bus]')."""

  @property
  def _address(self):
    # Normalise the first address part from a (possibly '0x'-less) hex
    # string to an integer I2C address.
    parts = super()._address
    if type(parts[0]) is str:
      hex_text = parts[0] if parts[0].startswith('0x') else '0x' + parts[0]
      parts[0] = int(hex_text, 16)
    return parts

  def __bus_number(self, parts):
    # The optional second address part selects the I2C bus; default is bus 1.
    if len(parts) > 1 and int(parts[1]) >= 1:
      return int(parts[1])
    return 1

  def _open_hardware(self):
    """Open and return a fresh SMBus handle for this sensor's bus."""
    parts = self._address
    return smbus2.SMBus(self.__bus_number(parts))

  def _load_hardware(self):
    """Return a (device_address, SMBus) tuple used by the mixin readers."""
    parts = self._address
    return (parts[0], smbus2.SMBus(self.__bus_number(parts)))
class terrariumI2CSensorMixin():
  """Shared read logic for I2C temperature/humidity sensors using the
  no-hold-master protocol (conversion formulas look HTU21D/SI7021-style --
  TODO confirm for every sensor reusing this mixin)."""
  # control constants
  SOFTRESET = 0xFE
  SOFTRESET_TIMEOUT = 0.1
  TEMPERATURE_TRIGGER_NO_HOLD = 0xF3
  TEMPERATURE_WAIT_TIME = 0.1
  HUMIDITY_TRIGGER_NO_HOLD = 0xF5
  HUMIDITY_WAIT_TIME = 0.1
  def __soft_reset(self, i2c_bus):
    # Reset the sensor, then give it time to reboot before commands.
    i2c_bus.write_byte(self.device[0], self.SOFTRESET)
    sleep(self.SOFTRESET_TIMEOUT)
  def __get_data(self,i2c_bus, trigger, timeout):
    """Send *trigger*, wait *timeout* seconds, read the two raw data bytes."""
    data1 = data2 = None
    # Send request for data
    i2c_bus.write_byte(self.device[0], trigger)
    sleep(timeout)
    data1 = i2c_bus.read_byte(self.device[0])
    try:
      data2 = i2c_bus.read_byte(self.device[0])
    except Exception as ex:
      # Fall back to the first byte when a second read fails -- TODO confirm
      # this matches the intended device behaviour.
      data2 = data1
    return (data1,data2)
  def _get_data(self):
    """Return a dict with 'temperature' and/or 'humidity' readings,
    depending on which types this driver supports."""
    data = {}
    with self._open_hardware() as i2c_bus:
      # Datasheet recommend do Soft Reset before measurement:
      self.__soft_reset(i2c_bus)
      if 'temperature' in self.TYPES:
        bytedata = self.__get_data(i2c_bus, self.TEMPERATURE_TRIGGER_NO_HOLD,self.TEMPERATURE_WAIT_TIME)
        data['temperature'] = ((bytedata[0]*256.0+bytedata[1])*175.72/65536.0)-46.85
      if 'humidity' in self.TYPES:
        bytedata = self.__get_data(i2c_bus, self.HUMIDITY_TRIGGER_NO_HOLD,self.HUMIDITY_WAIT_TIME)
        data['humidity'] = ((bytedata[0]*256.0+bytedata[1])*125.0/65536.0)-6.0
    return data
"""
TCA9548A I2C switch driver, Texas instruments
8 bidirectional translating switches
I2C SMBus protocol
Manual: tca9548.pdf
Source: https://github.com/IRNAS/tca9548a-python/blob/master/tca9548a.py
Added option for different I2C bus
"""
# import smbus
# import logging
class TCA9548A(object):
    """Driver for the TI TCA9548A 8-channel bidirectional I2C switch.

    All bus failures are reported through return values (None/False)
    rather than exceptions, matching the original API.
    """

    def __init__(self, address, bus = 1):
        """Init smbus channel and tca driver on specified address."""
        try:
            self.PORTS_COUNT = 8  # number of switches
            self.i2c_bus = smbus2.SMBus(bus)
            self.i2c_address = address
            if self.get_control_register() is None:
                raise ValueError
        except ValueError:
            logger.error("No device found on specified address!")
            self.i2c_bus = None
        except Exception:
            # Was a bare 'except:' which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            logger.error("Bus on channel {} is not available.".format(bus))
            logger.info("Available busses are listed as /dev/i2c*")
            self.i2c_bus = None

    def get_control_register(self):
        """Read value (length: 1 byte) from control register.

        Returns None when the bus is unavailable or the read fails.
        """
        try:
            return self.i2c_bus.read_byte(self.i2c_address)
        except Exception:
            return None

    def get_channel(self, ch_num):
        """Get channel state (specified with ch_num), return 0=disabled or 1=enabled.

        Returns None for an invalid channel number or on bus failure.
        """
        if ch_num < 0 or ch_num > self.PORTS_COUNT - 1:
            return None
        register = self.get_control_register()
        if register is None:
            return None
        return (register >> ch_num) & 1

    def set_control_register(self, value):
        """Write value (length: 1 byte) to control register.

        Returns True on success, False for out-of-range values or bus failure.
        """
        if value < 0 or value > 255:
            return False
        try:
            self.i2c_bus.write_byte(self.i2c_address, value)
            return True
        except Exception:
            return False

    def set_channel(self, ch_num, state):
        """Change state (0=disable, 1=enable) of a channel specified in ch_num."""
        if ch_num < 0 or ch_num > self.PORTS_COUNT - 1:
            return False
        if state != 0 and state != 1:
            return False
        current_value = self.get_control_register()
        if current_value is None:
            return False
        if state:
            new_value = current_value | 1 << ch_num
        else:
            # Clear the channel bit while keeping the register in 0..255.
            new_value = current_value & (255 - (1 << ch_num))
        return self.set_control_register(new_value)

    def __del__(self):
        """Driver destructor."""
        self.i2c_bus = None
class terrariumBluetoothSensor(terrariumSensor):
  """Base class for Bluetooth LE sensors discovered with bluepy."""

  __MIN_DB = -90    # ignore devices with a weaker signal than this
  __SCAN_TIME = 3   # seconds per BLE scan

  @property
  def _address(self):
    # Address format: 'mac[,adapter]'. Normalise to [mac, adapter_number].
    address = super()._address
    if len(address) == 1:
      address.append(0)
    elif len(address) == 2:
      # BUGFIX: the old code compared the *boolean* result of is_float()
      # with 0; compare the numeric value so zero/negative adapter numbers
      # fall back to adapter 0.
      address[1] = int(address[1]) if terrariumUtils.is_float(address[1]) and float(address[1]) > 0 else 0
    return address

  @staticmethod
  def _scan_sensors(sensorclass, ids = None, unit_value_callback = None, trigger_callback = None):
    """Yield a sensor per supported type for every matching BLE device.

    Due to multiple bluetooth dongles, up to 10 adapters are probed and the
    first one that can scan is used. (The old code gave up as soon as a
    single adapter failed, and used a mutable default for *ids*.)
    """
    ids = [] if ids is None else ids
    for counter in range(10):
      try:
        devices = Scanner(counter).scan(terrariumBluetoothSensor.__SCAN_TIME)
      except Exception:
        # This adapter cannot scan (missing hardware or permissions);
        # try the next one.
        continue

      for device in devices:
        if device.rssi > terrariumBluetoothSensor.__MIN_DB and device.getValueText(9) is not None and device.getValueText(9).lower() in ids:
          for sensor_type in sensorclass.TYPES:
            # BUGFIX: pass the values as lazy logging arguments instead of
            # extra positional args on a plain message.
            logger.debug('Found %s sensor %s at %s', sensor_type, sensorclass, device.addr)
            yield terrariumSensor(None,
                                  sensorclass.HARDWARE,
                                  sensor_type,
                                  device.addr + ('' if counter == 0 else f',{counter}'),
                                  f'{sensorclass.NAME} measuring {sensor_type}',
                                  unit_value_callback = unit_value_callback,
                                  trigger_callback = trigger_callback)
      # Scanning succeeded on this adapter; stop trying the others.
      return

    logger.warning('Bluetooth scanning is not enabled for normal users or there are zero Bluetooth LE devices available.... bluetooth is disabled!')
    return []
| theyosh/TerrariumPI | hardware/sensor/__init__.py | Python | gpl-3.0 | 18,830 |
# coding = utf-8
# import modules
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import my_config
path = my_config.ROOT_DIR # Please create your config file
file = my_config.FILE # Please create your config file
# get time series for ch0 and plot
import wave
def time_series(file, i_ch = 0):
    """Read one channel of a 16-bit PCM WAV file.

    Parameters
    ----------
    file : str or file-like object accepted by ``wave.open``.
    i_ch : index of the channel to return (default 0).

    Returns
    -------
    (fs, Time, samples): the frame rate in Hz, a time axis in seconds, and
    the selected channel as an int16 array.

    Fixes over the previous version: ``np.fromstring`` (deprecated and
    removed in NumPy 2.0) is replaced by ``np.frombuffer``; the ``num``
    argument to ``np.linspace`` is now an int (a float raises TypeError on
    modern NumPy); channel de-interleaving is vectorised instead of a
    per-sample Python loop.
    """
    with wave.open(file, 'r') as wav_file:
        n_channels = wav_file.getnchannels()
        fs = wav_file.getframerate()
        # Extract raw audio: all frames, channels interleaved per frame.
        frames = wav_file.readframes(-1)
    # WAV PCM is little-endian signed 16-bit.
    signal = np.frombuffer(frames, dtype='<i2')
    # One row per channel.
    channels = signal.reshape(-1, n_channels).T
    n_samples = channels.shape[1]
    # Get time from indices.
    Time = np.linspace(0, n_samples / fs, num=n_samples)
    return fs, Time, channels[i_ch]
# Load channel 0 of the configured WAV file and run the full analysis chain:
# raw plot -> detrend -> auto-correlation -> low-pass filter -> FFT -> PSD
# -> spectrogram.
fs, t, y = time_series(os.path.join(path, file), i_ch = 0)
plt.figure(1)
plt.plot(t, y)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal')
plt.grid()
# detrend (remove linear trend / DC offset) and plot
from scipy.signal import detrend
y_detrend = detrend(y)
plt.figure(2)
plt.plot(t, y_detrend)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal-detrend')
plt.grid()
# get auto-correlation and plot (only the first len(t) lags are shown)
from scipy.signal import correlate, convolve
corr = correlate(y_detrend, y_detrend, mode = 'full')
n_data = np.minimum(len(t), len(corr))
plt.figure(3)
plt.plot(t[0:n_data], corr[0:n_data])
plt.title('Auto-Correlation (Fs = {})'.format(fs))
plt.xlabel('Time Lag [s]')
plt.ylabel('Auto-Correlation')
plt.grid()
# get filtered signal and plot
from scipy.signal import butter, lfilter
cutoff = 500
N = 4 # filter order
# Cutoff normalised by the Nyquist frequency (fs/2), as butter() expects.
Wn = cutoff / (fs * 0.5)
b, a = butter(N, Wn , btype = 'low', analog = False)
y_filtered = lfilter(b, a, y_detrend) # low pass filter
plt.figure(4)
plt.plot(t, y_filtered)
plt.title('Time series (Fs = {}) (Cutoff Freq. = {})'.format(fs, cutoff))
plt.xlabel('Time [s]')
plt.ylabel('Signal - filtered')
plt.grid()
# get fft and plot (single-sided amplitude spectrum)
T = 1.0 / fs # sampling interval
n_sample = len(y_filtered)
freq = np.linspace(0.0, 1.0/(2.0*T), n_sample//2)
# NOTE(review): sp.fft as a *function* only works on legacy SciPy; on modern
# SciPy, scipy.fft is a module (use scipy.fft.fft or np.fft.fft) -- confirm
# the pinned SciPy version.
yf = sp.fft(y_filtered)
plt.figure(5)
plt.plot(freq, 2.0/n_sample * np.abs(yf[0:n_sample//2]))
plt.title('FFT')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Fourier Coef.')
plt.grid()
# get psd (Welch) and plot
from scipy.signal import welch
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlap rate 90%
f, Pxx = welch(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(6)
plt.plot(f, Pxx)
plt.title('PSD')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Power')
plt.grid()
# get spectrogram (same segment length/overlap as the PSD above)
from scipy.signal import spectrogram
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlap at 90%
f, t, Sxx = spectrogram(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(7)
plt.pcolormesh(t, f, Sxx)
plt.title('Spectrogram')
plt.xlabel('Time [s]')
plt.ylabel('Freq. [Hz]')
plt.grid()
plt.show()
| ejoonie/heart_sound | main_waveform_20170517.py | Python | gpl-3.0 | 3,229 |
self.Step (Message = "Receptionist-N ->> Klient-N [genvej: fokus-modtagerliste] (måske)")
self.Step (Message = "Receptionist-N ->> Klient-N [retter modtagerlisten]")
| AdaHeads/Hosted-Telephone-Reception-System | use-cases/.patterns/adjust_recipients/test.py | Python | gpl-3.0 | 217 |
#
# Python module to parse OMNeT++ vector files
#
# Currently only suitable for small vector files since
# everything is loaded into RAM
#
# Authors: Florian Kauer <[email protected]>
#
# Copyright (c) 2015, Institute of Telematics, Hamburg University of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import scipy.interpolate
import numpy as np
vectors = []
class OmnetVector:
    """Parser for OMNeT++ vector (.vec) result files.

    The whole file is read eagerly in ``__init__``, so this class is only
    suitable for small vector files (everything is kept in RAM).
    """

    def __init__(self,file_input):
        """Read all lines of *file_input* (an open .vec file object).

        Builds:
          self.vectors    -- {vector name: {module name: vector id}}
          self.dataTime   -- {vector id: list of sample times}
          self.dataValues -- {vector id: list of sample values}
          self.maxtime    -- largest time stamp seen in any vector
          self.attrs      -- {attribute name: value} from "attr" lines
        """
        self.vectors = {}
        self.dataTime = {}
        self.dataValues = {}
        self.maxtime = 0
        self.attrs = {}
        for line in file_input:
            # Data line: "<vector id>\t<event no>\t<time>\t<value>"
            m = re.search("([0-9]+)\t([0-9]+)\t([0-9.e\-+]+)\t([0-9.e\-+na]+)",line)
            #m = re.search("([0-9]+)",line)
            if m:
                vector = int(m.group(1))
                if not vector in self.dataTime:
                    self.dataTime[vector] = []
                    self.dataValues[vector] = []
                time = float(m.group(3))
                self.dataTime[vector].append(time)
                self.maxtime = max(self.maxtime,time)
                self.dataValues[vector].append(float(m.group(4)))
            else:
                # Vector declaration line, e.g.:
                # vector 7 Net802154.host[0].ipApp[0] referenceChangeStat:vector ETV
                m = re.search("vector *([0-9]*) *([^ ]*) *(.*):vector",line)
                if m:
                    number = int(m.group(1))
                    module = m.group(2)
                    name = m.group(3)
                    if not name in self.vectors:
                        self.vectors[name] = {}
                    self.vectors[name][module] = number
                else:
                    # File-level attribute line: "attr <name> <value>"
                    m = re.search("attr ([^ ]*) ([^ ]*)\n",line)
                    if m:
                        self.attrs[m.group(1)] = m.group(2)

    def get_vector(self,name,module,resample=None):
        """Return ``(times, values)`` for the vector *name* of *module*.

        If *resample* is given, the series is resampled on a regular grid
        with step *resample* from 0 to ``self.maxtime`` using zero-order-hold
        ('zero') interpolation: values before the first sample are 0, values
        after the last sample repeat the last value.
        """
        num = self.vectors[name][module]
        (time,values) = (self.dataTime[num],self.dataValues[num])
        if resample != None:
            newpoints = np.arange(0,self.maxtime,resample)
            lastvalue = values[-1]
            return (newpoints, scipy.interpolate.interp1d(time,values,'zero',assume_sorted=True,
                bounds_error=False,fill_value=(0,lastvalue)).__call__(newpoints))
        else:
            return (time,values)

    def get_attr(self,name):
        """Return the value of the file-level attribute *name*."""
        return self.attrs[name]
| i-tek/inet_ncs | simulations/analysis_tools/python/omnet_vector.py | Python | gpl-3.0 | 3,886 |
#!/usr/bin/env python
# coding=utf-8
"""598. Split Divisibilities
https://projecteuler.net/problem=598
Consider the number 48.
There are five pairs of integers $a$ and $b$ ($a \leq b$) such that $a \times
b=48$: (1,48), (2,24), (3,16), (4,12) and (6,8).
It can be seen that both 6 and 8 have 4 divisors.
So of those five pairs one consists of two integers with the same number of
divisors.
In general:
Let $C(n)$ be the number of pairs of positive integers $a \times b=n$, ($a
\leq b$) such that $a$ and $b$ have the same number of divisors;
so $C(48)=1$.
You are given $C(10!)=3$: (1680, 2160), (1800, 2016) and (1890,1920).
Find $C(100!)$
"""
| openqt/algorithms | projecteuler/pe598-split-divisibilities.py | Python | gpl-3.0 | 660 |
import pyes
import os
from models import *
from sqlalchemy import select
from downloader import download
import utils
import re
import time
class Search(object):
    """Thin wrapper around a pyes ElasticSearch connection for one index/type."""

    def __init__(self,host,index,map_name,mapping=None,id_key=None):
        # host: "host:port" of the ES node
        # index / map_name: target index name and document type
        # mapping: optional field mapping definition (dict), applied by create_index
        # id_key: optional document field routed into the ES _id ("path" id)
        self.es = pyes.ES(host)
        self.index = index
        self.map_name = map_name
        self.mapping = mapping
        self.id_key = id_key

    def create_index(self):
        """Create the index if missing; put the mapping when one was given."""
        self.es.create_index_if_missing(self.index)
        if self.mapping:
            if self.id_key:
                # with an id_key, tell ES to take the document _id from that field
                self.es.put_mapping(self.map_name,{
                    self.map_name:{
                        '_id':{
                            'path':self.id_key
                        },
                        'properties':self.mapping}
                },[self.index])
            else:
                self.es.put_mapping(self.map_name,{
                    self.map_name:{
                        'properties':self.mapping
                    }
                },[self.index])
        self.es.refresh(self.index)

    def index_item(self,item):
        """Index one document (dict) and refresh so it is searchable at once."""
        self.es.index(item,self.index,self.map_name)
        self.es.refresh(self.index)
def convert_to_document(revision):
    """Flatten a BillRevision and its parent Bill into one dict for indexing.

    Copies every public attribute of the revision (except 'metadata' and the
    'bill' relation) and of the bill (except 'metadata', 'id' and the
    'bill_revs' collection). When the revision URL can be downloaded, the
    file is attached under the 'document' key as a pyes attachment.
    """
    temp = {}
    # public attribute names only (skip the _underscore/sqlalchemy internals)
    rev_key = [ i for i in dir(revision) if not re.match('^_',i) ]
    bill_key = [ i for i in dir(revision.bill) if not re.match('^_',i) ]
    for key in rev_key:
        if key != 'metadata' and key != 'bill':
            temp[key] = getattr(revision,key)
    for key in bill_key:
        # bill 'id' is skipped so it does not clobber the revision's own id
        if key != 'metadata' and key!='id' and key!='bill_revs':
            temp[key] = getattr(revision.bill,key)
    full_path = download(temp['url'])
    if full_path:
        # download failures leave the entry without a 'document' attachment
        temp['document'] = pyes.file_to_attachment(full_path)
    return temp
def initial_index():
    """(Re)build the full-text bill index from every revision in the database.

    Creates the 'bill-index' index with an attachment-aware mapping, then
    walks all BillRevision rows (joined to their Bill) and indexes each one.
    """
    host = '127.0.0.1:9200'
    index = 'bill-index'
    map_name = 'bill-type'
    # ES mapping: 'document' holds the downloaded bill file as an attachment;
    # the remaining fields mirror Bill/BillRevision columns.
    mapping = {
        'document':{
            'type':'attachment',
            'fields':{
                "title" : { "store" : "yes" },
                "file" : {
                    "term_vector":"with_positions_offsets",
                    "store":"yes"
                }
            }
        },
        'name':{
            'type':'string',
            'store':'yes',
            'boost':1.0,
            'index':'analyzed'
        },
        'long_name':{
            'type':'string',
            'store':'yes',
            'boost':1.0,
            'index':'analyzed'
        },
        'status':{
            'type':'string',
            'store':'yes',
        },
        'year':{
            'type':'integer',
            'store':'yes'
        },
        'read_by':{
            'type':'string',
            'store':'yes',
            'index':'analyzed'
        },
        'date_presented':{
            'type':'date',
            'store':'yes'
        },
        'bill_id':{
            'type':'integer',
            'store':'yes'
        },
        'id':{
            'type':'integer',
            'store':'yes'
        }
    }
    search = Search(host,index,map_name,mapping)
    search.create_index()
    initdb()
    session = DBSession()
    revision = (session.query(BillRevision)
                .join((BillRevision.bill,Bill)).all()
                )
    for rev in revision:
        temp = convert_to_document(rev)
        search.index_item(temp)
        # throttle so the per-revision downloads / ES are not hammered
        time.sleep(5)
def index_single(rev_id):
    """Index a single BillRevision (looked up by primary key).

    Assumes the index and its mapping already exist (see initial_index) --
    Search is constructed here without a mapping.
    """
    host = '127.0.0.1:9200'
    index = 'bill-index'
    map_name = 'bill-type'
    initdb()
    session = DBSession()
    revision = (session.query(BillRevision).get(rev_id)
                )
    temp = convert_to_document(revision)
    search = Search(host,index,map_name)
    search.index_item(temp)
if __name__ == '__main__':
initial_index()
| sweemeng/Malaysian-Bill-Watcher | billwatcher/indexer.py | Python | gpl-3.0 | 3,747 |
import requests
from bs4 import BeautifulSoup
def trade_spider(max_pages):
    """Crawl up to *max_pages* listing pages and print each item link.

    Fixes vs. the original:
      - ``request.get`` -> ``requests.get`` (the imported module is ``requests``;
        the old name raised NameError)
      - ``print("href")`` printed the literal string instead of the link value
      - ``page`` was never incremented, so the while loop never terminated
    """
    page = 1
    while page <= max_pages:
        # NOTE(review): the page number is not part of the URL yet; append
        # str(page) once per-page URLs are needed (see the original comment).
        url = "https://thenewboston.com/videos.php?cat=98&video=20144"
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text)
        for link in soup.findAll("a", {"class": "itemname"}):
            href = link.get("href")
            print(href)
        page += 1
trade_spider(1)
def get_single_item_data(item_url):
    """Fetch *item_url* and print the text of every "i-name" link on the page.

    Fix vs. the original: ``request.get`` -> ``requests.get`` (the imported
    module is ``requests``; the old name raised NameError).
    """
    source_code = requests.get(item_url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text)
    for item_name in soup.findAll("a", {"class": "i-name"}):
        print(item_name.string)
| Tbear1981/bitcoin-overseer | files/webcrawler.py | Python | gpl-3.0 | 705 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: select the settings module,
    # then hand the command line (runserver, migrate, ...) to Django's CLI.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangorest.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| IntroSDE/introsde | projects/tutorial_django_rest_framework_2017/djangorest/manage.py | Python | gpl-3.0 | 808 |
# NOTE: this file is a deliberate linter demonstration (pyta). The loop
# marked "Error on this line" is *supposed* to be invalid -- range() rejects
# float arguments -- so do not "fix" it; the checker must keep flagging it.
for i in range(0):
    i += 1

for j in range(0, 1, 3):
    j += 1

for k in range(9, 1, -9):
    k += 1

for n in range(0, 1.1): # Error on this line
    n += 1

for m in range(4, 5):
    m += 1
| RyanDJLee/pyta | examples/invalid_range_index_example.py | Python | gpl-3.0 | 197 |
# -*- encoding: UTF-8 -*-
import datetime
from sqlalchemy import MetaData, Table, Column, String, Integer
from Interface import AbstractDataObject
from utils.String import attributes_repr
def define_event_table(meta: MetaData):
    """Declare the 'events' table on *meta* and return the Table object.

    The composite primary key (book_id, reader_id, event_date, event_type)
    matches Event's identity attributes; 'times' counts how often the same
    event was recorded (see Event.update_from).
    """
    return Table(
        'events', meta,
        Column('book_id', String, primary_key=True),
        Column('reader_id', String, primary_key=True),
        Column('event_date', String, primary_key=True),
        Column('event_type', String, primary_key=True),
        Column('times', Integer),
    )
class Event(AbstractDataObject):
    """A single library event of one reader and one book on one date.

    Two events count as equal when book, reader, date and type all match;
    ``update_from`` merges such duplicates by incrementing ``times``.
    """

    __attributes__ = ('book_id', 'reader_id', 'event_date', 'event_type')
    __repr__ = attributes_repr

    def __init__(self, book_id: str, reader_id: str, event_date: str, event_type: str):
        self.book_id = book_id
        self.reader_id = reader_id
        self.event_date = event_date    # stored as 'YYYYMMDD' text
        self.event_type = event_type
        self.times = 1                  # number of identical events merged in

    def __eq__(self, other):
        # Only objects of exactly the same class can be equal.
        if type(self) != type(other):
            return False
        return all(getattr(self, attr) == getattr(other, attr)
                   for attr in self.__attributes__)

    @property
    def date(self):
        """The event date parsed into a datetime.date."""
        return datetime.datetime.strptime(self.event_date, '%Y%m%d').date()

    def update_from(self, value):
        """Merge another identical Event into this one (increments times)."""
        if type(value) != type(self):
            raise NotImplementedError
        if self == value:
            self.times += 1
        return self

    def compare_by(self, **kwargs):
        """Return True when every given attribute matches this event."""
        for tag, expected in kwargs.items():
            if tag not in self.__attributes__:
                raise AttributeError('Event has no attribute {}'.format(tag))
            if getattr(self, tag) != expected:
                return False
        return True

    @classmethod
    def init_from(cls, value):
        """Build an Event from a raw record dict (sysID/userID keyed)."""
        if not isinstance(value, dict):
            raise NotImplementedError
        return cls(
            book_id=value['sysID'],
            reader_id=value['userID'],
            event_date=value['event_date'],
            event_type=value['event_type'],
        )

    @property
    def hashable_key(self):
        """Stable string key combining the four identity attributes."""
        return '|'.join([self.book_id, self.reader_id, self.event_date, self.event_type])
| mingotang/libdata | structures/Event.py | Python | gpl-3.0 | 2,531 |
#!/usr/bin/env python
import logging
import os
import urllib
from cvmfsreplica.cvmfsreplicaex import PluginConfigurationFailure
from cvmfsreplica.interfaces import RepositoryPluginAcceptanceInterface
import cvmfsreplica.pluginsmanagement as pm
class Updatedserver(RepositoryPluginAcceptanceInterface):
    """
    Acceptance plugin: accept a replication run only when the stratum-0
    server publishes a revision different from the locally stored one.
    """

    def __init__(self, repository, conf):
        self.log = logging.getLogger('cvmfsreplica.updatedserver')
        self.repository = repository
        self.conf = conf
        try:
            self.url = self.repository.cvmfsconf.get('CVMFS_STRATUM0')
            self.reportplugins = pm.readplugins(self.repository,
                                                'repository',
                                                'report',
                                                self.conf.namespace('acceptance.updatedserver.',
                                                                    exclude=True)
                                                )
        except Exception:
            # was a bare "except:": narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed; any setup error becomes a config failure
            raise PluginConfigurationFailure('failed to initialize Updatedserver plugin')
        self.log.debug('plugin Updatedserver initialized properly')

    def verify(self):
        '''
        checks if the revision number in local copy of .cvmfspublished
        is different that the revision number of remote .cvmfspublished
        '''
        try:
            # FIXME
            # maybe we should try a couple of times in case of failures before failing definitely
            for line in urllib.urlopen('%s/.cvmfspublished' %self.url).readlines():
                if line.startswith('S'):
                    serverrevision = int(line[1:-1])
                    break
            # NOTE(review): if the remote file contains no 'S' line,
            # serverrevision stays unbound and the NameError is reported below
            # as "cannot be read" -- confirm that behavior is intended.
            # read the local revision number
            cvmfs_upstream_storage = self.repository._get_cvmfs_upstream_storage() # FIXME, this should not be here
            localfile = '%s/.cvmfspublished' %cvmfs_upstream_storage
            if not os.path.isfile(localfile):
                # no local copy yet: force a replication run
                self.log.warning('local file %s does not exist. Returning True' %localfile)
                return True
            else:
                # FIXME: too much duplicated code
                for line in open(localfile):
                    if line.startswith('S'):
                        localrevision = int(line[1:-1])
                        break
                out = (serverrevision != localrevision)
                if out == False:
                    self._notify_failure('No new content at the server for repository %s' \
                            %self.repository.repositoryname)
                return out
        except Exception:
            # was a bare "except:": narrowed to Exception
            self.log.warning('file %s/.cvmfspublished cannot be read. Returning False' %self.url)
            return False

    def _notify_failure(self, msg):
        """Forward msg to every configured report plugin."""
        for report in self.reportplugins:
            report.notifyfailure(msg)
| jose-caballero/cvmfsreplica | cvmfsreplica/plugins/repository/acceptance/Updatedserver.py | Python | gpl-3.0 | 3,761 |
from django.db import models
class Licensor(models.Model):
    """Licensor master record, keyed by its unique name."""

    # uniqueness prevents duplicate inserts of the same licensor
    name = models.CharField(max_length=255, unique=True)

    def __unicode__(self):
        # Python 2 style string representation (shown e.g. in the admin)
        return self.name

    class Meta:
        ordering = ['name']
| jampueroc/scrapper_anime | visualizacion/models/licensor.py | Python | gpl-3.0 | 216 |
# Python 2 test client for the bob system's Unix domain socket server.
import socket
import sys
import time

# Path of the Unix domain socket the server listens on.
server_add = './bob_system_socket'
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

# Wire format: "<command> <key> <value-or-null>"; only "set" carries a value.
message = sys.argv[1]+" "+sys.argv[2]
if sys.argv[1] == 'set':
    message+= " "+sys.argv[3]
else:
    # non-"set" commands send a "null" placeholder instead of a value
    message+= " null"

try:
    sock.connect(server_add)
except socket.error, msg:  # Python 2 exception syntax
    print >>sys.stderr, msg
    sys.exit(1)

sock.send(message)
data = sock.recv(1024)
if data: print 'reply from server:', data
# brief pause before closing so the server can finish its side
time.sleep(1)
sock.close()
| tibor0991/OBM-BOB | bob-main/test_sender.py | Python | gpl-3.0 | 455 |
import inspect
from django.utils.translation import activate
class MenuItemMixin:
    """
    Injects every attribute whose name starts with the 'menu_' prefix into
    the context generated by the view this mixin is applied to.

    This behavior can be used to highlight an item of a navigation component.
    """

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Collect the view's non-callable attributes and keep only the
        # ones carrying the "menu_" prefix.
        members = inspect.getmembers(self, lambda attr: not inspect.isroutine(attr))
        context.update({name: value for name, value in members
                        if name.startswith("menu_")})
        return context
class ActivateLegacyLanguageViewMixin:
    """Activate English when requested via URL kwarg or query string, else French."""

    def activate_legacy_language(self, *args, **kwargs):
        # Operator precedence makes this:
        #   ("lang" in kwargs and kwargs["lang"] == "en") or (GET "lang" == "en")
        # NOTE(review): presumably intended -- confirm.
        if "lang" in kwargs and kwargs["lang"] == "en" or self.request.GET.get("lang") == "en":
            activate("en")
        else:
            activate("fr")
| erudit/eruditorg | eruditorg/base/viewmixins.py | Python | gpl-3.0 | 913 |
# -*- coding:utf-8 -*-
# @author xupingmao <[email protected]>
# @since 2020/08/22 21:54:56
# @modified 2022/02/26 10:40:22
import xauth
import xtemplate
import xutils
import os
import re
import sys
import platform
import xconfig
from xutils import dateutil
from xutils import fsutil
from xutils import Storage
from xutils import mem_util
try:
import sqlite3
except ImportError:
sqlite3 = None
def get_xnote_version():
    """Return the xnote version string from the global config key system.version."""
    return xconfig.get_global_config("system.version")
def get_mem_info():
    """Return "process used/system used/system total" memory as one string.

    Values come straight from xutils.mem_util.get_mem_info(); units are
    whatever mem_util reports.
    """
    # The old version pre-initialized mem_used/mem_total to 0; mem_total was
    # never read and mem_used was immediately overwritten -- both removed.
    result = mem_util.get_mem_info()
    return "%s/%s/%s" % (result.mem_used, result.sys_mem_used, result.sys_mem_total)
def get_python_version():
    """Return the full interpreter version string (sys.version)."""
    return sys.version
def get_startup_time():
    """Return the process start time (xconfig.START_TIME) formatted as text."""
    return dateutil.format_time(xconfig.START_TIME)
def get_free_data_space():
    """Return the free space of the data directory as a human readable string.

    Falls back to "<未知>" (unknown) when the free space cannot be determined.
    """
    try:
        size = fsutil.get_free_space(xconfig.get_system_dir("data"))
        return xutils.format_size(size)
    except Exception:
        # was a bare "except:": narrowed so Ctrl-C/SystemExit still propagate
        xutils.print_exc()
        return "<未知>"
class SystemInfoItem:
    """One (name, value) row of the system information page."""

    def __init__(self, name = "", value = ""):
        # label shown to the user
        self.name = name
        # preformatted value string
        self.value = value
class InfoHandler:
    """Admin-only page rendering basic runtime/system information."""

    @xauth.login_required("admin")
    def GET(self):
        # Each row of the page is a SystemInfoItem(name, value) pair.
        items = [
            SystemInfoItem("Python版本", value = get_python_version()),
            SystemInfoItem("Xnote版本", value = get_xnote_version()),
            SystemInfoItem("内存信息", value = get_mem_info()),
            SystemInfoItem("磁盘可用容量", get_free_data_space()),
            # sqlite3 may be unavailable (import is guarded at module top)
            SystemInfoItem("sqlite版本", sqlite3.sqlite_version if sqlite3 != None else ''),
            SystemInfoItem("CPU型号", platform.processor()),
            SystemInfoItem("操作系统", platform.system()),
            SystemInfoItem("操作系统版本", platform.version()),
            SystemInfoItem("系统启动时间", get_startup_time()),
        ]
        return xtemplate.render("system/page/system_info.html", items = items,
            runtime_id = xconfig.RUNTIME_ID)
xurls = (
r"/system/info", InfoHandler
) | xupingmao/xnote | handlers/system/system_info.py | Python | gpl-3.0 | 2,138 |
# -*- coding: utf-8 -*-
from bottle import run, get, post, view, request, redirect, route, static_file, template
import bottle
import json
import threading
import requests
import time
import sys
messages = set([])
@bottle.route('/static/<path:path>')
def server_static(path):
    # Serve files below the local "static" directory.
    return static_file(path, root='static')
@get('/chat')
@view('chat')
def chat():
    # Render the chat template with every message seen so far and the
    # user name taken from the "?name=" query parameter (may be empty).
    name = request.query.name
    return dict(msg=list(messages), name=name)
@route('/')
def index():
    # The root URL just forwards to the chat page.
    redirect('chat')
@post('/send')
def sendmsg():
    # Store the (name, message) pair posted from the chat form, then send
    # the user back to the chat page (keeping their name in the URL).
    name = request.forms.getunicode('name')
    msg = request.forms.getunicode('msg')
    global messages
    if name != None and msg != None:
        messages.add((name, msg))
        # NOTE(review): "name" is interpolated into the redirect URL without
        # URL-encoding; names containing '&', '#' or spaces would break the
        # query string -- confirm whether urllib.parse.quote is needed.
        redirect('chat?name=' + name)
    else:
        redirect('chat')
run(host='localhost', port=int(sys.argv[1]))
| jpwbernardi/Computacao-Distribuida | Trabalho1/main.py | Python | gpl-3.0 | 816 |
#!/usr/bin/python3

'''
This is a simple example of how to use the dbm.gnu module of the
standard python library

NOTES:
- only str and bytes are allowed as values: storing None raises
  TypeError, which this example demonstrates by catching it.
'''

import dbm.gnu  # for open

# the 'c' in the next row means open rw and create if it doesn't exist
d = dbm.gnu.open('/tmp/foo.gdbm', 'c')
try:
    d['one'] = 'ehad'
    d['two'] = 'shtaim'
    try:
        # Storing None is illegal -- show the exception instead of crashing.
        d['three'] = None
    except TypeError as err:
        print('storing None failed as expected:', err)
finally:
    # Always close the database; the original crashed on the None insert
    # before reaching close() and leaked the open handle.
    d.close()
| nonZero/demos-python | src/examples/short/dbm/gdbm_insert.py | Python | gpl-3.0 | 431 |
import gc
import os
import argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from util import generate_features
def get_arguments():
    """Parse and return the command line arguments for feature generation."""
    parser = argparse.ArgumentParser(
        description='Generate features using a previously trained model')
    # positional arguments: (name, help text)
    positional = (
        ('data', 'File containing the input smiles matrices'),
        ('model', 'The model file'),
        ('features', 'Output file that will contain the generated features'),
    )
    for arg_name, arg_help in positional:
        parser.add_argument(arg_name, type=str, help=arg_help)
    parser.add_argument('--batch_size', type=int, default=100,
                        help='Size of the batches (default: 100)')
    return parser.parse_args()
args = get_arguments()
generate_features.generate_features(args.data, args.model, args.features, args.batch_size)
gc.collect()
| patrick-winter-knime/deep-learning-on-molecules | smiles-vhts/generate_features.py | Python | gpl-3.0 | 769 |
"""
Diabicus: A calculator that plays music, lights up, and displays facts.
Copyright (C) 2016 Michael Lipschultz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import os
import re
from functools import reduce
from .compute import ComputationError
def is_int(val):
    """ Returns True if val is an int or a float with 0 fractional part """
    if isinstance(val, int):
        return True
    return isinstance(val, float) and val % 1 == 0
def is_rational(val):
    """
    Returns True if val is an int or float and not irrational.

    Determining irrationality is done through the is_irrational method.
    """
    if not isinstance(val, (int, float)):
        return False
    return not is_irrational(val)
def is_irrational(val):
    """
    Returns True if val is irrational.

    Irrationality is determined by whether val is transcendental (as
    determined by is_transcendental) or sqrt(2) or golden ratio.
    """
    return val in (2 ** 0.5, GOLDEN_RATIO) or is_transcendental(val)
def is_transcendental(val):
    """ Returns True if val is transcendental (i.e. pi or e). """
    return val == math.pi or val == math.e
def is_real(val):
    """ Returns True if val is int or float. """
    return isinstance(val, int) or isinstance(val, float)
def is_complex(val):
    """ Returns True if val is complex. """
    # builtin complex type only; ints/floats are not complex instances
    return isinstance(val, complex)
def is_surreal(val):
    """ Returns True if val is surreal (currently always returns False). """
    # Placeholder: no surreal-number representation exists in this module.
    return False
def is_number(val):
    """ Returns True if val is int, float, or complex. """
    return any(isinstance(val, kind) for kind in (int, float, complex))
def is_error(val):
    """ Returns True if val is a ComputationError. """
    # ComputationError is imported from the sibling .compute module
    return isinstance(val, ComputationError)
GOLDEN_RATIO = (1 + 5**0.5) / 2
GRAHAMS_NUMBER = False
I = complex(0, 1)
PI_DIGITS = (3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2,
6, 4, 3, 3, 8, 3, 2, 7, 9, 5, 0, 2, 8, 8, 4, 1, 9, 7, 1, 6, 9, 3,
9, 9, 3, 7, 5, 1, 0, 5, 8, 2, 0, 9, 7, 4, 9, 4, 4, 5, 9, 2, 3, 0,
7, 8, 1, 6, 4, 0, 6, 2, 8, 6, 2, 0, 8, 9, 9, 8, 6, 2, 8, 0, 3, 4,
8, 2, 5, 3, 4, 2, 1, 1, 7, 0, 6, 7, 9, 8, 2, 1, 4
)
PRIME_NUMBERS = []
def __load_primes():
    """
    Loads a comma-delimited list of prime numbers into PRIME_NUMBERS.

    Prime numbers are loaded from the file prime_numbers.csv in the same
    location as this python file and stores them into the global
    variable PRIME_NUMBERS.
    """
    global PRIME_NUMBERS
    # resolve the csv relative to this module, not the working directory
    path = os.path.dirname(__file__)
    with open(os.path.join(path, 'prime_numbers.csv')) as fin:
        PRIME_NUMBERS = [int(v) for v in fin.read().split(',')]
__load_primes()
def is_prime(number):
    """ Returns True if number is a prime number. """
    # NOTE(review): membership is a linear scan of the PRIME_NUMBERS list,
    # and the table only holds the primes shipped in prime_numbers.csv --
    # integers beyond the table are reported as not prime. Confirm the
    # table covers the needed range.
    return is_int(number) and number > 1 and int(number) in PRIME_NUMBERS
FACTORS_ALL = 'all'
FACTORS_PROPER = 'proper'
FACTORS_PRIME = 'prime'
def factors(num, form=FACTORS_PROPER):
    """
    Return a list of factors for the provided number.

    If form is FACTORS_PRIME, then the list will only contain the prime
    factors of num. The product of the values in the list will always
    return num. That is, if the number is a product of more than one of
    the same prime (e.g. 12 = 2*2*3), then the list will contain those
    duplicates (e.g. [2, 2, 3] in the example).

    If form is FACTORS_ALL, then the list will contain all positive
    integers that exactly divide num, in ascending order. For example,
    with num=12, the list returned is [1, 2, 3, 4, 6, 12].

    If form is FACTORS_PROPER (default), then the list will be the same
    as FACTORS_ALL, except the list will not include num itself. So,
    for num=12, the list returned would be [1, 2, 3, 4, 6].

    If num is not an integer (as determined by is_int) greater than 1,
    return empty list.
    """
    if not is_int(num) or num < 2:
        return []

    if form == FACTORS_PRIME:
        primes = []
        remaining = num
        # strip factors of 2, then try only odd candidates
        candidate = 2
        while remaining % candidate == 0:
            primes.append(candidate)
            remaining //= candidate
        candidate = 3
        while remaining > 1:
            while remaining % candidate == 0:
                primes.append(candidate)
                remaining //= candidate
            candidate += 2
        return primes

    # Collect each divisor pair (i, num//i). Using a set fixes the old bug
    # where perfect squares produced a duplicate divisor (e.g.
    # factors(16, FACTORS_ALL) used to contain 4 twice); sorted() makes the
    # output order deterministic as documented.
    all_factors = sorted({d for i in range(1, int(num ** 0.5) + 1)
                          if num % i == 0
                          for d in (i, num // i)})
    if form == FACTORS_PROPER:
        all_factors.remove(num)
    return all_factors
FIBONACCI_NUMBERS = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233,
377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711,
28657, 46368, 75025, 121393, 196418, 317811, 514229,
832040, 1346269
]
LUCAS_NUMBERS = (2, 1, 3, 4, 7, 11, 18, 29, 47, 76, 123, 199, 322, 521, 843,
1364, 2207, 3571, 5778, 9349, 15127, 24476, 39603, 64079,
103682, 167761, 271443, 439204, 710647, 1149851, 1860498,
3010349, 4870847, 7881196, 12752043, 20633239, 33385282
)
def is_subsequence_of(needle, haystack):
    """
    Returns True if needle occurs as a consecutive subsequence in haystack.

    Both needle and haystack must be ordered containers. The values in
    needle must appear in haystack in the order they appear in needle
    and must be consecutive in haystack.

    For example, with needle=[1,2,3] and haystack=[1,1,2,3,4], the
    function returns True since needle starts at index 1 in haystack.
    With needle=[1,2,4] and haystack=[1,1,2,3,4], the function returns
    False since, although the values do appear in haystack in the
    correct order, they are not consecutive.

    An empty needle will always return False.
    """
    if not needle:
        return False
    for start, candidate in enumerate(haystack):
        if candidate != needle[0]:
            continue
        # once the remaining haystack is shorter than the needle,
        # no later start position can match either
        if start + len(needle) > len(haystack):
            return False
        if all(needle[k] == haystack[start + k] for k in range(1, len(needle))):
            return True
    return False
def is_close(num1, num2, threshold=1e-5, method='raw'):
    """
    Returns True if num1 is within threshold of num2.

    If method is 'raw', then the closeness is determined by the absolute
    value of the difference between num1 and num2.

    If method is 'pct', then the absolute value of percent difference is
    calculated and used.

    num1 and num2 can be iterable. If one is iterable, then as long as
    one value in the iterable object is close to the other number, the
    function returns True. If both are iterable, then as long as one
    value in num1 is close to one value in num2, the function returns
    True.
    """
    if isinstance(num1, ComputationError) or isinstance(num2, ComputationError):
        # errors are never "close" to anything
        return False
    elif hasattr(num1, '__iter__'):
        # Fix: the recursive calls now forward `method`; previously nested
        # comparisons silently fell back to 'raw'.
        return any(is_close(n, num2, threshold, method) for n in num1)
    elif hasattr(num2, '__iter__'):
        return any(is_close(num1, n, threshold, method) for n in num2)
    elif ((isinstance(num1, complex) or isinstance(num2, complex))
            and not isinstance(num1, type(num2))):
        # a complex and a non-complex value are never considered close
        return False
    else:
        if method == 'pct':
            if num1 == num2 and num1 == 0:
                # both zero: percent difference undefined, treat as close
                return True
            else:
                return abs(num1-num2) / max([abs(v) for v in (num1, num2) if v != 0]) < threshold
        else:
            return abs(num1-num2) < threshold
| lipschultz/diabicus | src/numeric_tools.py | Python | gpl-3.0 | 8,003 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_bd
short_description: Manage Bridge Domains (BD) on Cisco ACI Fabrics (fv:BD)
description:
- Manages Bridge Domains (BD) on Cisco ACI Fabrics.
- More information from the internal APIC class
I(fv:BD) at U(https://developer.cisco.com/media/mim-ref/MO-fvBD.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob McGill (@jmcgill298)
requirements:
- ACI Fabric 1.0(3f)+
version_added: '2.4'
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
options:
arp_flooding:
description:
- Determines if the Bridge Domain should flood ARP traffic.
- The APIC defaults new Bridge Domains to C(no).
choices: [ no, yes ]
default: no
bd:
description:
- The name of the Bridge Domain.
aliases: [ bd_name, name ]
bd_type:
description:
- The type of traffic on the Bridge Domain.
- The APIC defaults new Bridge Domains to C(ethernet).
choices: [ ethernet, fc ]
default: ethernet
description:
description:
- Description for the Bridge Domain.
enable_multicast:
description:
- Determines if PIM is enabled
- The APIC defaults new Bridge Domains to C(no).
choices: [ no, yes ]
default: no
enable_routing:
description:
- Determines if IP forwarding should be allowed.
- The APIC defaults new Bridge Domains to C(yes).
choices: [ no, yes ]
default: yes
endpoint_clear:
description:
- Clears all End Points in all Leaves when C(yes).
- The APIC defaults new Bridge Domains to C(no).
- The value is not reset to disabled once End Points have been cleared; that requires a second task.
choices: [ no, yes ]
default: no
endpoint_move_detect:
description:
- Determines if GARP should be enabled to detect when End Points move.
- The APIC defaults new Bridge Domains to C(garp).
choices: [ default, garp ]
default: garp
endpoint_retention_action:
description:
- Determines if the Bridge Domain should inherit or resolve the End Point Retention Policy.
- The APIC defaults new Bridge Domain to End Point Retention Policies to C(resolve).
choices: [ inherit, resolve ]
default: resolve
endpoint_retention_policy:
description:
- The name of the End Point Retention Policy the Bridge Domain should use when
overriding the default End Point Retention Policy.
igmp_snoop_policy:
description:
- The name of the IGMP Snooping Policy the Bridge Domain should use when
overriding the default IGMP Snooping Policy.
ip_learning:
description:
- Determines if the Bridge Domain should learn End Point IPs.
- The APIC defaults new Bridge Domains to C(yes).
choices: [ no, yes ]
ipv6_nd_policy:
description:
- The name of the IPv6 Neighbor Discovery Policy the Bridge Domain should use when
overridding the default IPV6 ND Policy.
l2_unknown_unicast:
description:
- Determines what forwarding method to use for unknown l2 destinations.
- The APIC defaults new Bridge domains to C(proxy).
choices: [ proxy, flood ]
default: proxy
l3_unknown_multicast:
description:
- Determines the forwarding method to use for unknown multicast destinations.
- The APCI defaults new Bridge Domains to C(flood).
choices: [ flood, opt-flood ]
default: flood
limit_ip_learn:
description:
- Determines if the BD should limit IP learning to only subnets owned by the Bridge Domain.
- The APIC defaults new Bridge Domains to C(yes).
choices: [ no, yes ]
default: yes
multi_dest:
description:
- Determines the forwarding method for L2 multicast, broadcast, and link layer traffic.
- The APIC defaults new Bridge Domains to C(bd-flood).
choices: [ bd-flood, drop, encap-flood ]
default: bd-flood
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the Tenant.
aliases: [ tenant_name ]
vrf:
description:
- The name of the VRF.
aliases: [ vrf_name ]
'''
EXAMPLES = r'''
- name: Add Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: false
state: present
tenant: prod
bd: web_servers
vrf: prod_vrf
- name: Add an FC Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: false
state: present
tenant: prod
bd: storage
bd_type: fc
vrf: fc_vrf
enable_routing: no
- name: Modify a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: true
state: present
tenant: prod
bd: web_servers
arp_flooding: yes
l2_unknown_unicast: flood
- name: Query All Bridge Domains
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: true
state: query
- name: Query a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: true
state: query
tenant: prod
bd: web_servers
- name: Delete a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: true
state: absent
tenant: prod
bd: web_servers
'''
RETURN = r''' # '''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Ansible module entry point: create, update, delete or query an ACI
    Bridge Domain (fvBD) under a tenant.
    """
    # Copy the shared spec before calling update(): aci_argument_spec is a
    # module-level dict shared by every ACI module, and mutating it in place
    # would leak these fvBD-specific options into any other ACI module loaded
    # in the same interpreter run.
    argument_spec = aci_argument_spec.copy()
    argument_spec.update(
        arp_flooding=dict(choices=['no', 'yes']),
        bd=dict(type='str', aliases=['bd_name', 'name']),
        bd_type=dict(type='str', choices=['ethernet', 'fc']),
        description=dict(type='str'),
        enable_multicast=dict(type='str', choices=['no', 'yes']),
        enable_routing=dict(type='str', choices=['no', 'yes']),
        endpoint_clear=dict(type='str', choices=['no', 'yes']),
        endpoint_move_detect=dict(type='str', choices=['default', 'garp']),
        endpoint_retention_action=dict(type='str', choices=['inherit', 'resolve']),
        endpoint_retention_policy=dict(type='str'),
        igmp_snoop_policy=dict(type='str'),
        ip_learning=dict(type='str', choices=['no', 'yes']),
        ipv6_nd_policy=dict(type='str'),
        l2_unknown_unicast=dict(choices=['proxy', 'flood']),
        l3_unknown_multicast=dict(choices=['flood', 'opt-flood']),
        limit_ip_learn=dict(type='str', choices=['no', 'yes']),
        multi_dest=dict(choices=['bd-flood', 'drop', 'encap-flood']),
        state=dict(choices=['absent', 'present', 'query'], type='str', default='present'),
        tenant=dict(type='str', aliases=['tenant_name']),
        vrf=dict(type='str', aliases=['vrf_name']),
        gateway_ip=dict(type='str', removed_in_version='2.4'),  # Deprecated starting from v2.4
        method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'),  # Deprecated starting from v2.6
        scope=dict(type='str', removed_in_version='2.4'),  # Deprecated starting from v2.4
        subnet_mask=dict(type='str', removed_in_version='2.4'),  # Deprecated starting from v2.4
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # bd and tenant are only mandatory when actually changing objects;
        # a plain query may omit them to list everything.
        required_if=[
            ['state', 'absent', ['bd', 'tenant']],
            ['state', 'present', ['bd', 'tenant']],
        ],
    )
    arp_flooding = module.params['arp_flooding']
    bd = module.params['bd']
    bd_type = module.params['bd_type']
    if bd_type == 'ethernet':
        # ethernet type is represented as regular, but that is not clear to the users
        bd_type = 'regular'
    description = module.params['description']
    enable_multicast = module.params['enable_multicast']
    enable_routing = module.params['enable_routing']
    endpoint_clear = module.params['endpoint_clear']
    endpoint_move_detect = module.params['endpoint_move_detect']
    if endpoint_move_detect == 'default':
        # the ACI default setting is an empty string, but that is not a good input value
        endpoint_move_detect = ''
    endpoint_retention_action = module.params['endpoint_retention_action']
    endpoint_retention_policy = module.params['endpoint_retention_policy']
    igmp_snoop_policy = module.params['igmp_snoop_policy']
    ip_learning = module.params['ip_learning']
    ipv6_nd_policy = module.params['ipv6_nd_policy']
    l2_unknown_unicast = module.params['l2_unknown_unicast']
    l3_unknown_multicast = module.params['l3_unknown_multicast']
    limit_ip_learn = module.params['limit_ip_learn']
    multi_dest = module.params['multi_dest']
    state = module.params['state']
    tenant = module.params['tenant']
    vrf = module.params['vrf']

    # Give warning when fvSubnet parameters are passed as those have been moved to the aci_subnet module
    if module.params['gateway_ip'] or module.params['subnet_mask'] or module.params['scope']:
        module._warnings = ["The support for managing Subnets has been moved to its own module, aci_subnet. \
            The new modules still supports 'gateway_ip' and 'subnet_mask' along with more features"]

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{}'.format(tenant),
            filter_target='(fvTenant.name, "{}")'.format(tenant),
            module_object=tenant,
        ),
        subclass_1=dict(
            aci_class='fvBD',
            aci_rn='BD-{}'.format(bd),
            filter_target='(fvBD.name, "{}")'.format(bd),
            module_object=bd,
        ),
        child_classes=['fvRsCtx', 'fvRsIgmpsn', 'fvRsBDToNdP', 'fvRsBdToEpRet'],
    )
    aci.get_existing()

    if state == 'present':
        # Filter out module params with null values
        aci.payload(
            aci_class='fvBD',
            class_config=dict(
                arpFlood=arp_flooding,
                descr=description,
                epClear=endpoint_clear,
                epMoveDetectMode=endpoint_move_detect,
                ipLearning=ip_learning,
                limitIpLearnToSubnets=limit_ip_learn,
                mcastAllow=enable_multicast,
                multiDstPktAct=multi_dest,
                name=bd,
                type=bd_type,
                unicastRoute=enable_routing,
                unkMacUcastAct=l2_unknown_unicast,
                unkMcastAct=l3_unknown_multicast,
            ),
            child_configs=[
                {'fvRsCtx': {'attributes': {'tnFvCtxName': vrf}}},
                {'fvRsIgmpsn': {'attributes': {'tnIgmpSnoopPolName': igmp_snoop_policy}}},
                {'fvRsBDToNdP': {'attributes': {'tnNdIfPolName': ipv6_nd_policy}}},
                {'fvRsBdToEpRet': {'attributes': {'resolveAct': endpoint_retention_action, 'tnFvEpRetPolName': endpoint_retention_policy}}},
            ],
        )
        # generate config diff which will be used as POST request body
        aci.get_diff(aci_class='fvBD')
        # submit changes if module not in check_mode and the proposed is different than existing
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()

    module.exit_json(**aci.result)


if __name__ == "__main__":
    main()
| tsdmgz/ansible | lib/ansible/modules/network/aci/aci_bd.py | Python | gpl-3.0 | 12,055 |
# -*- coding:utf-8 -*-
# Minimal entry point: emit a fixed greeting on stdout.
greeting = "Hello"
print(greeting)
#!/usr/bin/env python
import sys
import time
from envirophat import light, weather, motion, analog
def write():
    """Sample the Enviro pHAT once and print the reading as one JSON line.

    Emits e.g. ``{"light": 123, "pressure": 1013.25}`` on stdout.  A Ctrl-C
    arriving during the sensor read is swallowed so the caller sees a clean
    exit instead of a traceback.
    """
    import json  # local import keeps the hardware imports alone at the top
    try:
        # Build the payload as a dict and serialize with json.dumps so the
        # output is guaranteed to be valid JSON (the old string concatenation
        # could not cope with non-numeric sensor values).
        reading = {
            "light": light.light(),
            "pressure": round(weather.pressure(), 2),
        }
        print(json.dumps(reading))
    except KeyboardInterrupt:
        pass
write()
| alexellis/docker-arm | images/armhf/python2-envirophat.dev/pressure/pressure.py | Python | gpl-3.0 | 277 |
#
# SPDX-FileCopyrightText: 2017 Dmytro Kolomoiets <[email protected]> and contributors.
#
# SPDX-License-Identifier: GPL-3.0-only
#
class NodeSuperimposeTr(object):
    """Graft the graph ``aug`` onto ``g``, fusing aug's root with ``node_uid``.

    Every non-root node of ``aug`` is copied into ``g`` as a fresh object;
    the root's payload overwrites the existing node ``node_uid``.  All edges
    of ``aug`` are then recreated between the corresponding nodes of ``g``.
    """

    def __call__(self, g, node_uid, aug):
        root_uid = aug.get_root()
        uid_map = {}
        # Pass 1: copy payloads, remembering where each aug uid landed in g.
        for src_uid in aug:
            if src_uid == root_uid:
                g[node_uid] = aug[src_uid]
                uid_map[src_uid] = node_uid
            else:
                uid_map[src_uid] = g.add_object(aug[src_uid])
        # Pass 2: recreate aug's arrows between the mapped nodes.
        for src_uid in aug:
            for dst_uid in aug.neighbors(src_uid):
                g.add_arrow(uid_map[src_uid], uid_map[dst_uid])
| miur/miur | OLD/miur/graph/transform.py | Python | gpl-3.0 | 554 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of AudioLazy, the signal processing Python package.
# Copyright (C) 2012-2014 Danilo de Jesus da Silva Bellini
#
# AudioLazy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Created on Wed May 01 2013
# danilo [dot] bellini [at] gmail [dot] com
"""
Pitch follower via DFT peak with Tkinter GUI
"""
# ------------------------
# AudioLazy pitch follower
# ------------------------
import sys
from audiolazy import (tostream, AudioIO, freq2str, sHz, chunks,
lowpass, envelope, pi, thub, Stream, maverage)
from numpy.fft import rfft
def limiter(sig, threshold=.1, size=256, env=envelope.rms, cutoff=pi/2048):
  # Dynamic-range limiter: wherever the smoothed envelope (a ``size``-sample
  # moving average of ``env``) exceeds ``threshold``, the signal is scaled
  # down by threshold/envelope; below-threshold samples pass unchanged.
  # ``cutoff`` is forwarded to the envelope follower (rad/sample).
  sig = thub(sig, 2)  # "tee hub": lets the lazy stream be consumed twice below
  return sig * Stream( 1. if el <= threshold else threshold / el
                       for el in maverage(size)(env(sig, cutoff=cutoff)) )
@tostream
def dft_pitch(sig, size=2048, hop=None):
  """
  Pitch tracker by DFT peak picking.

  Splits ``sig`` into blocks of ``size`` samples (advancing ``hop`` samples
  per step), takes the real FFT of each block and yields the angular
  frequency ``2 * pi * idx / size`` (rad/sample) of the winning magnitude
  bin.  The divisor ``2 * el[0] / size + 1`` biases the choice toward lower
  bins so strong harmonics don't beat the fundamental.
  """
  for blk in Stream(sig).blocks(size=size, hop=hop):
    dft_data = rfft(blk)
    # Only the winning bin index matters; the peak value itself was an
    # unused local (``vmax``) in the original code.
    idx, _ = max(enumerate(dft_data),
                 key=lambda el: abs(el[1]) / (2 * el[0] / size + 1)
                 )
    yield 2 * pi * idx / size
def pitch_from_mic(upd_time_in_ms):
  # Generator yielding one formatted pitch string (via freq2str) roughly
  # every ``upd_time_in_ms`` milliseconds, tracked live from the microphone.
  rate = 44100
  s, Hz = sHz(rate)
  api = sys.argv[1] if sys.argv[1:] else None  # Choose API via command-line
  # JACK needs the smallest chunk size to keep latency down; other APIs
  # tolerate bigger chunks.
  chunks.size = 1 if api == "jack" else 16
  with AudioIO(api=api) as recorder:
    snd = recorder.record(rate=rate)
    # Limit, then low-pass at 400 Hz, before pitch detection.
    sndlow = lowpass(400 * Hz)(limiter(snd, cutoff=20 * Hz))
    hop = int(upd_time_in_ms * 1e-3 * s)  # samples between yielded pitches
    for pitch in freq2str(dft_pitch(sndlow, size=2*hop, hop=hop) / Hz):
      yield pitch
yield pitch
# ----------------
# GUI with tkinter
# ----------------
if __name__ == "__main__":
  # Tkinter GUI front-end: a big label showing the current pitch, refreshed
  # from a background recording thread.
  try:
    import tkinter
  except ImportError:
    import Tkinter as tkinter  # Python 2 fallback
  import threading
  import re
  # Window (Tk init), text label and button
  tk = tkinter.Tk()
  tk.title(__doc__.strip().splitlines()[0])
  lbldata = tkinter.StringVar(tk)
  lbltext = tkinter.Label(tk, textvariable=lbldata, font=("Purisa", 72),
                          width=10)
  lbltext.pack(expand=True, fill=tkinter.BOTH)
  btnclose = tkinter.Button(tk, text="Close", command=tk.destroy,
                            default="active")
  btnclose.pack(fill=tkinter.X)
  # Needed data
  # Splits a note string such as "A4+13%" into (note, direction, detune).
  regex_note = re.compile(r"^([A-Gb#]*-?[0-9]*)([?+-]?)(.*?%?)$")
  upd_time_in_ms = 200
  # Update functions for each thread
  def upd_value(): # Recording thread
    # Pulls pitches from the microphone generator and publishes the latest
    # one on the shared attribute tk.value (read by the GUI thread).
    pitches = iter(pitch_from_mic(upd_time_in_ms))
    while not tk.should_finish:
      tk.value = next(pitches)
  def upd_timer(): # GUI mainloop thread
    # Re-renders the label from tk.value and re-schedules itself.
    lbldata.set("\n".join(regex_note.findall(tk.value)[0]))
    tk.after(upd_time_in_ms, upd_timer)
  # Multi-thread management initialization
  tk.should_finish = False
  tk.value = freq2str(0) # Starting value
  lbldata.set(tk.value)
  tk.upd_thread = threading.Thread(target=upd_value)
  # Go
  tk.upd_thread.start()
  tk.after_idle(upd_timer)
  tk.mainloop()
  # mainloop returned: flag the recorder thread to stop and wait for it.
  tk.should_finish = True
  tk.upd_thread.join()
| antiface/audiolazy | examples/dft_pitch.py | Python | gpl-3.0 | 3,525 |
import os
import tempfile
import zipfile
from PyQt5 import QtCore, QtWidgets
import util
from vaults.modvault import utils
FormClass, BaseClass = util.THEME.loadUiType("vaults/modvault/upload.ui")
class UploadModWidget(FormClass, BaseClass):
    """Dialog that previews a local mod's metadata and zips it for upload.

    The actual upload transport is not implemented yet (see the TODO at the
    end of :meth:`upload`).
    """

    def __init__(self, parent, modDir, modinfo, *args, **kwargs):
        # parent: owning widget; modDir: path of the mod folder on disk;
        # modinfo: parsed mod_info.lua data (name, version, uid, icon, ...).
        BaseClass.__init__(self, *args, **kwargs)
        self.setupUi(self)
        self.parent = parent
        self.client = self.parent.client  # type - ClientWindow
        self.modinfo = modinfo
        self.modDir = modDir
        util.THEME.stylesheets_reloaded.connect(self.load_stylesheet)
        self.load_stylesheet()
        self.setWindowTitle("Uploading Mod")
        # Populate the preview fields from the parsed mod info.
        self.Name.setText(modinfo.name)
        self.Version.setText(str(modinfo.version))
        if modinfo.ui_only:
            self.isUILabel.setText("is UI Only")
        else:
            self.isUILabel.setText("not UI Only")
        self.UID.setText(modinfo.uid)
        self.Description.setPlainText(modinfo.description)
        if modinfo.icon != "":
            self.IconURI.setText(utils.iconPathToFull(modinfo.icon))
            self.updateThumbnail()
        else:
            # No icon declared: fall back to the generic placeholder image.
            self.Thumbnail.setPixmap(
                util.THEME.pixmap("games/unknown_map.png"),
            )
        self.UploadButton.pressed.connect(self.upload)

    def load_stylesheet(self):
        # Re-applied whenever the theme signals that stylesheets changed.
        self.setStyleSheet(util.THEME.readstylesheet("client/client.css"))

    @QtCore.pyqtSlot()
    def upload(self):
        # Validate name and icon location, then zip the mod folder.
        n = self.Name.text()
        if any([(i in n) for i in '"<*>|?/\\:']):
            QtWidgets.QMessageBox.information(
                self.client,
                "Invalid Name",
                "The mod name contains invalid characters: /\\<>|?:\"",
            )
            return
        iconpath = utils.iconPathToFull(self.modinfo.icon)
        infolder = False
        # commonprefix-against-modDir check: true when the icon path lies
        # inside the mod folder (prefix equality after normcase).
        if (
            iconpath != ""
            and (
                os.path.commonprefix([
                    os.path.normcase(self.modDir),
                    os.path.normcase(iconpath),
                ])
                == os.path.normcase(self.modDir)
            )
        ):  # the icon is in the game folder
            # localpath = utils.fullPathToIcon(iconpath)
            infolder = True
        if iconpath != "" and not infolder:
            QtWidgets.QMessageBox.information(
                self.client,
                "Invalid Icon File",
                (
                    "The file {} is not located inside the modfolder. Copy the"
                    " icon file to your modfolder and change the mod_info.lua "
                    "accordingly".format(iconpath)
                ),
            )
            return
        try:
            # delete=False: the zip must outlive this scope for the upload.
            temp = tempfile.NamedTemporaryFile(
                mode='w+b', suffix=".zip", delete=False,
            )
            zipped = zipfile.ZipFile(temp, "w", zipfile.ZIP_DEFLATED)
            zipdir(self.modDir, zipped, os.path.basename(self.modDir))
            zipped.close()
            temp.flush()
        except BaseException:
            QtWidgets.QMessageBox.critical(
                self.client,
                "Mod uploading error",
                "Something went wrong zipping the mod files.",
            )
            return
        # qfile = QtCore.QFile(temp.name)
        # TODO: implement uploading via API
        ...

    @QtCore.pyqtSlot()
    def updateThumbnail(self):
        # Render the mod icon into the Thumbnail widget; converts .dds icons
        # to .png first.  Returns True on success, False otherwise.
        iconfilename = utils.iconPathToFull(self.modinfo.icon)
        if iconfilename == "":
            return False
        if os.path.splitext(iconfilename)[1].lower() == ".dds":
            old = iconfilename
            iconfilename = os.path.join(
                self.modDir,
                os.path.splitext(os.path.basename(iconfilename))[0] + ".png",
            )
            succes = utils.generateThumbnail(old, iconfilename)
            if not succes:
                QtWidgets.QMessageBox.information(
                    self.client,
                    "Invalid Icon File",
                    (
                        "Because FAF can't read DDS files, it tried to convert"
                        " it to a png. This failed. Try something else"
                    ),
                )
                return False
        try:
            self.Thumbnail.setPixmap(util.THEME.pixmap(iconfilename, False))
        except BaseException:
            QtWidgets.QMessageBox.information(
                self.client,
                "Invalid Icon File",
                "This was not a valid icon file. Please pick a png or jpeg",
            )
            return False
        self.modinfo.thumbnail = utils.fullPathToIcon(iconfilename)
        self.IconURI.setText(iconfilename)
        return True
def zipdir(path, zipf, fname):
    """Recursively add every file under *path* to the open ZipFile *zipf*.

    Inside the archive each file is stored under *fname*, i.e. a file
    ``<path>/sub/a.txt`` becomes ``<fname>/sub/a.txt``.

    Bug fixed: the previous prefix-stripping only removed a leading
    backslash, so on POSIX the relative part kept its leading "/" and
    ``os.path.join(fname, "/sub/a.txt")`` discarded *fname* entirely,
    producing absolute archive member names.  ``os.path.relpath`` computes
    the archive-relative path correctly on every platform.
    """
    path = os.path.normcase(path)
    # Drop a trailing separator so relpath is computed against the directory
    # itself rather than an empty final component.
    if path[-1] in r'\/':
        path = path[:-1]
    for root, _dirs, files in os.walk(path):
        for filename in files:
            full = os.path.join(os.path.normcase(root), filename)
            arcname = os.path.join(fname, os.path.relpath(full, path))
            zipf.write(full, arcname)
| FAForever/client | src/vaults/modvault/uploadwidget.py | Python | gpl-3.0 | 5,368 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2013 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.shortcuts import render_to_response, get_object_or_404
from django.views.decorators.cache import cache_page
from weblate.trans import appsettings
from django.core.servers.basehttp import FileWrapper
from django.utils.translation import ugettext as _
import django.utils.translation
from django.template import RequestContext, loader
from django.http import (
HttpResponse, HttpResponseRedirect, HttpResponseNotFound, Http404
)
from django.contrib import messages
from django.contrib.auth.decorators import (
login_required, permission_required, user_passes_test
)
from django.contrib.auth.models import AnonymousUser
from django.db.models import Q, Count, Sum
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.utils.safestring import mark_safe
from weblate.trans.models import (
Project, SubProject, Translation, Unit, Suggestion, Check,
Dictionary, Change, Comment, get_versions
)
from weblate.lang.models import Language
from weblate.trans.checks import CHECKS
from weblate.trans.forms import (
TranslationForm, UploadForm, SimpleUploadForm, ExtraUploadForm, SearchForm,
MergeForm, AutoForm, WordForm, DictUploadForm, ReviewForm, LetterForm,
AntispamForm, CommentForm
)
from weblate.trans.util import join_plural
from weblate.accounts.models import Profile, send_notification_email
import weblate
from whoosh.analysis import StandardAnalyzer, StemmingAnalyzer
import datetime
import logging
import os.path
import json
import csv
from xml.etree import ElementTree
import urllib2
# See https://code.djangoproject.com/ticket/6027
class FixedFileWrapper(FileWrapper):
    # Works around Django ticket #6027 (see the link above): rewind the
    # wrapped file object before iteration, so the response streams it from
    # the beginning even if the file was already read (e.g. to get its size).
    def __iter__(self):
        self.filelike.seek(0)
        return self
logger = logging.getLogger('weblate')
def home(request):
    '''
    Home page of Weblate showing list of projects, stats
    and user links if logged in.
    '''
    projects = Project.objects.all_acl(request.user)
    # Keep the project-level queryset around: ``projects`` may be narrowed
    # to subprojects below, but the recent-changes query needs projects.
    acl_projects = projects
    # Single-project installation: show its subprojects directly instead.
    if projects.count() == 1:
        projects = SubProject.objects.filter(project=projects[0])
    # Warn about not filled in username (usually caused by migration of
    # users from older system
    if not request.user.is_anonymous() and request.user.get_full_name() == '':
        messages.warning(
            request,
            _('Please set your full name in your profile.')
        )
    # Load user translations if user is authenticated
    usertranslations = None
    if request.user.is_authenticated():
        profile = request.user.get_profile()
        # Translations in any of the languages the user chose in the profile.
        usertranslations = Translation.objects.filter(
            language__in=profile.languages.all()
        ).order_by(
            'subproject__project__name', 'subproject__name'
        )
    # Some stats
    top_translations = Profile.objects.order_by('-translated')[:10]
    top_suggestions = Profile.objects.order_by('-suggested')[:10]
    last_changes = Change.objects.filter(
        translation__subproject__project__in=acl_projects,
    ).order_by( '-timestamp')[:10]
    return render_to_response('index.html', RequestContext(request, {
        'projects': projects,
        'top_translations': top_translations,
        'top_suggestions': top_suggestions,
        'last_changes': last_changes,
        'last_changes_rss': reverse('rss'),
        'usertranslations': usertranslations,
    }))
def show_checks(request):
    '''
    Overview listing every check type that currently fails, together with
    the number of failing entries per check.
    '''
    failing = (
        Check.objects
        .filter(ignore=False)
        .values('check')
        .annotate(count=Count('id'))
    )
    context = RequestContext(request, {
        'checks': failing,
        'title': _('Failing checks'),
    })
    return render_to_response('checks.html', context)
def show_check(request, name):
    '''
    Detail page for a single check type: per-project counts of its
    failing entries.
    '''
    if name not in CHECKS:
        raise Http404('No check matches the given query.')
    check = CHECKS[name]
    per_project = (
        Check.objects
        .filter(check=name, ignore=False)
        .values('project__slug')
        .annotate(count=Count('id'))
    )
    context = RequestContext(request, {
        'checks': per_project,
        'title': check.name,
        'check': check,
    })
    return render_to_response('check.html', context)
def show_check_project(request, name, project):
    '''
    Show checks failing in a project.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    try:
        check = CHECKS[name]
    except KeyError:
        raise Http404('No check matches the given query.')
    units = Unit.objects.none()
    # Target-side checks: aggregate failing units per subproject, one
    # language at a time (the checksum set differs per language).
    if check.target:
        langs = Check.objects.filter(
            check=name, project=prj, ignore=False
        ).values_list('language', flat=True).distinct()
        for lang in langs:
            checks = Check.objects.filter(
                check=name, project=prj, language=lang, ignore=False
            ).values_list('checksum', flat=True)
            res = Unit.objects.filter(
                checksum__in=checks,
                translation__language=lang,
                translation__subproject__project=prj,
                translated=True
            ).values(
                'translation__subproject__slug',
                'translation__subproject__project__slug'
            ).annotate(count=Count('id'))
            # Accumulate the per-language aggregates with queryset union.
            units |= res
    # Source-side checks are stored with language=None; match them against
    # each subproject's first translation language.
    if check.source:
        checks = Check.objects.filter(
            check=name,
            project=prj,
            language=None,
            ignore=False
        ).values_list(
            'checksum', flat=True
        )
        for subproject in prj.subproject_set.all():
            lang = subproject.translation_set.all()[0].language
            res = Unit.objects.filter(
                checksum__in=checks,
                translation__language=lang,
                translation__subproject=subproject
            ).values(
                'translation__subproject__slug',
                'translation__subproject__project__slug'
            ).annotate(count=Count('id'))
            units |= res
    return render_to_response('check_project.html', RequestContext(request, {
        'checks': units,
        'title': '%s/%s' % (prj.__unicode__(), check.name),
        'check': check,
        'project': prj,
    }))
def show_check_subproject(request, name, project, subproject):
    '''
    Show checks failing in a subproject.
    '''
    subprj = get_object_or_404(
        SubProject,
        slug=subproject,
        project__slug=project
    )
    subprj.check_acl(request)
    try:
        check = CHECKS[name]
    except KeyError:
        raise Http404('No check matches the given query.')
    units = Unit.objects.none()
    # Target-side checks: per-language counts of failing translated units
    # inside this subproject.
    if check.target:
        langs = Check.objects.filter(
            check=name,
            project=subprj.project,
            ignore=False
        ).values_list(
            'language', flat=True
        ).distinct()
        for lang in langs:
            checks = Check.objects.filter(
                check=name,
                project=subprj.project,
                language=lang,
                ignore=False
            ).values_list('checksum', flat=True)
            res = Unit.objects.filter(
                translation__subproject=subprj,
                checksum__in=checks,
                translation__language=lang,
                translated=True
            ).values(
                'translation__language__code'
            ).annotate(count=Count('id'))
            # Accumulate per-language aggregates with queryset union.
            units |= res
    source_checks = []
    # Source-side checks (stored with language=None): a single count,
    # evaluated against the subproject's first translation language.
    if check.source:
        checks = Check.objects.filter(
            check=name, project=subprj.project,
            language=None,
            ignore=False
        ).values_list('checksum', flat=True)
        lang = subprj.translation_set.all()[0].language
        res = Unit.objects.filter(
            translation__subproject=subprj,
            checksum__in=checks,
            translation__language=lang
        ).count()
        if res > 0:
            source_checks.append(res)
    return render_to_response(
        'check_subproject.html',
        RequestContext(request, {
            'checks': units,
            'source_checks': source_checks,
            'anychecks': len(units) + len(source_checks) > 0,
            'title': '%s/%s' % (subprj.__unicode__(), check.name),
            'check': check,
            'subproject': subprj,
        })
    )
def show_languages(request):
    # Overview of every language that has at least one translation.
    context = {
        'languages': Language.objects.have_translation(),
        'title': _('Languages'),
    }
    return render_to_response('languages.html', RequestContext(request, context))
def show_language(request, lang):
    '''Language detail: recent changes plus projects carrying a glossary.'''
    obj = get_object_or_404(Language, code=lang)
    recent = Change.objects.filter(
        translation__language=obj
    ).order_by('-timestamp')[:10]
    # Projects that have at least one dictionary entry in this language.
    dict_project_ids = Dictionary.objects.filter(
        language=obj
    ).values_list('project', flat=True).distinct()
    context = RequestContext(request, {
        'object': obj,
        'last_changes': recent,
        'last_changes_rss': reverse('rss-language', kwargs={'lang': obj.code}),
        'dicts': Project.objects.filter(id__in=dict_project_ids),
    })
    return render_to_response('language.html', context)
def show_dictionaries(request, project):
    '''List languages available as glossaries within a project.'''
    obj = get_object_or_404(Project, slug=project)
    obj.check_acl(request)
    # Every language this project is being translated into.
    language_ids = Translation.objects.filter(
        subproject__project=obj
    ).values_list('language', flat=True).distinct()
    context = RequestContext(request, {
        'title': _('Dictionaries'),
        'dicts': Language.objects.filter(id__in=language_ids),
        'project': obj,
    })
    return render_to_response('dictionaries.html', context)
@login_required
@permission_required('trans.change_dictionary')
def edit_dictionary(request, project, lang):
    '''
    Edit a single dictionary word (identified by the ?id= GET parameter);
    on successful POST redirects back to the dictionary listing.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    lang = get_object_or_404(Language, code=lang)
    word = get_object_or_404(
        Dictionary,
        project=prj,
        language=lang,
        id=request.GET.get('id')
    )
    if request.method == 'POST':
        form = WordForm(request.POST)
        if form.is_valid():
            word.source = form.cleaned_data['source']
            word.target = form.cleaned_data['target']
            word.save()
            return HttpResponseRedirect(reverse(
                'weblate.trans.views.show_dictionary',
                kwargs={'project': prj.slug, 'lang': lang.code}
            ))
    else:
        # GET: prefill the form with the word being edited.
        form = WordForm(
            initial={'source': word.source, 'target': word.target}
        )
    return render_to_response('edit_dictionary.html', RequestContext(request, {
        'title': _('%(language)s dictionary for %(project)s') %
        {'language': lang, 'project': prj},
        'project': prj,
        'language': lang,
        'form': form,
    }))
@login_required
@permission_required('trans.delete_dictionary')
def delete_dictionary(request, project, lang):
    '''Delete one dictionary word (POSTed id), then return to the listing.'''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    lang = get_object_or_404(Language, code=lang)
    word = get_object_or_404(
        Dictionary,
        project=prj,
        language=lang,
        id=request.POST.get('id')
    )
    word.delete()
    target = reverse(
        'weblate.trans.views.show_dictionary',
        kwargs={'project': prj.slug, 'lang': lang.code}
    )
    return HttpResponseRedirect(target)
@login_required
@permission_required('trans.upload_dictionary')
def upload_dictionary(request, project, lang):
    '''
    Handle a glossary file upload, importing its words into the project
    dictionary, then redirect back to the dictionary listing.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    lang = get_object_or_404(Language, code=lang)
    if request.method == 'POST':
        form = DictUploadForm(request.POST, request.FILES)
        if form.is_valid():
            try:
                count = Dictionary.objects.upload(
                    prj,
                    lang,
                    request.FILES['file'],
                    form.cleaned_data['overwrite']
                )
                if count == 0:
                    messages.warning(
                        request,
                        _('No words to import found in file.')
                    )
                else:
                    messages.info(
                        request,
                        _('Imported %d words from file.') % count
                    )
            except Exception as e:
                # Interpolate OUTSIDE the gettext call: the previous code
                # formatted the message first and then looked the result up
                # for translation, so the msgid never matched the catalog and
                # the error always appeared untranslated.
                messages.error(
                    request,
                    _('File content merge failed: %s') % unicode(e)
                )
        else:
            messages.error(request, _('Failed to process form!'))
    else:
        messages.error(request, _('Failed to process form!'))
    return HttpResponseRedirect(reverse(
        'weblate.trans.views.show_dictionary',
        kwargs={'project': prj.slug, 'lang': lang.code}
    ))
def download_dictionary(request, project, lang):
    '''
    Exports the dictionary in CSV or gettext PO format
    (selected with the ?format= GET parameter, defaulting to csv).
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    lang = get_object_or_404(Language, code=lang)
    # Parse parameters; fall back to csv for a missing OR unknown value.
    # (Previously a request without ?format= left export_format as None,
    # neither branch below matched and the view returned None, which made
    # Django raise a 500 error.)
    export_format = request.GET.get('format', 'csv')
    if export_format not in ('csv', 'po'):
        export_format = 'csv'
    # Grab all words
    words = Dictionary.objects.filter(
        project=prj,
        language=lang
    ).order_by('source')
    if export_format == 'csv':
        response = HttpResponse(mimetype='text/csv; charset=utf-8')
        filename = 'dictionary-%s-%s.csv' % (prj.slug, lang.code)
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
        writer = csv.writer(response)
        for word in words.iterator():
            writer.writerow((
                word.source.encode('utf8'), word.target.encode('utf8')
            ))
        return response
    else:
        # export_format == 'po' (guaranteed by the normalization above)
        from translate.storage.po import pounit, pofile
        response = HttpResponse(mimetype='text/x-po; charset=utf-8')
        filename = 'dictionary-%s-%s.po' % (prj.slug, lang.code)
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
        store = pofile()
        site = Site.objects.get_current()
        store.updateheader(
            add=True,
            language=lang.code,
            x_generator='Weblate %s' % weblate.VERSION,
            project_id_version='%s dictionary for %s' % (lang.name, prj.name),
            language_team='%s <http://%s%s>' % (
                lang.name,
                site.domain,
                reverse(
                    'weblate.trans.views.show_dictionary',
                    kwargs={'project': prj.slug, 'lang': lang.code}
                ),
            )
        )
        for word in words.iterator():
            unit = pounit(word.source)
            unit.target = word.target
            store.addunit(unit)
        store.savefile(response)
        return response
def show_dictionary(request, project, lang):
    '''
    Paginated dictionary listing with optional first-letter filtering and
    an inline form for adding new words (POST, permission-gated).
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    lang = get_object_or_404(Language, code=lang)
    if (request.method == 'POST'
            and request.user.has_perm('trans.add_dictionary')):
        form = WordForm(request.POST)
        if form.is_valid():
            Dictionary.objects.create(
                project=prj,
                language=lang,
                source=form.cleaned_data['source'],
                target=form.cleaned_data['target']
            )
        # Redirect even on invalid form (post/redirect/get pattern).
        return HttpResponseRedirect(request.get_full_path())
    else:
        form = WordForm()
    uploadform = DictUploadForm()
    words = Dictionary.objects.filter(
        project=prj, language=lang
    ).order_by('source')
    # NOTE(review): ?limit= arrives as a string from GET while the default
    # is an int; a non-numeric value would break the paginator — confirm
    # upstream validation.
    limit = request.GET.get('limit', 25)
    page = request.GET.get('page', 1)
    letterform = LetterForm(request.GET)
    if letterform.is_valid() and letterform.cleaned_data['letter'] != '':
        words = words.filter(
            source__istartswith=letterform.cleaned_data['letter']
        )
        letter = letterform.cleaned_data['letter']
    else:
        letter = ''
    paginator = Paginator(words, limit)
    try:
        words = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        words = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        words = paginator.page(paginator.num_pages)
    return render_to_response('dictionary.html', RequestContext(request, {
        'title': _('%(language)s dictionary for %(project)s') %
        {'language': lang, 'project': prj},
        'project': prj,
        'language': lang,
        'words': words,
        'form': form,
        'uploadform': uploadform,
        'letterform': letterform,
        'letter': letter,
    }))
def show_engage(request, project, lang=None):
    '''
    Standalone "engage" page showing a project's translation status,
    optionally localised to (and describing) a particular language.
    '''
    # Get project object
    obj = get_object_or_404(Project, slug=project)
    obj.check_acl(request)
    # Handle language parameter
    language = None
    if lang is not None:
        try:
            django.utils.translation.activate(lang)
        except Exception:
            # Ignore failure on activating language; the page just renders
            # in the default locale.  (Was a bare ``except:``, which also
            # swallowed SystemExit and KeyboardInterrupt.)
            pass
        try:
            language = Language.objects.get(code=lang)
        except Language.DoesNotExist:
            pass
    context = {
        'object': obj,
        'project': obj.name,
        'languages': obj.get_language_count(),
        'total': obj.get_total(),
        'percent': obj.get_translated_percent(language),
        'url': obj.get_absolute_url(),
        'language': language,
    }
    # Render text
    if language is None:
        status_text = _(
            '<a href="%(url)s">Translation project for %(project)s</a> '
            'currently contains %(total)s strings for translation and is '
            '<a href="%(url)s">being translated into %(languages)s languages'
            '</a>. Overall, these translations are %(percent)s%% complete.'
        )
    else:
        # Translators: line of text in engagement widget, please use your
        # language name instead of English
        status_text = _(
            '<a href="%(url)s">Translation project for %(project)s</a> into '
            'English currently contains %(total)s strings for translation and '
            'is %(percent)s%% complete.'
        )
        # Fallback for translators who kept the literal word "English":
        # substitute the actual language name into the translated string.
        if 'English' in status_text:
            status_text = status_text.replace('English', language.name)
    context['status_text'] = mark_safe(status_text % context)
    return render_to_response('engage.html', RequestContext(request, context))
def show_project(request, project):
    '''Project detail: glossary languages and ten most recent changes.'''
    obj = get_object_or_404(Project, slug=project)
    obj.check_acl(request)
    # Languages with at least one glossary entry in this project.
    dict_language_ids = Dictionary.objects.filter(
        project=obj
    ).values_list(
        'language', flat=True
    ).distinct()
    recent = Change.objects.filter(
        translation__subproject__project=obj
    ).order_by('-timestamp')[:10]
    context = RequestContext(request, {
        'object': obj,
        'dicts': Language.objects.filter(id__in=dict_language_ids),
        'last_changes': recent,
        'last_changes_rss': reverse(
            'rss-project',
            kwargs={'project': obj.slug}
        ),
    })
    return render_to_response('project.html', context)
def show_subproject(request, project, subproject):
    '''Subproject detail page with its ten most recent changes.'''
    obj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    obj.check_acl(request)
    recent = Change.objects.filter(
        translation__subproject=obj
    ).order_by('-timestamp')[:10]
    context = RequestContext(request, {
        'object': obj,
        'last_changes': recent,
        'last_changes_rss': reverse(
            'rss-subproject',
            kwargs={'subproject': obj.slug, 'project': obj.project.slug}
        ),
    })
    return render_to_response('subproject.html', context)
@login_required
@permission_required('trans.automatic_translation')
def auto_translation(request, project, subproject, lang):
    '''
    Machine-assisted fill-in: copy matching translations from other
    subprojects (or one chosen subproject) of the same project into the
    selected translation, then redirect back to it.
    '''
    obj = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    obj.check_acl(request)
    obj.commit_pending()
    autoform = AutoForm(obj, request.POST)
    change = None
    if not obj.subproject.locked and autoform.is_valid():
        # Select which units to (re)translate, per the form choice.
        if autoform.cleaned_data['inconsistent']:
            units = obj.unit_set.filter_type('inconsistent', obj)
        elif autoform.cleaned_data['overwrite']:
            units = obj.unit_set.all()
        else:
            units = obj.unit_set.filter(translated=False)
        # Candidate source units: translated units in the same language.
        sources = Unit.objects.filter(
            translation__language=obj.language,
            translated=True
        )
        if autoform.cleaned_data['subproject'] == '':
            # Any other subproject of the same project (excluding self).
            sources = sources.filter(
                translation__subproject__project=obj.subproject.project
            ).exclude(
                translation=obj
            )
        else:
            subprj = SubProject.objects.get(
                project=obj.subproject.project,
                slug=autoform.cleaned_data['subproject']
            )
            sources = sources.filter(translation__subproject=subprj)
        for unit in units.iterator():
            # Units are matched by source-string checksum.
            update = sources.filter(checksum=unit.checksum)
            if update.exists():
                # Get first entry
                update = update[0]
                # No save if translation is same
                if unit.fuzzy == update.fuzzy and unit.target == update.target:
                    continue
                # Copy translation
                unit.fuzzy = update.fuzzy
                unit.target = update.target
                # Create single change object for whole merge
                if change is None:
                    change = Change.objects.create(
                        unit=unit,
                        translation=unit.translation,
                        user=request.user
                    )
                # Save unit to backend
                unit.save_backend(request, False, False)
        messages.info(request, _('Automatic translation completed.'))
    else:
        messages.error(request, _('Failed to process form!'))
    return HttpResponseRedirect(obj.get_absolute_url())
def review_source(request, project, subproject):
    '''
    Listing of source strings to review.
    '''
    obj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    obj.check_acl(request)
    if not obj.translation_set.exists():
        raise Http404('No translation exists in this subproject.')
    # Grab first translation in subproject
    # (this assumes all have same source strings)
    source = obj.translation_set.all()[0]
    # Grab search type and page number
    rqtype = request.GET.get('type', 'all')
    # NOTE(review): ?limit= comes back as a string while the default is an
    # int; a non-numeric value would break the paginator — confirm upstream.
    limit = request.GET.get('limit', 50)
    page = request.GET.get('page', 1)
    # Filter units
    sources = source.unit_set.filter_type(rqtype, source)
    paginator = Paginator(sources, limit)
    try:
        sources = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        sources = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        sources = paginator.page(paginator.num_pages)
    return render_to_response('source-review.html', RequestContext(request, {
        'object': obj,
        'source': source,
        'sources': sources,
        'title': _('Review source strings in %s') % obj.__unicode__(),
    }))
def show_source(request, project, subproject):
    '''
    Show source strings summary and checks.
    '''
    obj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    obj.check_acl(request)
    if not obj.translation_set.exists():
        raise Http404('No translation exists in this subproject.')
    # Every translation shares the same source strings, so any one works.
    source = obj.translation_set.all()[0]
    context = RequestContext(request, {
        'object': obj,
        'source': source,
        'title': _('Source strings in %s') % obj.__unicode__(),
    })
    return render_to_response('source.html', context)
def show_translation(request, project, subproject, lang):
    '''
    Translation overview page: recent changes plus upload, automatic
    translation, search and review forms tailored to user privileges.
    '''
    obj = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    obj.check_acl(request)
    # Ten most recent changes for the sidebar
    last_changes = Change.objects.filter(
        translation=obj
    ).order_by('-timestamp')[:10]
    # Check locks
    obj.is_locked(request)
    # How much is user allowed to configure upload?
    if request.user.has_perm('trans.author_translation'):
        form = ExtraUploadForm()
    elif request.user.has_perm('trans.overwrite_translation'):
        form = UploadForm()
    else:
        form = SimpleUploadForm()
    # Is user allowed to do automatic translation?
    if request.user.has_perm('trans.automatic_translation'):
        autoform = AutoForm(obj)
    else:
        autoform = None
    # Search form for everybody
    search_form = SearchForm()
    # Review form for logged in users
    if request.user.is_anonymous():
        review_form = None
    else:
        review_form = ReviewForm(
            initial={
                # Default review window: changes from the last 31 days
                'date': datetime.date.today() - datetime.timedelta(days=31)
            }
        )
    return render_to_response('translation.html', RequestContext(request, {
        'object': obj,
        'form': form,
        'autoform': autoform,
        'search_form': search_form,
        'review_form': review_form,
        'last_changes': last_changes,
        'last_changes_rss': reverse(
            'rss-translation',
            kwargs={
                'lang': obj.language.code,
                'subproject': obj.subproject.slug,
                'project': obj.subproject.project.slug
            }
        ),
    }))
@login_required
@permission_required('trans.commit_translation')
def commit_project(request, project):
    '''
    Commits any pending translations in all subprojects of a project.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    prj.commit_pending()
    messages.info(request, _('All pending translations were committed.'))
    return HttpResponseRedirect(prj.get_absolute_url())
@login_required
@permission_required('trans.commit_translation')
def commit_subproject(request, project, subproject):
    '''
    Commits any pending translations within a single subproject.
    '''
    subprj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    subprj.check_acl(request)
    subprj.commit_pending()
    messages.info(request, _('All pending translations were committed.'))
    return HttpResponseRedirect(subprj.get_absolute_url())
@login_required
@permission_required('trans.commit_translation')
def commit_translation(request, project, subproject, lang):
    '''
    Commits any pending changes of one translation to the repository.
    '''
    translation = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    translation.check_acl(request)
    translation.commit_pending()
    messages.info(request, _('All pending translations were committed.'))
    return HttpResponseRedirect(translation.get_absolute_url())
@login_required
@permission_required('trans.update_translation')
def update_project(request, project):
    '''
    Pulls remote changes into every repository of a project.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    if prj.do_update(request):
        messages.info(request, _('All repositories were updated.'))
    return HttpResponseRedirect(prj.get_absolute_url())
@login_required
@permission_required('trans.update_translation')
def update_subproject(request, project, subproject):
    '''
    Pulls remote changes into the repository of a subproject.
    '''
    subprj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    subprj.check_acl(request)
    if subprj.do_update(request):
        messages.info(request, _('All repositories were updated.'))
    return HttpResponseRedirect(subprj.get_absolute_url())
@login_required
@permission_required('trans.update_translation')
def update_translation(request, project, subproject, lang):
    '''
    Pulls remote changes into the repository backing one translation.
    '''
    translation = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    translation.check_acl(request)
    if translation.do_update(request):
        messages.info(request, _('All repositories were updated.'))
    return HttpResponseRedirect(translation.get_absolute_url())
@login_required
@permission_required('trans.push_translation')
def push_project(request, project):
    '''
    Pushes committed changes of every repository in a project upstream.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    if prj.do_push(request):
        messages.info(request, _('All repositories were pushed.'))
    return HttpResponseRedirect(prj.get_absolute_url())
@login_required
@permission_required('trans.push_translation')
def push_subproject(request, project, subproject):
    '''
    Pushes committed changes of a subproject repository upstream.
    '''
    subprj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    subprj.check_acl(request)
    if subprj.do_push(request):
        messages.info(request, _('All repositories were pushed.'))
    return HttpResponseRedirect(subprj.get_absolute_url())
@login_required
@permission_required('trans.push_translation')
def push_translation(request, project, subproject, lang):
    '''
    Pushes committed changes of one translation's repository upstream.
    '''
    translation = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    translation.check_acl(request)
    if translation.do_push(request):
        messages.info(request, _('All repositories were pushed.'))
    return HttpResponseRedirect(translation.get_absolute_url())
@login_required
@permission_required('trans.reset_translation')
def reset_project(request, project):
    '''
    Resets every repository in a project to its upstream state.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    if prj.do_reset(request):
        messages.info(request, _('All repositories have been reset.'))
    return HttpResponseRedirect(prj.get_absolute_url())
@login_required
@permission_required('trans.reset_translation')
def reset_subproject(request, project, subproject):
    '''
    Resets a subproject repository to its upstream state.
    '''
    subprj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    subprj.check_acl(request)
    if subprj.do_reset(request):
        messages.info(request, _('All repositories have been reset.'))
    return HttpResponseRedirect(subprj.get_absolute_url())
@login_required
@permission_required('trans.reset_translation')
def reset_translation(request, project, subproject, lang):
    '''
    Resets the repository backing one translation to its upstream state.
    '''
    translation = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    translation.check_acl(request)
    if translation.do_reset(request):
        messages.info(request, _('All repositories have been reset.'))
    return HttpResponseRedirect(translation.get_absolute_url())
@login_required
@permission_required('trans.lock_translation')
def lock_translation(request, project, subproject, lang):
    '''
    Locks a translation for the current user, unless somebody else
    already holds the lock.
    '''
    translation = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    translation.check_acl(request)
    if not translation.is_user_locked(request):
        # Create an explicit (permanent) lock owned by this user
        translation.create_lock(request.user, True)
        messages.info(request, _('Translation is now locked for you.'))
    return HttpResponseRedirect(translation.get_absolute_url())
@login_required
def update_lock(request, project, subproject, lang):
    '''
    AJAX endpoint refreshing the lock timestamp while a user keeps
    the translate page open.
    '''
    translation = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    translation.check_acl(request)
    if not translation.is_user_locked(request):
        # presumably is_user_locked() is only True when somebody ELSE
        # holds the lock -- TODO confirm against the Translation model
        translation.update_lock_time()
    return HttpResponse('ok')
@login_required
@permission_required('trans.lock_translation')
def unlock_translation(request, project, subproject, lang):
    '''
    Releases the lock on a translation (unless another user holds it).
    '''
    translation = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    translation.check_acl(request)
    if not translation.is_user_locked(request):
        # Passing None as the owner drops the lock
        translation.create_lock(None)
        messages.info(
            request,
            _('Translation is now open for translation updates.')
        )
    return HttpResponseRedirect(translation.get_absolute_url())
@login_required
@permission_required('trans.lock_subproject')
def lock_subproject(request, project, subproject):
    '''
    Locks a whole subproject against translation updates.
    '''
    subprj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    subprj.check_acl(request)
    # Flush pending changes before locking the component
    subprj.commit_pending()
    subprj.locked = True
    subprj.save()
    messages.info(
        request,
        _('Subproject is now locked for translation updates!')
    )
    return HttpResponseRedirect(subprj.get_absolute_url())
@login_required
@permission_required('trans.lock_subproject')
def unlock_subproject(request, project, subproject):
    '''
    Re-opens a subproject for translation updates.
    '''
    subprj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    subprj.check_acl(request)
    subprj.locked = False
    subprj.save()
    messages.info(
        request,
        _('Subproject is now open for translation updates.')
    )
    return HttpResponseRedirect(subprj.get_absolute_url())
@login_required
@permission_required('trans.lock_subproject')
def lock_project(request, project):
    '''
    Locks every subproject of a project against translation updates.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    # Flush pending changes before locking
    prj.commit_pending()
    for component in prj.subproject_set.all():
        component.locked = True
        component.save()
    messages.info(
        request,
        _('All subprojects are now locked for translation updates!')
    )
    return HttpResponseRedirect(prj.get_absolute_url())
@login_required
@permission_required('trans.lock_subproject')
def unlock_project(request, project):
    '''
    Re-opens every subproject of a project for translation updates.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    for component in prj.subproject_set.all():
        component.locked = False
        component.save()
    messages.info(request, _('Project is now open for translation updates.'))
    return HttpResponseRedirect(prj.get_absolute_url())
def download_translation(request, project, subproject, lang):
    '''
    Serves the translation file as a downloadable attachment, deriving
    mime type and extension from the translate-toolkit store class.
    '''
    obj = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    obj.check_acl(request)
    # Retrieve ttkit store to get extension and mime type
    store = obj.get_store()
    srcfilename = obj.get_filename()
    if store.Mimetypes is None:
        # Properties files do not expose mimetype
        mime = 'text/plain'
    else:
        mime = store.Mimetypes[0]
    if store.Extensions is None:
        # Typo in translate-toolkit 1.9, see
        # https://github.com/translate/translate/pull/10
        if hasattr(store, 'Exensions'):
            ext = store.Exensions[0]
        else:
            ext = 'txt'
    else:
        ext = store.Extensions[0]
    # Construct file name (do not use real filename as it is usually not
    # that useful)
    filename = '%s-%s-%s.%s' % (project, subproject, lang, ext)
    # Django wrapper for sending file
    # (file() is the Python 2 builtin; the wrapper streams it in chunks)
    wrapper = FixedFileWrapper(file(srcfilename))
    response = HttpResponse(wrapper, mimetype=mime)
    # Fill in response headers
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    response['Content-Length'] = os.path.getsize(srcfilename)
    return response
def bool2str(val):
    '''
    Serializes a truthy value as 'on' (HTML checkbox convention) and
    anything falsy as the empty string.
    '''
    return 'on' if val else ''
def parse_search_url(request):
    '''
    Parses navigation and search parameters shared by the translate
    views. Returns a 9-tuple: (rqtype, direction, pos, search_query,
    search_type, search_source, search_target, search_context,
    search_url).
    '''
    # Check where we are
    rqtype = request.REQUEST.get('type', 'all')
    direction = request.REQUEST.get('dir', 'forward')
    pos = request.REQUEST.get('pos', '-1')
    try:
        pos = int(pos)
    except (ValueError, TypeError):
        # Malformed position falls back to "before the first unit";
        # the previous bare except would also have masked real errors.
        pos = -1
    # Pre-process search form
    if request.method == 'POST':
        search_form = SearchForm(request.POST)
    else:
        search_form = SearchForm(request.GET)
    if search_form.is_valid():
        search_query = search_form.cleaned_data['q']
        search_type = search_form.cleaned_data['search']
        if search_type == '':
            search_type = 'ftx'
        search_source = search_form.cleaned_data['src']
        search_target = search_form.cleaned_data['tgt']
        search_context = search_form.cleaned_data['ctx']
        # Sane defaults
        if not search_context and not search_source and not search_target:
            search_source = True
            search_target = True
        search_url = '&q=%s&src=%s&tgt=%s&ctx=%s&search=%s' % (
            search_query,
            bool2str(search_source),
            bool2str(search_target),
            bool2str(search_context),
            search_type,
        )
    else:
        search_query = ''
        search_type = 'ftx'
        search_source = True
        search_target = True
        search_context = False
        search_url = ''
    if 'date' in request.REQUEST:
        search_url += '&date=%s' % request.REQUEST['date']
    return (
        rqtype,
        direction,
        pos,
        search_query,
        search_type,
        search_source,
        search_target,
        search_context,
        search_url
    )
def get_filter_name(rqtype, search_query):
    '''
    Returns name of current filter.
    '''
    if search_query != '':
        return _('Search for "%s"') % search_query
    # Simple filters mapped to their display names; 'all' and unknown
    # values fall through to the checks lookup and finally to None.
    simple_names = {
        'fuzzy': _('Fuzzy strings'),
        'untranslated': _('Untranslated strings'),
        'suggestions': _('Strings with suggestions'),
        'allchecks': _('Strings with any failing checks'),
    }
    if rqtype in simple_names:
        return simple_names[rqtype]
    if rqtype in CHECKS:
        return CHECKS[rqtype].name
    return None
def translate(request, project, subproject, lang):
    '''
    Main translating view: handles saving translations, suggestions,
    merging of identical strings and navigation/search over units.
    '''
    obj = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    obj.check_acl(request)

    # Check locks
    project_locked, user_locked, own_lock = obj.is_locked(request, True)
    locked = project_locked or user_locked

    if request.user.is_authenticated():
        profile = request.user.get_profile()
        antispam = None
    else:
        profile = None
        antispam = AntispamForm()

    secondary = None
    unit = None

    rqtype, direction, pos, search_query, search_type, search_source, search_target, search_context, search_url = parse_search_url(request)

    # Any form submitted?
    if request.method == 'POST':

        # Antispam protection
        if not request.user.is_authenticated():
            antispam = AntispamForm(request.POST)
            if not antispam.is_valid():
                # Silently redirect to next entry
                return HttpResponseRedirect('%s?type=%s&pos=%d%s' % (
                    obj.get_translate_url(),
                    rqtype,
                    pos,
                    search_url
                ))

        form = TranslationForm(request.POST)
        if form.is_valid() and not project_locked:
            # Check whether translation is not outdated
            obj.check_sync()
            try:
                try:
                    unit = Unit.objects.get(
                        checksum=form.cleaned_data['checksum'],
                        translation=obj
                    )
                except Unit.MultipleObjectsReturned:
                    # Possible temporary inconsistency caused by ongoing update
                    # of repo, let's pretend everything is okay
                    unit = Unit.objects.filter(
                        checksum=form.cleaned_data['checksum'],
                        translation=obj
                    )[0]
                if 'suggest' in request.POST:
                    # Handle suggestion saving
                    user = request.user
                    if isinstance(user, AnonymousUser):
                        user = None
                    if form.cleaned_data['target'] == len(form.cleaned_data['target']) * ['']:
                        messages.error(request, _('Your suggestion is empty!'))
                        # Stay on same entry
                        return HttpResponseRedirect(
                            '%s?type=%s&pos=%d&dir=stay%s' % (
                                obj.get_translate_url(),
                                rqtype,
                                pos,
                                search_url
                            )
                        )
                    # Create the suggestion
                    sug = Suggestion.objects.create(
                        target=join_plural(form.cleaned_data['target']),
                        checksum=unit.checksum,
                        language=unit.translation.language,
                        project=unit.translation.subproject.project,
                        user=user)
                    # Record in change
                    Change.objects.create(
                        unit=unit,
                        action=Change.ACTION_SUGGESTION,
                        translation=unit.translation,
                        user=user
                    )
                    # Invalidate counts cache
                    unit.translation.invalidate_cache('suggestions')
                    # Invite user to become translator if there is nobody else
                    recent_changes = Change.objects.content().filter(
                        translation=unit.translation,
                    ).exclude(
                        user=None
                    ).order_by('-timestamp')
                    # Bug fix: the condition previously carried an
                    # 'or True' debugging leftover which showed this
                    # invitation to everybody, contradicting the intent
                    # documented above.
                    if recent_changes.count() == 0:
                        messages.info(
                            request,
                            _('There is currently no active translator for this translation, please consider becoming a translator as your suggestion might otherwise remain unreviewed.')
                        )
                    # Notify subscribed users
                    subscriptions = Profile.objects.subscribed_new_suggestion(
                        obj.subproject.project,
                        obj.language,
                        request.user
                    )
                    for subscription in subscriptions:
                        subscription.notify_new_suggestion(obj, sug, unit)
                    # Update suggestion stats
                    if profile is not None:
                        profile.suggested += 1
                        profile.save()
                elif not request.user.is_authenticated():
                    # We accept translations only from authenticated
                    messages.error(
                        request,
                        _('You need to log in to be able to save translations!')
                    )
                elif not request.user.has_perm('trans.save_translation'):
                    # Need privilege to save
                    messages.error(
                        request,
                        _('You don\'t have privileges to save translations!')
                    )
                elif not user_locked:
                    # Remember old checks
                    oldchecks = set(
                        unit.active_checks().values_list('check', flat=True)
                    )
                    # Update unit and save it
                    unit.target = join_plural(form.cleaned_data['target'])
                    unit.fuzzy = form.cleaned_data['fuzzy']
                    saved = unit.save_backend(request)

                    if saved:
                        # Get new set of checks
                        newchecks = set(
                            unit.active_checks().values_list('check', flat=True)
                        )
                        # Did we introduce any new failures?
                        if newchecks > oldchecks:
                            # Show message to user
                            messages.error(
                                request,
                                _('Some checks have failed on your translation!')
                            )
                            # Stay on same entry
                            return HttpResponseRedirect(
                                '%s?type=%s&pos=%d&dir=stay%s' % (
                                    obj.get_translate_url(),
                                    rqtype,
                                    pos,
                                    search_url
                                )
                            )
                # Redirect to next entry
                return HttpResponseRedirect('%s?type=%s&pos=%d%s' % (
                    obj.get_translate_url(),
                    rqtype,
                    pos,
                    search_url
                ))
            except Unit.DoesNotExist:
                logger.error(
                    'message %s disappeared!',
                    form.cleaned_data['checksum']
                )
                messages.error(
                    request,
                    _('Message you wanted to translate is no longer available!')
                )

    # Handle translation merging
    if 'merge' in request.GET and not locked:
        if not request.user.has_perm('trans.save_translation'):
            # Need privilege to save
            messages.error(
                request,
                _('You don\'t have privileges to save translations!')
            )
        else:
            try:
                mergeform = MergeForm(request.GET)
                if mergeform.is_valid():
                    try:
                        unit = Unit.objects.get(
                            checksum=mergeform.cleaned_data['checksum'],
                            translation=obj
                        )
                    except Unit.MultipleObjectsReturned:
                        # Possible temporary inconsistency caused by ongoing
                        # update of repo, let's pretend everything is okay
                        unit = Unit.objects.filter(
                            checksum=mergeform.cleaned_data['checksum'],
                            translation=obj
                        )[0]

                    merged = Unit.objects.get(
                        pk=mergeform.cleaned_data['merge']
                    )

                    if unit.checksum != merged.checksum:
                        messages.error(
                            request,
                            _('Can not merge different messages!')
                        )
                    else:
                        # Store unit
                        unit.target = merged.target
                        unit.fuzzy = merged.fuzzy
                        saved = unit.save_backend(request)
                        # Update stats if there was change
                        if saved:
                            profile.translated += 1
                            profile.save()
                        # Redirect to next entry
                        return HttpResponseRedirect('%s?type=%s&pos=%d%s' % (
                            obj.get_translate_url(),
                            rqtype,
                            pos,
                            search_url
                        ))
            except Unit.DoesNotExist:
                # Bug fix: this handler previously referenced 'form',
                # which is unbound on GET requests; the data belongs to
                # mergeform here.
                logger.error(
                    'message %s disappeared!',
                    mergeform.cleaned_data['checksum']
                )
                messages.error(
                    request,
                    _('Message you wanted to translate is no longer available!')
                )

    # Handle accepting/deleting suggestions
    if not locked and ('accept' in request.GET or 'delete' in request.GET):
        # Check for authenticated users
        if not request.user.is_authenticated():
            messages.error(request, _('You need to log in to be able to manage suggestions!'))
            return HttpResponseRedirect('%s?type=%s&pos=%d&dir=stay%s' % (
                obj.get_translate_url(),
                rqtype,
                pos,
                search_url
            ))

        # Parse suggestion ID
        if 'accept' in request.GET:
            if not request.user.has_perm('trans.accept_suggestion'):
                messages.error(request, _('You do not have privilege to accept suggestions!'))
                return HttpResponseRedirect('%s?type=%s&pos=%d&dir=stay%s' % (
                    obj.get_translate_url(),
                    rqtype,
                    pos,
                    search_url
                ))
            sugid = request.GET['accept']
        else:
            if not request.user.has_perm('trans.delete_suggestion'):
                messages.error(request, _('You do not have privilege to delete suggestions!'))
                return HttpResponseRedirect('%s?type=%s&pos=%d&dir=stay%s' % (
                    obj.get_translate_url(),
                    rqtype,
                    pos,
                    search_url
                ))
            sugid = request.GET['delete']
        try:
            sugid = int(sugid)
            suggestion = Suggestion.objects.get(pk=sugid)
        except (ValueError, Suggestion.DoesNotExist):
            # Non-numeric or stale suggestion id; the previous bare
            # except would also have swallowed unrelated errors.
            suggestion = None

        if suggestion is not None:
            if 'accept' in request.GET:
                # Accept suggestion
                suggestion.accept(request)
                # Invalidate caches
                for unit in Unit.objects.filter(checksum=suggestion.checksum):
                    unit.translation.invalidate_cache('suggestions')
            # Delete suggestion in both cases (accepted ones are no longer
            # needed)
            suggestion.delete()
        else:
            messages.error(request, _('Invalid suggestion!'))

        # Redirect to same entry for possible editing
        return HttpResponseRedirect('%s?type=%s&pos=%d&dir=stay%s' % (
            obj.get_translate_url(),
            rqtype,
            pos,
            search_url
        ))

    reviewform = ReviewForm(request.GET)

    if reviewform.is_valid():
        allunits = obj.unit_set.review(
            reviewform.cleaned_data['date'],
            request.user
        )
        # Review
        if direction == 'stay':
            units = allunits.filter(position=pos)
        elif direction == 'back':
            units = allunits.filter(position__lt=pos).order_by('-position')
        else:
            units = allunits.filter(position__gt=pos)
    elif search_query != '':
        # Apply search conditions
        if search_type == 'exact':
            query = Q()
            if search_source:
                query |= Q(source=search_query)
            if search_target:
                query |= Q(target=search_query)
            if search_context:
                query |= Q(context=search_query)
            allunits = obj.unit_set.filter(query)
        elif search_type == 'substring':
            query = Q()
            if search_source:
                query |= Q(source__icontains=search_query)
            if search_target:
                query |= Q(target__icontains=search_query)
            if search_context:
                query |= Q(context__icontains=search_query)
            allunits = obj.unit_set.filter(query)
        else:
            allunits = obj.unit_set.search(
                search_query,
                search_source,
                search_context,
                search_target
            )
        if direction == 'stay':
            # NOTE: unlike the review branch this deliberately uses the
            # unfiltered unit set, so the current unit stays reachable
            # even when it no longer matches the search.
            units = obj.unit_set.filter(position=pos)
        elif direction == 'back':
            units = allunits.filter(position__lt=pos).order_by('-position')
        else:
            units = allunits.filter(position__gt=pos)
    elif 'checksum' in request.GET:
        allunits = obj.unit_set.filter(checksum=request.GET['checksum'])
        units = allunits
    else:
        allunits = obj.unit_set.filter_type(rqtype, obj)
        # What unit set is about to show
        if direction == 'stay':
            units = obj.unit_set.filter(position=pos)
        elif direction == 'back':
            units = allunits.filter(position__lt=pos).order_by('-position')
        else:
            units = allunits.filter(position__gt=pos)

    # If we failed to get unit above or on no POST
    if unit is None:
        # Grab actual unit
        try:
            unit = units[0]
        except IndexError:
            messages.info(request, _('You have reached end of translating.'))
            return HttpResponseRedirect(obj.get_absolute_url())

    # Show secondary languages for logged in users
    if profile:
        secondary_langs = profile.secondary_languages.exclude(
            id=unit.translation.language.id
        )
        project = unit.translation.subproject.project
        secondary = Unit.objects.filter(
            checksum=unit.checksum,
            translated=True,
            translation__subproject__project=project,
            translation__language__in=secondary_langs,
        )
        # distinct('target') works with Django 1.4 so let's emulate that
        # based on presumption we won't get too many results
        targets = {}
        res = []
        for lang in secondary:
            if lang.target in targets:
                continue
            targets[lang.target] = 1
            res.append(lang)
        secondary = res

    # Prepare form
    form = TranslationForm(initial={
        'checksum': unit.checksum,
        'target': (unit.translation.language, unit.get_target_plurals()),
        'fuzzy': unit.fuzzy,
    })

    total = obj.unit_set.all().count()
    filter_count = allunits.count()

    return render_to_response(
        'translate.html',
        RequestContext(request, {
            'object': obj,
            'unit': unit,
            'last_changes': unit.change_set.all()[:10],
            'total': total,
            'type': rqtype,
            'filter_name': get_filter_name(rqtype, search_query),
            'filter_count': filter_count,
            'filter_pos': filter_count + 1 - units.count(),
            'form': form,
            'antispam': antispam,
            'comment_form': CommentForm(),
            'target_language': obj.language.code.replace('_', '-').lower(),
            'update_lock': own_lock,
            'secondary': secondary,
            'search_query': search_query,
            'search_url': search_url,
            'search_source': bool2str(search_source),
            'search_type': search_type,
            'search_target': bool2str(search_target),
            'search_context': bool2str(search_context),
            'locked': locked,
            'user_locked': user_locked,
            'project_locked': project_locked,
        },
        ))
@login_required
def comment(request, pk):
    '''
    Adds new comment.

    Comments posted with type=source attach to the source string
    (language is None); anything else attaches to this translation's
    language.
    '''
    obj = get_object_or_404(Unit, pk=pk)
    obj.check_acl(request)
    if request.POST.get('type', '') == 'source':
        lang = None
    else:
        lang = obj.translation.language
    form = CommentForm(request.POST)
    if form.is_valid():
        new_comment = Comment.objects.create(
            user=request.user,
            checksum=obj.checksum,
            project=obj.translation.subproject.project,
            comment=form.cleaned_data['comment'],
            language=lang
        )
        # Record the event in the change history
        Change.objects.create(
            unit=obj,
            action=Change.ACTION_COMMENT,
            translation=obj.translation,
            user=request.user
        )
        # Invalidate counts cache
        if lang is None:
            obj.translation.invalidate_cache('sourcecomments')
        else:
            obj.translation.invalidate_cache('targetcomments')
        messages.info(request, _('Posted new comment'))
        # Notify subscribed users
        subscriptions = Profile.objects.subscribed_new_comment(
            obj.translation.subproject.project,
            lang,
            request.user
        )
        for subscription in subscriptions:
            subscription.notify_new_comment(obj, new_comment)
        # Notify upstream
        # (source comments only, and only when the subproject configured
        # a bug report address)
        if lang is None and obj.translation.subproject.report_source_bugs != '':
            send_notification_email(
                'en',
                obj.translation.subproject.report_source_bugs,
                'new_comment',
                obj.translation,
                {
                    'unit': obj,
                    'comment': new_comment,
                    'subproject': obj.translation.subproject,
                },
                from_email=request.user.email,
            )
    else:
        messages.error(request, _('Failed to add comment!'))
    return HttpResponseRedirect(obj.get_absolute_url())
def get_string(request, checksum):
    '''
    AJAX handler for getting raw string.
    '''
    matching = Unit.objects.filter(checksum=checksum)
    if matching.count() == 0:
        # Unknown checksum: answer with an empty body rather than a 404
        return HttpResponse('')
    unit = matching[0]
    unit.check_acl(request)
    return HttpResponse(unit.get_source_plurals()[0])
def get_similar(request, unit_id):
    '''
    AJAX handler for getting similar strings.
    '''
    unit = get_object_or_404(Unit, pk=int(unit_id))
    unit.check_acl(request)
    similar_units = Unit.objects.similar(unit)
    # Emulate distinct('target') (needs Django 1.4): keep only the
    # first unit per target, presuming the result set is small.
    seen_targets = set()
    deduped = []
    for candidate in similar_units:
        if candidate.target in seen_targets:
            continue
        seen_targets.add(candidate.target)
        deduped.append(candidate)
    return render_to_response('js/similar.html', RequestContext(request, {
        'similar': deduped,
    }))
def get_other(request, unit_id):
    '''
    AJAX handler for same strings in other subprojects.
    '''
    unit = get_object_or_404(Unit, pk=int(unit_id))
    unit.check_acl(request)
    other = Unit.objects.same(unit)
    # Only rqtype and search_url from the parsed search state are used;
    # they feed the navigation links rendered by the template.
    rqtype, direction, pos, search_query, search_type, search_source, search_target, search_context, search_url = parse_search_url(request)
    return render_to_response('js/other.html', RequestContext(request, {
        'other': other,
        'unit': unit,
        'type': rqtype,
        'search_url': search_url,
    }))
def get_dictionary(request, unit_id):
    '''
    Lists words from dictionary for current translation.
    '''
    unit = get_object_or_404(Unit, pk=int(unit_id))
    unit.check_acl(request)
    words = set()
    # Prepare analyzers
    # - standard analyzer simply splits words
    # - stemming extracts stems, to catch things like plurals
    # (presumably Whoosh analyzers -- confirm against module imports)
    analyzers = (StandardAnalyzer(), StemmingAnalyzer())
    # Extract words from all plurals and from context
    for text in unit.get_source_plurals() + [unit.context]:
        for analyzer in analyzers:
            words = words.union([token.text for token in analyzer(text)])
    # Grab all words in the dictionary
    dictionary = Dictionary.objects.filter(
        project = unit.translation.subproject.project,
        language = unit.translation.language
    )
    if len(words) == 0:
        # No extracted words, no dictionary
        dictionary = dictionary.none()
    else:
        # Build the query (can not use __in as we want case insensitive lookup)
        query = Q()
        for word in words:
            query |= Q(source__iexact=word)
        # Filter dictionary
        dictionary = dictionary.filter(query)
    return render_to_response('js/dictionary.html', RequestContext(request, {
        'dictionary': dictionary,
    }))
@login_required
@permission_required('trans.ignore_check')
def ignore_check(request, check_id):
    '''
    Flags a single quality check as ignored and refreshes caches.
    '''
    check = get_object_or_404(Check, pk=int(check_id))
    check.project.check_acl(request)
    # Mark check for ignoring
    check.ignore = True
    check.save()
    # Invalidate caches of every translation containing this string
    for unit in Unit.objects.filter(checksum=check.checksum):
        unit.translation.invalidate_cache()
    # response for AJAX
    return HttpResponse('ok')
@login_required
@permission_required('trans.upload_translation')
def upload_translation(request, project, subproject, lang):
    '''
    Handling of translation uploads.
    '''
    obj = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    obj.check_acl(request)

    if not obj.is_locked(request) and request.method == 'POST':
        # Pick the upload form matching the user's privileges
        if request.user.has_perm('trans.author_translation'):
            form = ExtraUploadForm(request.POST, request.FILES)
        elif request.user.has_perm('trans.overwrite_translation'):
            form = UploadForm(request.POST, request.FILES)
        else:
            form = SimpleUploadForm(request.POST, request.FILES)
        if form.is_valid():
            if request.user.has_perm('trans.author_translation') and form.cleaned_data['author_name'] != '' and form.cleaned_data['author_email'] != '':
                author = '%s <%s>' % (form.cleaned_data['author_name'], form.cleaned_data['author_email'])
            else:
                author = None
            if request.user.has_perm('trans.overwrite_translation'):
                overwrite = form.cleaned_data['overwrite']
            else:
                overwrite = False
            try:
                ret = obj.merge_upload(request, request.FILES['file'], overwrite, author, merge_header=form.cleaned_data['merge_header'])
                if ret:
                    messages.info(request, _('File content successfully merged into translation.'))
                else:
                    messages.info(request, _('There were no new strings in uploaded file.'))
            except Exception as e:
                # i18n fix: translate the template first and apply the
                # argument afterwards; formatting inside _() produced a
                # msgid that never matches the catalog.
                messages.error(request, _('File content merge failed: %s') % unicode(e))

    return HttpResponseRedirect(obj.get_absolute_url())
def not_found(request):
    '''
    Error handler showing list of available projects.
    '''
    template = loader.get_template('404.html')
    return HttpResponseNotFound(
        template.render(RequestContext(request, {
            'request_path': request.path,
            'title': _('Page Not Found'),
            # Only list projects the current user may actually access
            'projects': Project.objects.all_acl(request.user),
        }
        )))
# Cache this page for one month, it should not really change much
@cache_page(30 * 24 * 3600)
def js_config(request):
    '''
    Generates settings for javascript. Includes things like
    API keys for translation services or list of languages they
    support.
    '''
    # Apertium support
    if appsettings.MT_APERTIUM_KEY is not None and appsettings.MT_APERTIUM_KEY != '':
        try:
            # Network call; failures fall back to a known-good subset below
            listpairs = urllib2.urlopen('http://api.apertium.org/json/listPairs?key=%s' % appsettings.MT_APERTIUM_KEY)
            pairs = listpairs.read()
            parsed = json.loads(pairs)
            apertium_langs = [p['targetLanguage'] for p in parsed['responseData'] if p['sourceLanguage'] == 'en']
        except Exception as e:
            logger.error('failed to get supported languages from Apertium, using defaults (%s)', str(e))
            apertium_langs = ['gl', 'ca', 'es', 'eo']
    else:
        apertium_langs = None
    # Microsoft translator support
    if appsettings.MT_MICROSOFT_KEY is not None and appsettings.MT_MICROSOFT_KEY != '':
        try:
            # Network call; failures fall back to a known-good subset below
            listpairs = urllib2.urlopen('http://api.microsofttranslator.com/V2/Http.svc/GetLanguagesForTranslate?appID=%s' % appsettings.MT_MICROSOFT_KEY)
            data = listpairs.read()
            parsed = ElementTree.fromstring(data)
            microsoft_langs = [p.text for p in parsed.getchildren()]
        except Exception as e:
            logger.error('failed to get supported languages from Microsoft, using defaults (%s)', str(e))
            microsoft_langs = [
                'ar', 'bg', 'ca', 'zh-CHS', 'zh-CHT', 'cs', 'da', 'nl', 'en',
                'et', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu',
                'id', 'it', 'ja', 'ko', 'lv', 'lt', 'no', 'fa', 'pl', 'pt',
                'ro', 'ru', 'sk', 'sl', 'es', 'sv', 'th', 'tr', 'uk', 'vi'
            ]
    else:
        microsoft_langs = None
    return render_to_response('js/config.js', RequestContext(request, {
        'apertium_langs': apertium_langs,
        'microsoft_langs': microsoft_langs,
    }),
    mimetype = 'application/javascript')
def about(request):
    '''
    About page showing global statistics of this Weblate instance.
    '''
    context = {}
    versions = get_versions()
    totals = Profile.objects.aggregate(Sum('translated'), Sum('suggested'))
    total_strings = 0
    for project in SubProject.objects.iterator():
        try:
            total_strings += project.translation_set.all()[0].total
        except (Translation.DoesNotExist, IndexError):
            # Bug fix: slicing an empty queryset raises IndexError, not
            # DoesNotExist, so both must be caught to survive subprojects
            # without any translation.
            pass
    context['title'] = _('About Weblate')
    context['total_translations'] = totals['translated__sum']
    context['total_suggestions'] = totals['suggested__sum']
    context['total_users'] = Profile.objects.count()
    context['total_strings'] = total_strings
    context['total_languages'] = Language.objects.filter(
        translation__total__gt=0
    ).distinct().count()
    context['total_checks'] = Check.objects.count()
    context['ignored_checks'] = Check.objects.filter(ignore=True).count()
    context['versions'] = versions
    return render_to_response('about.html', RequestContext(request, context))
@user_passes_test(lambda u: u.has_perm('trans.commit_translation') or u.has_perm('trans.update_translation'))
def git_status_project(request, project):
    '''
    Renders the git repository status fragment for a project.
    '''
    prj = get_object_or_404(Project, slug=project)
    prj.check_acl(request)
    context = {'object': prj}
    return render_to_response('js/git-status.html', RequestContext(request, context))
@user_passes_test(lambda u: u.has_perm('trans.commit_translation') or u.has_perm('trans.update_translation'))
def git_status_subproject(request, project, subproject):
    '''
    Renders the git repository status fragment for a subproject.
    '''
    subprj = get_object_or_404(SubProject, slug=subproject, project__slug=project)
    subprj.check_acl(request)
    context = {'object': subprj}
    return render_to_response('js/git-status.html', RequestContext(request, context))
@user_passes_test(lambda u: u.has_perm('trans.commit_translation') or u.has_perm('trans.update_translation'))
def git_status_translation(request, project, subproject, lang):
    '''
    Renders the git repository status fragment for one translation.
    '''
    translation = get_object_or_404(
        Translation,
        language__code=lang,
        subproject__slug=subproject,
        subproject__project__slug=project,
        enabled=True
    )
    translation.check_acl(request)
    context = {'object': translation}
    return render_to_response('js/git-status.html', RequestContext(request, context))
def data_root(request):
    """Render the index page of machine-readable data exports."""
    current_site = Site.objects.get_current()
    context = {
        'site_domain': current_site.domain,
        'api_docs': weblate.get_doc_url('api', 'exports'),
        'rss_docs': weblate.get_doc_url('api', 'rss'),
        # Only projects the requesting user may see.
        'projects': Project.objects.all_acl(request.user),
    }
    return render_to_response('data-root.html',
                              RequestContext(request, context))
def data_project(request, project):
    """Render the data-export page for a single project."""
    project_obj = get_object_or_404(Project, slug=project)
    # Enforce per-project access control before exposing exports.
    project_obj.check_acl(request)
    current_site = Site.objects.get_current()
    context = {
        'object': project_obj,
        'site_domain': current_site.domain,
        'api_docs': weblate.get_doc_url('api', 'exports'),
        'rss_docs': weblate.get_doc_url('api', 'rss'),
    }
    return render_to_response('data.html', RequestContext(request, context))
| power12317/weblate | weblate/trans/views.py | Python | gpl-3.0 | 68,329 |
# coding=utf-8
"""Dummy test.
Pointless dummy test.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# import pysnapsync.server
# import pysnapsync.client
def inc(arg):
    """Return *arg* incremented by one."""
    incremented = arg + 1
    return incremented
def test_answer():
    """Assert that incrementing 3 yields 4."""
    expected = 4
    assert inc(3) == expected
| dtaylor84/pysnapsync | pysnapsync/tests/test_dummy.py | Python | gpl-3.0 | 370 |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import View
from .models import \
Course, Registration, Task, TaskSubmission, ScoreProfile
from .forms import TaskSubmissionForm
class CourseListView(View):
    # Lists the logged-in user's courses plus a global top-10 highscore.
    template_name = 'courses/course_select.html'
    @method_decorator(login_required)
    def get(self, request, *args, **kwargs):
        """Render the course-selection page for the current user."""
        context = {
            'courses': request.user.course_set.all(),
            'profile': ScoreProfile.get_score_profile(request.user),
            # Top ten score profiles, highest score first.
            'highscore': ScoreProfile.objects.all().order_by('-score')[:10]
        }
        return render(request,
                      self.template_name,
                      context)
    @method_decorator(login_required)
    def post(self, request, *args, **kwargs):
        # NOTE(review): unimplemented stub — returns None, which Django
        # rejects as a view response; confirm whether POST is reachable.
        pass
class ProfileView(View):
    """Show the logged-in user's rank, score, courses and solved tasks."""
    @method_decorator(login_required)
    def get(self, request, *args, **kwargs):
        """Render the profile page for the current user."""
        profile = ScoreProfile.get_score_profile(request.user)
        # Task ids for which this user has at least one valid submission.
        solved_task_ids = TaskSubmission.objects.filter(
            submitted_by=request.user,
            valid=True).values_list('task',
                                    flat=True)
        context = {
            'username': request.user.username,
            'rank': profile.current_rank,
            'score': profile.score,
            'courses': request.user.course_set.all(),
            'valid_submissions': solved_task_ids,
        }
        return render(request, 'courses/profile.html', context)
class TaskSubmissionView(View):
    # Displays a task with its submission form and handles file uploads.
    form_class = TaskSubmissionForm
    template_name = 'courses/task.html'
    @method_decorator(login_required)
    def get(self, request, *args, **kwargs):
        """Render the task page with the user's previous submissions."""
        context = self.get_context_data()
        context['form'] = self.form_class()
        # All submissions this user made for this task...
        context['subs'] = TaskSubmission.objects.filter(
            submitted_by=request.user,
            task=self.kwargs['task_id']
        )
        # ...and the subset that was accepted as valid.
        context['valid_subs'] = context['subs'].filter(
            valid=True
        )
        return render(request, self.template_name, context)
    @method_decorator(login_required)
    def post(self, request, *args, **kwargs):
        """Store an uploaded submission, then re-render the task page."""
        task = Task.objects.get(pk=self.kwargs['task_id'])
        # Pre-fill the model instance; the form only supplies the upload.
        sub = TaskSubmission()
        sub.task = task
        sub.submitted_by = request.user
        # New submissions start unvalidated.
        sub.valid = False
        form = self.form_class(request.POST, request.FILES, instance=sub)
        if form.is_valid():
            form.save()
        # Invalid forms are silently dropped and the page re-rendered.
        return self.get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        """Return the base context containing the current task."""
        context = {}
        context['task'] = Task.objects.get(pk=self.kwargs['task_id'])
        return context
class CourseRegistrationView(View):
    """Replace the current user's registration for a course.

    POST parameters:
        course_id: primary key of the course.
        sign_up:  one of 'master', 'kid' or 'reserve'; any other value
                  just removes the existing registration.
    """
    # Maps the submitted 'sign_up' value to (code_master, role).
    _SIGN_UP_ROLES = {
        u'master': (True, Registration.CODE_MASTER),
        u'kid': (False, Registration.KID),
        u'reserve': (False, Registration.RESERVE),
    }
    @method_decorator(login_required)
    def post(self, request, *args, **kwargs):
        """Delete any previous registration and create the requested one."""
        course_id = request.POST['course_id']
        try:
            course = Course.objects.get(pk=course_id)
        except Course.DoesNotExist:
            # The original truthiness check was unreachable: get() raises
            # instead of returning None, so a bad id produced a 500.
            # Bail out gracefully as the dead `else: return` intended.
            return
        # Drop any previous registration before creating the new one.
        Registration.objects.filter(user=request.user,
                                    course=course).delete()
        entry = self._SIGN_UP_ROLES.get(request.POST['sign_up'])
        if entry is not None:
            code_master, role = entry
            Registration(user=request.user,
                         course=course,
                         granted=False,
                         code_master=code_master,
                         role=role).save()
        # NOTE(review): Django views should return an HttpResponse; this
        # view returns None on every path (as did the original) — confirm
        # how the caller consumes it before changing that contract.
        return
| iver56/trondheim.kodeklubben.no | backend/wsgi/courses/views.py | Python | gpl-3.0 | 4,033 |
import hashlib
import re
import os
import pickle
from functools import partial
from externals.lib.misc import file_scan, update_dict
import logging
log = logging.getLogger(__name__)
VERSION = "0.0"
# Constants --------------------------------------------------------------------
# Default folder that matched files are linked into.
DEFAULT_DESTINATION = './files/'
DEFAULT_CACHE_FILENAME = 'hash_cache.pickle'
# Video and subtitle extensions considered when scanning folders.
DEFAULT_FILE_EXTS = {'mp4', 'avi', 'rm', 'mkv', 'ogm', 'ssa', 'srt', 'ass'}
# Utils ------------------------------------------------------------------------
def hash_files(folder, file_regex=None, hasher=hashlib.sha256):
    """Scan *folder* and map each file's content hash to its record.

    Later duplicates with the same hash overwrite earlier ones.
    """
    hashed = {}
    for entry in file_scan(folder, file_regex=file_regex, hasher=hasher):
        hashed[entry.hash] = entry
    return hashed
# ------------------------------------------------------------------------------
def hash_source_dest(source_folder=None, destination_folder=None, hasher=hashlib.sha256, file_exts=DEFAULT_FILE_EXTS, **kwargs):
    """Hash matching media files in both the source and destination trees.

    Returns a dict with 'source_files' and 'destination_files', each a
    mapping of content hash -> scanned file record.
    """
    # One shared extension filter and hasher for both folders.
    pattern = re.compile(r'.*\.({})$'.format('|'.join(file_exts)))
    def _scan(folder):
        return hash_files(folder, hasher=hasher, file_regex=pattern)
    return {
        'source_files': _scan(source_folder),
        'destination_files': _scan(destination_folder),
    }
def symlink_matched_files(source_files=None, destination_files=None, destination_folder=None, dry_run=False, **kwargs):
    """Symlink source files whose hash is absent from the destination.

    Files are processed in filename order; failures are logged, not raised.
    """
    missing_hashes = set(source_files.keys()) - set(destination_files.keys())
    for key in sorted(missing_hashes, key=lambda k: source_files[k].file):
        record = source_files[key]
        log.debug(record.file)
        if dry_run:
            continue
        try:
            os.symlink(record.absolute,
                       os.path.join(destination_folder, record.file))
        except OSError:
            # e.g. link already exists or filesystem lacks symlink support.
            log.info('unable to symlink {0}'.format(record.file))
# ------------------------------------------------------------------------------
def move_files():
    # TODO: unimplemented placeholder (presumably for relocating matched
    # files instead of symlinking them — confirm intended semantics).
    pass
# Command Line -----------------------------------------------------------------
def get_args():
    """Parse command-line arguments and return them as a plain dict."""
    import argparse
    parser = argparse.ArgumentParser(
        description="""
            Find the duplicates
        """,
        epilog=""" """
    )
    # Folders
    parser.add_argument('-d', '--destination_folder', action='store', help='', default=DEFAULT_DESTINATION)
    parser.add_argument('-s', '--source_folder', action='store', help='', required=True)
    parser.add_argument('-e', '--file_exts', nargs='*', help='file exts to find', default=DEFAULT_FILE_EXTS)
    # Operation
    #parser.add_argument('-c', '--copy', action='store_true', help='copy files to destination (to be ready for importing)', default=False)
    # Cache
    parser.add_argument('--cache_filename', action='store', help='', default=DEFAULT_CACHE_FILENAME)
    # Common
    parser.add_argument('--dry_run', action='store_true', help='', default=False)
    parser.add_argument('-v', '--verbose', action='store_true', help='', default=False)
    parser.add_argument('--version', action='version', version=VERSION)
    # vars() flattens the Namespace so callers can **-unpack the options.
    args = vars(parser.parse_args())
    return args
def main():
    """Entry point: hash both trees (cached) and symlink missing files."""
    args = get_args()
    logging.basicConfig(level=logging.DEBUG if args['verbose'] else logging.INFO)
    try:
        # Fast path: reuse previously computed hashes.
        with open(args['cache_filename'], 'rb') as f:
            data = pickle.load(f)
    except IOError:
        # Cache miss: hash everything and persist the result.
        # NOTE(review): a truncated/corrupt cache file raises a pickle
        # error here rather than IOError and is not handled — confirm.
        with open(args['cache_filename'], 'wb') as f:
            data = hash_source_dest(**args)
            pickle.dump(data, f)
    # Merge CLI options with the hash maps for the symlink step.
    symlink_matched_files(**update_dict(args.copy(), data))
if __name__ == "__main__":
    main()
| richlanc/KaraKara | website/karakara/scripts/hash_matcher.py | Python | gpl-3.0 | 3,469 |
from controlscript import *
# Python 2 smoke-test control script: announce the parameters received
# from the controlscript framework (star-imported above) and do nothing.
print "This is a simple control script. It just does nothing and exits successfully."
print "Start parameter is %s, additional parameters are %s" % (start, arguments)
class DoNothing(ControlAction):
    """No-op control action: prints a message on start and on stop."""
    def __init__(self):
        # Register with the framework under a human-readable name.
        ControlAction.__init__(self, "Do nothing")
    def start(self):
        print "Do nothing on start"
        print
    def stop(self):
        print "Do nothing on stop"
        print
# Run the control script with the single no-op action. (The closing line
# was corrupted by concatenated file metadata; restored here.)
ControlScript([
    DoNothing()
])
import logging
from django.core.management.base import BaseCommand
from catalog.core.visualization.data_access import visualization_cache
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that warms the visualization dataframe cache."""
    help = '''Build pandas dataframe cache of primary data'''

    def handle(self, *args, **options):
        """Populate all cached dataframes for the visualization app."""
        # (The original final line was corrupted by concatenated file
        # metadata; restored here.)
        visualization_cache.get_or_create_many()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 3 13:38:36 2015
@author: madengr
"""
from gnuradio import gr
import osmosdr
from gnuradio import filter as grfilter # Don't redefine Python's filter()
from gnuradio import blocks
from gnuradio import fft
from gnuradio.fft import window
from gnuradio import analog
from gnuradio import audio
import os
import time
import numpy as np
from gnuradio.filter import pfb
class BaseTuner(gr.hier_block2):
    """Some base methods that are the same between the known tuner types.
    See TunerDemodNBFM and TunerDemodAM for better documentation.
    """
    def set_center_freq(self, center_freq, rf_center_freq):
        """Sets baseband center frequency and file name
        Sets baseband center frequency of frequency translating FIR filter
        Also sets file name of wave file sink
        If tuner is tuned to zero Hz then set file name to /dev/null
        Otherwise set file name to tuned RF frequency in MHz
        Args:
            center_freq (float): Baseband center frequency in Hz
            rf_center_freq (float): RF center in Hz (for file name)
        """
        # Since the frequency (hence file name) changed, then close it
        self.blocks_wavfile_sink.close()
        # If we never wrote any data to the wavfile sink, delete the file
        self._delete_wavfile_if_empty()
        # Set the frequency
        self.freq_xlating_fir_filter_ccc.set_center_freq(center_freq)
        self.center_freq = center_freq
        # Set the file name
        if self.center_freq == 0 or not self.record:
            # If tuner at zero Hz, or record false, then file name to /dev/null
            file_name = "/dev/null"
        else:
            # Otherwise use frequency and time stamp for file name
            tstamp = "_" + str(int(time.time()))
            file_freq = (rf_center_freq + self.center_freq)/1E6
            file_freq = np.round(file_freq, 3)
            file_name = 'wav/' + '{:.3f}'.format(file_freq) + tstamp + ".wav"
            # Make sure the 'wav' directory exists
            try:
                os.mkdir('wav')
            except OSError: # will need to add something here for Win support
                pass # directory already exists
        self.file_name = file_name
        self.blocks_wavfile_sink.open(self.file_name)
    def _delete_wavfile_if_empty(self):
        """Delete the current wavfile if it's empty."""
        if (not self.record or not self.file_name or
                self.file_name == '/dev/null'):
            return
        # If we never wrote any data to the wavfile sink, delete
        # the (empty) wavfile
        # (44 bytes is presumably a bare WAV header with no samples —
        # TODO confirm against the wavfile_sink implementation.)
        if os.stat(self.file_name).st_size in (44, 0):  # ugly hack
            os.unlink(self.file_name)  # delete the file
    def set_squelch(self, squelch_db):
        """Sets the threshold for both squelches
        Args:
            squelch_db (float): Squelch in dB
        """
        self.analog_pwr_squelch_cc.set_threshold(squelch_db)
    def __del__(self):
        """Called when the object is destroyed."""
        # Make a best effort attempt to clean up our wavfile if it's empty
        try:
            self._delete_wavfile_if_empty()
        except Exception:
            pass # oh well, we're dying anyway
class TunerDemodNBFM(BaseTuner):
    """Tuner, demodulator, and recorder chain for narrow band FM demodulation
    Kept as it's own class so multiple can be instantiated in parallel
    Accepts complex baseband samples at 1 Msps minimum
    Frequency translating FIR filter tunes from -samp_rate/2 to +samp_rate/2
    The following sample rates assume 1 Msps input
    First two stages of decimation are 5 each for a total of 25
    Thus first two stages brings 1 Msps down to 40 ksps
    The third stage decimates by int(samp_rate/1E6)
    Thus output rate will vary from 40 ksps to 79.99 ksps
    The channel is filtered to 12.5 KHz bandwidth followed by squelch
    The squelch is non-blocking since samples will be added with other demods
    The quadrature demod is followed by a forth stage of decimation by 5
    This brings the sample rate down to 8 ksps to 15.98 ksps
    The audio is low-pass filtered to 3.5 kHz bandwidth
    The polyphase resampler resamples by samp_rate/(decims[1] * decims[0]**3)
    This results in a constant 8 ksps, irrespective of RF sample rate
    This 8 ksps audio stream may be added to other demod streams
    The audio is run through an additional blocking squelch at -200 dB
    This stops the sample flow so squelched audio is not recorded to file
    The wav file sink stores 8-bit samples (grainy quality but compact)
    Default demodulator center frequency is 0 Hz
    This is desired since hardware DC removal reduces sensitivity at 0 Hz
    NBFM demod of LO leakage will just be 0 amplitude
    Args:
        samp_rate (float): Input baseband sample rate in sps (1E6 minimum)
        audio_rate (float): Output audio sample rate in sps (8 kHz minimum)
        record (bool): Record audio to file if True
    Attributes:
        center_freq (float): Baseband center frequency in Hz
        record (bool): Record audio to file if True
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, samp_rate=4E6, audio_rate=8000, record=True):
        gr.hier_block2.__init__(self, "TunerDemodNBFM",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex),
                                gr.io_signature(1, 1, gr.sizeof_float))
        # Default values
        self.center_freq = 0
        squelch_db = -60
        self.quad_demod_gain = 0.050
        self.file_name = "/dev/null"
        self.record = record
        # Decimation values for four stages of decimation
        decims = (5, int(samp_rate/1E6))
        # Low pass filter taps for decimation by 5
        low_pass_filter_taps_0 = \
            grfilter.firdes_low_pass(1, 1, 0.090, 0.010,
                                     grfilter.firdes.WIN_HAMMING)
        # Frequency translating FIR filter decimating by 5
        self.freq_xlating_fir_filter_ccc = \
            grfilter.freq_xlating_fir_filter_ccc(decims[0],
                                                 low_pass_filter_taps_0,
                                                 self.center_freq, samp_rate)
        # FIR filter decimating by 5
        fir_filter_ccc_0 = grfilter.fir_filter_ccc(decims[0],
                                                   low_pass_filter_taps_0)
        # Low pass filter taps for decimation from samp_rate/25 to 40-79.9 ksps
        # In other words, decimation by int(samp_rate/1E6)
        # 12.5 kHz cutoff for NBFM channel bandwidth
        low_pass_filter_taps_1 = grfilter.firdes_low_pass(
            1, samp_rate/decims[0]**2, 12.5E3, 1E3, grfilter.firdes.WIN_HAMMING)
        # FIR filter decimation by int(samp_rate/1E6)
        fir_filter_ccc_1 = grfilter.fir_filter_ccc(decims[1],
                                                   low_pass_filter_taps_1)
        # Non blocking power squelch
        self.analog_pwr_squelch_cc = analog.pwr_squelch_cc(squelch_db,
                                                           1e-1, 0, False)
        # Quadrature demod with gain set for decent audio
        # The gain will be later multiplied by the 0 dB normalized volume
        self.analog_quadrature_demod_cf = \
            analog.quadrature_demod_cf(self.quad_demod_gain)
        # 3.5 kHz cutoff for audio bandwidth
        low_pass_filter_taps_2 = grfilter.firdes_low_pass(1,\
            samp_rate/(decims[1] * decims[0]**2),\
            3.5E3, 500, grfilter.firdes.WIN_HAMMING)
        # FIR filter decimating by 5 from 40-79.9 ksps to 8-15.98 ksps
        fir_filter_fff_0 = grfilter.fir_filter_fff(decims[0],
                                                   low_pass_filter_taps_2)
        # Polyphase resampler allows arbitrary RF sample rates
        # Takes 8-15.98 ksps to a constant 8 ksps for audio
        pfb_resamp = audio_rate/float(samp_rate/(decims[1] * decims[0]**3))
        pfb_arb_resampler_fff = pfb.arb_resampler_fff(pfb_resamp, taps=None,
                                                      flt_size=32)
        # Connect the blocks for the demod
        self.connect(self, self.freq_xlating_fir_filter_ccc)
        self.connect(self.freq_xlating_fir_filter_ccc, fir_filter_ccc_0)
        self.connect(fir_filter_ccc_0, fir_filter_ccc_1)
        self.connect(fir_filter_ccc_1, self.analog_pwr_squelch_cc)
        self.connect(self.analog_pwr_squelch_cc,
                     self.analog_quadrature_demod_cf)
        self.connect(self.analog_quadrature_demod_cf, fir_filter_fff_0)
        self.connect(fir_filter_fff_0, pfb_arb_resampler_fff)
        self.connect(pfb_arb_resampler_fff, self)
        # Need to set this to a very low value of -200 since it is after demod
        # Only want it to gate when the previous squelch has gone to zero
        analog_pwr_squelch_ff = analog.pwr_squelch_ff(-200, 1e-1, 0, True)
        # File sink with single channel and 8 bits/sample
        self.blocks_wavfile_sink = blocks.wavfile_sink(self.file_name, 1,
                                                       audio_rate, 8)
        # Connect the blocks for recording
        self.connect(pfb_arb_resampler_fff, analog_pwr_squelch_ff)
        self.connect(analog_pwr_squelch_ff, self.blocks_wavfile_sink)
    def set_volume(self, volume_db):
        """Sets the volume
        Args:
            volume_db (float): Volume in dB
        """
        # Volume scales the quadrature demod gain relative to 0 dB nominal.
        gain = self.quad_demod_gain * 10**(volume_db/20.0)
        self.analog_quadrature_demod_cf.set_gain(gain)
class TunerDemodAM(BaseTuner):
    """Tuner, demodulator, and recorder chain for AM demodulation
    Kept as it's own class so multiple can be instantiated in parallel
    Accepts complex baseband samples at 1 Msps minimum
    Frequency translating FIR filter tunes from -samp_rate/2 to +samp_rate/2
    The following sample rates assume 1 Msps input
    First two stages of decimation are 5 each for a total of 25
    Thus first two stages brings 1 Msps down to 40 ksps
    The third stage decimates by int(samp_rate/1E6)
    Thus output rate will vary from 40 ksps to 79.99 ksps
    The channel is filtered to 12.5 KHz bandwidth followed by squelch
    The squelch is non-blocking since samples will be added with other demods
    The AGC sets level (volume) prior to AM demod
    The AM demod is followed by a forth stage of decimation by 5
    This brings the sample rate down to 8 ksps to 15.98 ksps
    The audio is low-pass filtered to 3.5 kHz bandwidth
    The polyphase resampler resamples by samp_rate/(decims[1] * decims[0]**3)
    This results in a constant 8 ksps, irrespective of RF sample rate
    This 8 ksps audio stream may be added to other demod streams
    The audio is run through an additional blocking squelch at -200 dB
    This stops the sample flow so squelched audio is not recorded to file
    The wav file sink stores 8-bit samples (grainy quality but compact)
    Default demodulator center frequency is 0 Hz
    This is desired since hardware DC removal reduces sensitivity at 0 Hz
    AM demod of LO leakage will just be 0 amplitude
    Args:
        samp_rate (float): Input baseband sample rate in sps (1E6 minimum)
        audio_rate (float): Output audio sample rate in sps (8 kHz minimum)
        record (bool): Record audio to file if True
    Attributes:
        center_freq (float): Baseband center frequency in Hz
        record (bool): Record audio to file if True
    """
    # pylint: disable=too-many-instance-attributes
    # pylint: disable=too-many-locals
    def __init__(self, samp_rate=4E6, audio_rate=8000, record=True):
        gr.hier_block2.__init__(self, "TunerDemodAM",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex),
                                gr.io_signature(1, 1, gr.sizeof_float))
        # Default values
        self.center_freq = 0
        squelch_db = -60
        self.agc_ref = 0.1
        self.file_name = "/dev/null"
        self.record = record
        # Decimation values for four stages of decimation
        decims = (5, int(samp_rate/1E6))
        # Low pass filter taps for decimation by 5
        low_pass_filter_taps_0 = \
            grfilter.firdes_low_pass(1, 1, 0.090, 0.010,
                                     grfilter.firdes.WIN_HAMMING)
        # Frequency translating FIR filter decimating by 5
        self.freq_xlating_fir_filter_ccc = \
            grfilter.freq_xlating_fir_filter_ccc(decims[0],
                                                 low_pass_filter_taps_0,
                                                 self.center_freq, samp_rate)
        # FIR filter decimating by 5
        fir_filter_ccc_0 = grfilter.fir_filter_ccc(decims[0],
                                                   low_pass_filter_taps_0)
        # Low pass filter taps for decimation from samp_rate/25 to 40-79.9 ksps
        # In other words, decimation by int(samp_rate/1E6)
        # 12.5 kHz cutoff for NBFM channel bandwidth
        low_pass_filter_taps_1 = grfilter.firdes_low_pass(
            1, samp_rate/decims[0]**2, 12.5E3, 1E3, grfilter.firdes.WIN_HAMMING)
        # FIR filter decimation by int(samp_rate/1E6)
        fir_filter_ccc_1 = grfilter.fir_filter_ccc(decims[1],
                                                   low_pass_filter_taps_1)
        # Non blocking power squelch
        # Squelch level needs to be lower than NBFM or else choppy AM demod
        self.analog_pwr_squelch_cc = analog.pwr_squelch_cc(squelch_db,
                                                           1e-1, 0, False)
        # AGC with reference set for nominal 0 dB volume
        # Parameters tweaked to prevent impulse during squelching
        self.agc3_cc = analog.agc3_cc(1.0, 1E-4, self.agc_ref, 10, 1)
        self.agc3_cc.set_max_gain(65536)
        # AM demod with complex_to_mag()
        # Can't use analog.am_demod_cf() since it won't work with N>2 demods
        am_demod_cf = blocks.complex_to_mag(1)
        # 3.5 kHz cutoff for audio bandwidth
        low_pass_filter_taps_2 = grfilter.firdes_low_pass(1,\
            samp_rate/(decims[1] * decims[0]**2),\
            3.5E3, 500, grfilter.firdes.WIN_HAMMING)
        # FIR filter decimating by 5 from 40-79.9 ksps to 8-15.98 ksps
        fir_filter_fff_0 = grfilter.fir_filter_fff(decims[0],
                                                   low_pass_filter_taps_2)
        # Polyphase resampler allows arbitrary RF sample rates
        # Takes 8-15.98 ksps to a constant 8 ksps for audio
        pfb_resamp = audio_rate/float(samp_rate/(decims[1] * decims[0]**3))
        pfb_arb_resampler_fff = pfb.arb_resampler_fff(pfb_resamp, taps=None,
                                                      flt_size=32)
        # Connect the blocks for the demod
        self.connect(self, self.freq_xlating_fir_filter_ccc)
        self.connect(self.freq_xlating_fir_filter_ccc, fir_filter_ccc_0)
        self.connect(fir_filter_ccc_0, fir_filter_ccc_1)
        self.connect(fir_filter_ccc_1, self.analog_pwr_squelch_cc)
        self.connect(self.analog_pwr_squelch_cc, self.agc3_cc)
        self.connect(self.agc3_cc, am_demod_cf)
        self.connect(am_demod_cf, fir_filter_fff_0)
        self.connect(fir_filter_fff_0, pfb_arb_resampler_fff)
        self.connect(pfb_arb_resampler_fff, self)
        # Need to set this to a very low value of -200 since it is after demod
        # Only want it to gate when the previous squelch has gone to zero
        analog_pwr_squelch_ff = analog.pwr_squelch_ff(-200, 1e-1, 0, True)
        # File sink with single channel and 8 bits/sample
        self.blocks_wavfile_sink = blocks.wavfile_sink(self.file_name, 1,
                                                       audio_rate, 8)
        # Connect the blocks for recording
        self.connect(pfb_arb_resampler_fff, analog_pwr_squelch_ff)
        self.connect(analog_pwr_squelch_ff, self.blocks_wavfile_sink)
    def set_volume(self, volume_db):
        """Sets the volume
        Args:
            volume_db (float): Volume in dB
        """
        # Volume scales the AGC reference relative to the 0 dB nominal.
        agc_ref = self.agc_ref * 10**(volume_db/20.0)
        self.agc3_cc.set_reference(agc_ref)
class Receiver(gr.top_block):
    """Receiver for narrow band frequency modulation
    Controls hardware and instantiates multiple tuner/demodulators
    Generates FFT power spectrum for channel estimation
    Args:
        ask_samp_rate (float): Asking sample rate of hardware in sps (1E6 min)
        num_demod (int): Number of parallel demodulators
        type_demod (int): Type of demodulator (0=NBFM, 1=AM)
        hw_args (string): Argument string to pass to hardware
        freq_correction (int): Frequency correction in ppm
        record (bool): Record audio to file if True
    Attributes:
        center_freq (float): Hardware RF center frequency in Hz
        samp_rate (float): Hardware sample rate in sps (1E6 min)
        gain_db (int): Hardware RF gain in dB
        squelch_db (int): Squelch in dB
        volume_db (int): Volume in dB
    """
    # pylint: disable=too-many-instance-attributes
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-arguments
    def __init__(self, ask_samp_rate=4E6, num_demod=4, type_demod=0,
                 hw_args="uhd", freq_correction=0, record=True, play=True):
        # Call the initialization method from the parent class
        gr.top_block.__init__(self, "Receiver")
        # Default values
        self.center_freq = 144E6
        self.gain_db = 10
        self.squelch_db = -70
        self.volume_db = 0
        audio_rate = 8000
        # Setup the USRP source, or use the USRP sim
        self.src = osmosdr.source(args="numchan=" + str(1) + " " + hw_args)
        self.src.set_sample_rate(ask_samp_rate)
        self.src.set_gain(self.gain_db)
        self.src.set_center_freq(self.center_freq)
        self.src.set_freq_corr(freq_correction)
        # Get the sample rate and center frequency from the hardware
        # (hardware may not honor the asked values exactly)
        self.samp_rate = self.src.get_sample_rate()
        self.center_freq = self.src.get_center_freq()
        # Set the I/Q bandwidth to 80 % of sample rate
        self.src.set_bandwidth(0.8 * self.samp_rate)
        # NBFM channel is about 10 KHz wide
        # Want about 3 FFT bins to span a channel
        # Use length FFT so 4 Msps / 1024 = 3906.25 Hz/bin
        # This also means 3906.25 vectors/second
        # Using below formula keeps FFT size a power of two
        # Also keeps bin size constant for power of two sampling rates
        # Use of 256 sets 3906.25 Hz/bin; increase to reduce bin size
        samp_ratio = self.samp_rate / 1E6
        fft_length = 256 * int(pow(2, np.ceil(np.log(samp_ratio)/np.log(2))))
        # -----------Flow for FFT--------------
        # Convert USRP stream to vector
        stream_to_vector = blocks.stream_to_vector(gr.sizeof_gr_complex*1,
                                                   fft_length)
        # Want about 1000 vector/sec
        amount = int(round(self.samp_rate/fft_length/1000))
        keep_one_in_n = blocks.keep_one_in_n(gr.sizeof_gr_complex*
                                             fft_length, amount)
        # Take FFT
        fft_vcc = fft.fft_vcc(fft_length, True,
                              window.blackmanharris(fft_length), True, 1)
        # Compute the power
        complex_to_mag_squared = blocks.complex_to_mag_squared(fft_length)
        # Video average and decimate from 1000 vector/sec to 10 vector/sec
        integrate_ff = blocks.integrate_ff(100, fft_length)
        # Probe vector
        self.probe_signal_vf = blocks.probe_signal_vf(fft_length)
        # Connect the blocks
        self.connect(self.src, stream_to_vector, keep_one_in_n,
                     fft_vcc, complex_to_mag_squared,
                     integrate_ff, self.probe_signal_vf)
        # -----------Flow for Demod--------------
        # Create N parallel demodulators as a list of objects
        # Default to NBFM demod
        self.demodulators = []
        for idx in range(num_demod):
            if type_demod == 1:
                self.demodulators.append(TunerDemodAM(self.samp_rate,
                                                      audio_rate, record))
            else:
                self.demodulators.append(TunerDemodNBFM(self.samp_rate,
                                                        audio_rate, record))
        if play:
            # Create an adder
            add_ff = blocks.add_ff(1)
            # Connect the demodulators between the source and adder
            for idx, demodulator in enumerate(self.demodulators):
                self.connect(self.src, demodulator, (add_ff, idx))
            # Audio sink
            audio_sink = audio.sink(audio_rate)
            # Connect the summed outputs to the audio sink
            self.connect(add_ff, audio_sink)
        else:
            # Just connect each demodulator to the receiver source
            for demodulator in self.demodulators:
                self.connect(self.src, demodulator)
    def set_center_freq(self, center_freq):
        """Sets RF center frequency of hardware
        Args:
            center_freq (float): Hardware RF center frequency in Hz
        """
        # Tune the hardware
        self.src.set_center_freq(center_freq)
        # Update center frequency with hardware center frequency
        # Do this to account for slight hardware offsets
        self.center_freq = self.src.get_center_freq()
    def set_gain(self, gain_db):
        """Sets gain of RF hardware
        Args:
            gain_db (float): Hardware RF gain in dB
        """
        self.src.set_gain(gain_db)
        # Read back since hardware may quantize the requested gain
        self.gain_db = self.src.get_gain()
    def set_squelch(self, squelch_db):
        """Sets squelch of all demodulators and clamps range
        Args:
            squelch_db (float): Squelch in dB
        """
        # Clamp to [-100, 0] dB
        self.squelch_db = max(min(0, squelch_db), -100)
        for demodulator in self.demodulators:
            demodulator.set_squelch(self.squelch_db)
    def set_volume(self, volume_db):
        """Sets volume of all demodulators and clamps range
        Args:
            volume_db (float): Volume in dB
        """
        # Clamp to [-20, 20] dB
        self.volume_db = max(min(20, volume_db), -20)
        for demodulator in self.demodulators:
            demodulator.set_volume(self.volume_db)
    def get_demod_freqs(self):
        """Gets baseband frequencies of all demodulators
        Returns:
            List[float]: List of baseband center frequencies in Hz
        """
        center_freqs = []
        for demodulator in self.demodulators:
            center_freqs.append(demodulator.center_freq)
        return center_freqs
def main():
    """Test the receiver
    Sets up the hardware
    Tunes a couple of demodulators
    Prints the max power spectrum
    """
    # Create receiver object
    ask_samp_rate = 4E6
    num_demod = 4
    type_demod = 0
    hw_args = "uhd"
    freq_correction = 0
    record = False
    play = True
    receiver = Receiver(ask_samp_rate, num_demod, type_demod, hw_args,
                        freq_correction, record, play)
    # Start the receiver and wait for samples to accumulate
    receiver.start()
    time.sleep(1)
    # Set frequency, gain, squelch, and volume
    center_freq = 144.5E6
    receiver.set_center_freq(center_freq)
    receiver.set_gain(10)
    print "\n"
    print "Started %s at %.3f Msps" % (hw_args, receiver.samp_rate/1E6)
    print "RX at %.3f MHz with %d dB gain" % (receiver.center_freq/1E6,
                                              receiver.gain_db)
    receiver.set_squelch(-60)
    receiver.set_volume(0)
    print "%d demods of type %d at %d dB squelch and %d dB volume" % \
        (num_demod, type_demod, receiver.squelch_db, receiver.volume_db)
    # Create some baseband channels to tune based on 144 MHz center
    channels = np.zeros(num_demod)
    channels[0] = 144.39E6 - receiver.center_freq # APRS
    channels[1] = 144.6E6 - receiver.center_freq
    # Tune demodulators to baseband channels
    # If recording on, this creates empty wav file since manually tuning.
    for idx, demodulator in enumerate(receiver.demodulators):
        demodulator.set_center_freq(channels[idx], center_freq)
    # Print demodulator info
    for idx, channel in enumerate(channels):
        print "Tuned demod %d to %.3f MHz" % (idx,
                                              (channel+receiver.center_freq)
                                              /1E6)
    while 1:
        # No need to go faster than 10 Hz rate of GNU Radio probe
        # Just do 1 Hz here
        time.sleep(1)
        # Grab the FFT data and print max value
        spectrum = receiver.probe_signal_vf.level()
        print "Max spectrum of %.3f" % (np.max(spectrum))
    # Stop the receiver
    # NOTE(review): the loop above never exits normally, so stop()/wait()
    # are unreachable; Ctrl-C propagates past them to the handler below.
    receiver.stop()
    receiver.wait()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
| kaback/ham2mon | apps/receiver.py | Python | gpl-3.0 | 25,151 |
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Storage back-end for Mercurial.
This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""
# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev
from i18n import _
import ancestor, mdiff, parsers, error, util, dagutil
import struct, zlib, errno
# Local aliases for frequently-called functions (avoids repeated
# attribute lookups in the hot parsing/compression paths).
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1
# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
# revlog index flags
REVIDX_KNOWN_FLAGS = 0
# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576
# Re-export the error types so callers can catch them from this module.
RevlogError = error.RevlogError
LookupError = error.LookupError
def getoffset(q):
    """Return the data-file offset stored in the high bits of an index field."""
    shifted = q >> 16
    return int(shifted)
def gettype(q):
    """Return the flag bits stored in the low 16 bits of an index field."""
    masked = q & 0xFFFF
    return int(masked)
def offset_type(offset, type):
    # Pack offset (high bits) and flag bits (low 16 bits) into one index
    # field; long() keeps the result unbounded on Python 2 for large
    # offsets.
    return long(long(offset) << 16 | type)
# Pre-computed hash state seeded with the null node, copied on each call
# (see the fast path below).
nullhash = _sha(nullid)
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes
    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent node is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        # sort so the hash is independent of parent order
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()
def compress(text):
    """Return a possibly-compressed representation of *text*.

    The result is a (header, data) pair: header '' means *data* is either
    zlib-compressed or starts with NUL (stored literally); header 'u'
    marks uncompressed text.
    """
    if not text:
        return ("", text)
    length = len(text)
    packed = None
    if length < 44:
        # Too short for compression to pay off.
        pass
    elif length > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so compress incrementally in 1 MB pieces.
        compressor = zlib.compressobj()
        pieces = []
        pos = 0
        while pos < length:
            end = pos + 2**20
            pieces.append(compressor.compress(text[pos:end]))
            pos = end
        pieces.append(compressor.flush())
        if sum(map(len, pieces)) < length:
            packed = "".join(pieces)
    else:
        packed = _compress(text)
    if packed is None or len(packed) > length:
        # Compression did not help; store the text itself.  A leading NUL
        # already distinguishes it, otherwise prefix with 'u'.
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", packed)
def decompress(bin):
    """Undo compress(): dispatch on the one-byte type marker.

    NUL -> stored as-is, 'x' -> zlib stream, 'u' -> literal text after
    the marker.  Anything else is a corrupt chunk.
    """
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        return bin
    if marker == 'x':
        return _decompress(bin)
    if marker == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % marker)
# Legacy (version 0) index entry layout: four 32-bit ints followed by
# three 20-byte sha1 nodes; the entry's own node starts at byte 56.
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56
class revlogoldio(object):
    # Reader/writer for the legacy v0 on-disk index format.
    def __init__(self):
        # Fixed record size of one v0 index entry.
        self.size = struct.calcsize(indexformatv0)
    def parseindex(self, data, inline):
        """Parse raw v0 index bytes into (index, nodemap, None)."""
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            # (parent nodes become parent revs; unknown parents map to
            # nullrev, flags and compressed length get placeholders)
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1
        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))
        return index, nodemap, None
    def packentry(self, entry, node, version, rev):
        """Serialize one index entry back to the v0 byte layout."""
        if gettype(entry[0]):
            # v0 has no room for per-revision flags.
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
# index ng (RevlogNG / v1) on-disk entry layout:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
# byte offset of the nodeid within an NG entry (8 + 6*4)
ngshaoffset = 32
# format of the 4-byte version/flags header stored in entry 0
versionformat = ">I"

class revlogio(object):
    """Reader/writer for the RevlogNG (v1) index format."""

    def __init__(self):
        # size in bytes of one on-disk index entry
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            # the version/flags header overwrites the first four bytes
            # of entry 0 (read back by revlog.__init__)
            p = _pack(versionformat, version) + p[4:]
        return p
class revlog(object):
"""
the underlying revision storage object
A revlog consists of two parts, an index and the revision data.
The index is a file with a fixed record size containing
information on each revision, including its nodeid (hash), the
nodeids of its parents, the position and offset of its data within
the data file, and the revision it's based on. Finally, each entry
contains a linkrev entry that can serve as a pointer to external
data.
The revision data itself is a linear collection of data chunks.
Each chunk represents a revision and is usually represented as a
delta against the previous chunk. To bound lookup time, runs of
deltas are limited to about 2 times the length of the original
version data. This makes retrieval of a version proportional to
its size, or O(1) relative to the number of revisions.
Both pieces of the revlog are written to in an append-only
fashion, which means we never need to rewrite a file to insert or
remove data, and can use some simple techniques to avoid the need
for locking while reading.
"""
def __init__(self, opener, indexfile):
    """
    create a revlog object

    opener is a function that abstracts the file opening operation
    and can be used to implement COW semantics or the like.
    """
    self.indexfile = indexfile
    # the data file name replaces the index name's last two chars with ".d"
    self.datafile = indexfile[:-2] + ".d"
    self.opener = opener
    # (node, rev, text) of the most recently reconstructed full revision
    self._cache = None
    # (rev, chainbase) of the last revision added; used to pick delta bases
    self._basecache = (0, 0)
    # (offset, data) window of raw file bytes already read from disk
    self._chunkcache = (0, '')
    self.index = []
    # partial-hex-id -> node lookup cache (see _partialmatch)
    self._pcache = {}
    # node -> rev map; may be replaced below by the C parser's nodemap
    self._nodecache = {nullid: nullrev}
    self._nodepos = None

    v = REVLOG_DEFAULT_VERSION
    opts = getattr(opener, 'options', None)
    if opts is not None:
        if 'revlogv1' in opts:
            if 'generaldelta' in opts:
                v |= REVLOGGENERALDELTA
        else:
            v = 0
    i = ''
    self._initempty = True
    try:
        f = self.opener(self.indexfile)
        i = f.read()
        f.close()
        if len(i) > 0:
            # the first four bytes of entry 0 hold the version/flags word
            v = struct.unpack(versionformat, i[:4])[0]
            self._initempty = False
    except IOError, inst:
        # a missing index file simply means an empty revlog
        if inst.errno != errno.ENOENT:
            raise

    self.version = v
    self._inline = v & REVLOGNGINLINEDATA
    self._generaldelta = v & REVLOGGENERALDELTA
    # split the version word into feature flags (high) and format (low)
    flags = v & ~0xFFFF
    fmt = v & 0xFFFF
    if fmt == REVLOGV0 and flags:
        raise RevlogError(_("index %s unknown flags %#04x for format v0")
                          % (self.indexfile, flags >> 16))
    elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
        raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                          % (self.indexfile, flags >> 16))
    elif fmt > REVLOGNG:
        raise RevlogError(_("index %s unknown format %d")
                          % (self.indexfile, fmt))

    # pick the I/O codec matching the detected format
    self._io = revlogio()
    if self.version == REVLOGV0:
        self._io = revlogoldio()
    try:
        d = self._io.parseindex(i, self._inline)
    except (ValueError, IndexError):
        raise RevlogError(_("index %s is corrupted") % (self.indexfile))
    self.index, nodemap, self._chunkcache = d
    if nodemap is not None:
        # the C parser supplies its own (faster) node -> rev map
        self.nodemap = self._nodecache = nodemap
    if not self._chunkcache:
        self._chunkclear()
def tip(self):
return self.node(len(self.index) - 2)
def __len__(self):
return len(self.index) - 1
def __iter__(self):
for i in xrange(len(self)):
yield i
@util.propertycache
def nodemap(self):
self.rev(self.node(0))
return self._nodecache
def hasnode(self, node):
try:
self.rev(node)
return True
except KeyError:
return False
def clearcaches(self):
try:
self._nodecache.clearcaches()
except AttributeError:
self._nodecache = {nullid: nullrev}
self._nodepos = None
def rev(self, node):
    """Return the revision number for the given binary *node*.

    Raises LookupError if the node is not in this revlog.
    """
    try:
        return self._nodecache[node]
    except RevlogError:
        # parsers.c radix tree lookup failed
        raise LookupError(node, self.indexfile, _('no node'))
    except KeyError:
        # pure python cache lookup failed
        n = self._nodecache
        i = self.index
        p = self._nodepos
        if p is None:
            # nothing scanned yet: start at the last real revision
            # (len - 2 skips the sentinel null entry at index[-1])
            p = len(i) - 2
        # linear backward scan, memoizing every node passed so that
        # subsequent lookups only scan not-yet-visited revisions
        for r in xrange(p, -1, -1):
            v = i[r][7]
            n[v] = r
            if v == node:
                self._nodepos = r - 1
                return r
        raise LookupError(node, self.indexfile, _('no node'))
def node(self, rev):
return self.index[rev][7]
def linkrev(self, rev):
return self.index[rev][4]
def parents(self, node):
i = self.index
d = i[self.rev(node)]
return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
def parentrevs(self, rev):
return self.index[rev][5:7]
def start(self, rev):
return int(self.index[rev][0] >> 16)
def end(self, rev):
return self.start(rev) + self.length(rev)
def length(self, rev):
return self.index[rev][1]
def chainbase(self, rev):
index = self.index
base = index[rev][3]
while base != rev:
rev = base
base = index[rev][3]
return base
def flags(self, rev):
return self.index[rev][0] & 0xFFFF
def rawsize(self, rev):
"""return the length of the uncompressed text for a given revision"""
l = self.index[rev][2]
if l >= 0:
return l
t = self.revision(self.node(rev))
return len(t)
size = rawsize
def reachable(self, node, stop=None):
"""return the set of all nodes ancestral to a given node, including
the node itself, stopping when stop is matched"""
reachable = set((node,))
visit = [node]
if stop:
stopn = self.rev(stop)
else:
stopn = 0
while visit:
n = visit.pop(0)
if n == stop:
continue
if n == nullid:
continue
for p in self.parents(n):
if self.rev(p) < stopn:
continue
if p not in reachable:
reachable.add(p)
visit.append(p)
return reachable
def ancestors(self, *revs):
"""Generate the ancestors of 'revs' in reverse topological order.
Yield a sequence of revision numbers starting with the parents
of each revision in revs, i.e., each revision is *not* considered
an ancestor of itself. Results are in breadth-first order:
parents of each rev in revs, then parents of those, etc. Result
does not include the null revision."""
visit = list(revs)
seen = set([nullrev])
while visit:
for parent in self.parentrevs(visit.pop(0)):
if parent not in seen:
visit.append(parent)
seen.add(parent)
yield parent
def descendants(self, *revs):
"""Generate the descendants of 'revs' in revision order.
Yield a sequence of revision numbers starting with a child of
some rev in revs, i.e., each revision is *not* considered a
descendant of itself. Results are ordered by revision number (a
topological sort)."""
first = min(revs)
if first == nullrev:
for i in self:
yield i
return
seen = set(revs)
for i in xrange(first + 1, len(self)):
for x in self.parentrevs(i):
if x != nullrev and x in seen:
seen.add(i)
yield i
break
def findcommonmissing(self, common=None, heads=None):
"""Return a tuple of the ancestors of common and the ancestors of heads
that are not ancestors of common. In revset terminology, we return the
tuple:
::common, (::heads) - (::common)
The list is sorted by revision number, meaning it is
topologically sorted.
'heads' and 'common' are both lists of node IDs. If heads is
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
common = [nullid]
if heads is None:
heads = self.heads()
common = [self.rev(n) for n in common]
heads = [self.rev(n) for n in heads]
# we want the ancestors, but inclusive
has = set(self.ancestors(*common))
has.add(nullrev)
has.update(common)
# take all ancestors from heads that aren't in has
missing = set()
visit = [r for r in heads if r not in has]
while visit:
r = visit.pop(0)
if r in missing:
continue
else:
missing.add(r)
for p in self.parentrevs(r):
if p not in has:
visit.append(p)
missing = list(missing)
missing.sort()
return has, [self.node(r) for r in missing]
def findmissing(self, common=None, heads=None):
"""Return the ancestors of heads that are not ancestors of common.
More specifically, return a list of nodes N such that every N
satisfies the following constraints:
1. N is an ancestor of some node in 'heads'
2. N is not an ancestor of any node in 'common'
The list is sorted by revision number, meaning it is
topologically sorted.
'heads' and 'common' are both lists of node IDs. If heads is
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
_common, missing = self.findcommonmissing(common, heads)
return missing
def nodesbetween(self, roots=None, heads=None):
"""Return a topological path from 'roots' to 'heads'.
Return a tuple (nodes, outroots, outheads) where 'nodes' is a
topologically sorted list of all nodes N that satisfy both of
these constraints:
1. N is a descendant of some node in 'roots'
2. N is an ancestor of some node in 'heads'
Every node is considered to be both a descendant and an ancestor
of itself, so every reachable node in 'roots' and 'heads' will be
included in 'nodes'.
'outroots' is the list of reachable nodes in 'roots', i.e., the
subset of 'roots' that is returned in 'nodes'. Likewise,
'outheads' is the subset of 'heads' that is also in 'nodes'.
'roots' and 'heads' are both lists of node IDs. If 'roots' is
unspecified, uses nullid as the only root. If 'heads' is
unspecified, uses list of all of the revlog's heads."""
nonodes = ([], [], [])
if roots is not None:
roots = list(roots)
if not roots:
return nonodes
lowestrev = min([self.rev(n) for n in roots])
else:
roots = [nullid] # Everybody's a descendant of nullid
lowestrev = nullrev
if (lowestrev == nullrev) and (heads is None):
# We want _all_ the nodes!
return ([self.node(r) for r in self], [nullid], list(self.heads()))
if heads is None:
# All nodes are ancestors, so the latest ancestor is the last
# node.
highestrev = len(self) - 1
# Set ancestors to None to signal that every node is an ancestor.
ancestors = None
# Set heads to an empty dictionary for later discovery of heads
heads = {}
else:
heads = list(heads)
if not heads:
return nonodes
ancestors = set()
# Turn heads into a dictionary so we can remove 'fake' heads.
# Also, later we will be using it to filter out the heads we can't
# find from roots.
heads = dict.fromkeys(heads, False)
# Start at the top and keep marking parents until we're done.
nodestotag = set(heads)
# Remember where the top was so we can use it as a limit later.
highestrev = max([self.rev(n) for n in nodestotag])
while nodestotag:
# grab a node to tag
n = nodestotag.pop()
# Never tag nullid
if n == nullid:
continue
# A node's revision number represents its place in a
# topologically sorted list of nodes.
r = self.rev(n)
if r >= lowestrev:
if n not in ancestors:
# If we are possibly a descendant of one of the roots
# and we haven't already been marked as an ancestor
ancestors.add(n) # Mark as ancestor
# Add non-nullid parents to list of nodes to tag.
nodestotag.update([p for p in self.parents(n) if
p != nullid])
elif n in heads: # We've seen it before, is it a fake head?
# So it is, real heads should not be the ancestors of
# any other heads.
heads.pop(n)
if not ancestors:
return nonodes
# Now that we have our set of ancestors, we want to remove any
# roots that are not ancestors.
# If one of the roots was nullid, everything is included anyway.
if lowestrev > nullrev:
# But, since we weren't, let's recompute the lowest rev to not
# include roots that aren't ancestors.
# Filter out roots that aren't ancestors of heads
roots = [n for n in roots if n in ancestors]
# Recompute the lowest revision
if roots:
lowestrev = min([self.rev(n) for n in roots])
else:
# No more roots? Return empty list
return nonodes
else:
# We are descending from nullid, and don't need to care about
# any other roots.
lowestrev = nullrev
roots = [nullid]
# Transform our roots list into a set.
descendants = set(roots)
# Also, keep the original roots so we can filter out roots that aren't
# 'real' roots (i.e. are descended from other roots).
roots = descendants.copy()
# Our topologically sorted list of output nodes.
orderedout = []
# Don't start at nullid since we don't want nullid in our output list,
# and if nullid shows up in descedents, empty parents will look like
# they're descendants.
for r in xrange(max(lowestrev, 0), highestrev + 1):
n = self.node(r)
isdescendant = False
if lowestrev == nullrev: # Everybody is a descendant of nullid
isdescendant = True
elif n in descendants:
# n is already a descendant
isdescendant = True
# This check only needs to be done here because all the roots
# will start being marked is descendants before the loop.
if n in roots:
# If n was a root, check if it's a 'real' root.
p = tuple(self.parents(n))
# If any of its parents are descendants, it's not a root.
if (p[0] in descendants) or (p[1] in descendants):
roots.remove(n)
else:
p = tuple(self.parents(n))
# A node is a descendant if either of its parents are
# descendants. (We seeded the dependents list with the roots
# up there, remember?)
if (p[0] in descendants) or (p[1] in descendants):
descendants.add(n)
isdescendant = True
if isdescendant and ((ancestors is None) or (n in ancestors)):
# Only include nodes that are both descendants and ancestors.
orderedout.append(n)
if (ancestors is not None) and (n in heads):
# We're trying to figure out which heads are reachable
# from roots.
# Mark this head as having been reached
heads[n] = True
elif ancestors is None:
# Otherwise, we're trying to discover the heads.
# Assume this is a head because if it isn't, the next step
# will eventually remove it.
heads[n] = True
# But, obviously its parents aren't.
for p in self.parents(n):
heads.pop(p, None)
heads = [n for n, flag in heads.iteritems() if flag]
roots = list(roots)
assert orderedout
assert roots
assert heads
return (orderedout, roots, heads)
def headrevs(self):
count = len(self)
if not count:
return [nullrev]
ishead = [1] * (count + 1)
index = self.index
for r in xrange(count):
e = index[r]
ishead[e[5]] = ishead[e[6]] = 0
return [r for r in xrange(count) if ishead[r]]
def heads(self, start=None, stop=None):
"""return the list of all nodes that have no children
if start is specified, only heads that are descendants of
start will be returned
if stop is specified, it will consider all the revs from stop
as if they had no children
"""
if start is None and stop is None:
if not len(self):
return [nullid]
return [self.node(r) for r in self.headrevs()]
if start is None:
start = nullid
if stop is None:
stop = []
stoprevs = set([self.rev(n) for n in stop])
startrev = self.rev(start)
reachable = set((startrev,))
heads = set((startrev,))
parentrevs = self.parentrevs
for r in xrange(startrev + 1, len(self)):
for p in parentrevs(r):
if p in reachable:
if r not in stoprevs:
reachable.add(r)
heads.add(r)
if p in heads and p not in stoprevs:
heads.remove(p)
return [self.node(r) for r in heads]
def children(self, node):
"""find the children of a given node"""
c = []
p = self.rev(node)
for r in range(p + 1, len(self)):
prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
if prevs:
for pr in prevs:
if pr == p:
c.append(self.node(r))
elif p == nullrev:
c.append(self.node(r))
return c
def descendant(self, start, end):
if start == nullrev:
return True
for i in self.descendants(start):
if i == end:
return True
elif i > end:
break
return False
def ancestor(self, a, b):
    """calculate the least common ancestor of nodes a and b

    Takes and returns binary nodes, though the computation itself is
    done on revision numbers.  Returns nullid when the only common
    ancestor is the null revision.
    """
    # fast path, check if it is a descendant
    a, b = self.rev(a), self.rev(b)
    start, end = sorted((a, b))
    if self.descendant(start, end):
        # one revision is an ancestor of the other: it is the LCA
        return self.node(start)

    def parents(rev):
        # adapter for ancestor.ancestor: parent revs without nullrev
        return [p for p in self.parentrevs(rev) if p != nullrev]

    c = ancestor.ancestor(a, b, parents)
    if c is None:
        return nullid

    return self.node(c)
def _match(self, id):
if isinstance(id, (long, int)):
# rev
return self.node(id)
if len(id) == 20:
# possibly a binary node
# odds of a binary node being all hex in ASCII are 1 in 10**25
try:
node = id
self.rev(node) # quick search the index
return node
except LookupError:
pass # may be partial hex id
try:
# str(rev)
rev = int(id)
if str(rev) != id:
raise ValueError
if rev < 0:
rev = len(self) + rev
if rev < 0 or rev >= len(self):
raise ValueError
return self.node(rev)
except (ValueError, OverflowError):
pass
if len(id) == 40:
try:
# a full hex nodeid?
node = bin(id)
self.rev(node)
return node
except (TypeError, LookupError):
pass
def _partialmatch(self, id):
    """Try to resolve *id* as a hex nodeid prefix.

    Returns the matching binary node, None when nothing matches (or
    the prefix is too long / not valid hex), and raises LookupError
    when more than one node matches.  Successful lookups are memoized
    in self._pcache.
    """
    if id in self._pcache:
        return self._pcache[id]

    if len(id) < 40:
        try:
            # hex(node)[:...]
            l = len(id) // 2  # grab an even number of digits
            prefix = bin(id[:l * 2])
            # first narrow candidates by binary prefix, then re-check
            # against the full hex id to honor an odd trailing digit
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            nl = [n for n in nl if hex(n).startswith(id)]
            if len(nl) > 0:
                if len(nl) == 1:
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            return None
        except TypeError:
            # bin() failed: id is not valid hexadecimal
            pass
def lookup(self, id):
"""locate a node based on:
- revision number or str(revision number)
- nodeid or subset of hex nodeid
"""
n = self._match(id)
if n is not None:
return n
n = self._partialmatch(id)
if n:
return n
raise LookupError(id, self.indexfile, _('no match found'))
def cmp(self, node, text):
"""compare text with a given file revision
returns True if text is different than what is stored.
"""
p1, p2 = self.parents(node)
return hash(text, p1, p2) != node
def _addchunk(self, offset, data):
o, d = self._chunkcache
# try to add to existing cache
if o + len(d) == offset and len(d) + len(data) < _chunksize:
self._chunkcache = o, d + data
else:
self._chunkcache = offset, data
def _loadchunk(self, offset, length):
if self._inline:
df = self.opener(self.indexfile)
else:
df = self.opener(self.datafile)
readahead = max(65536, length)
df.seek(offset)
d = df.read(readahead)
df.close()
self._addchunk(offset, d)
if readahead > length:
return util.buffer(d, 0, length)
return d
def _getchunk(self, offset, length):
o, d = self._chunkcache
l = len(d)
# is it in the cache?
cachestart = offset - o
cacheend = cachestart + length
if cachestart >= 0 and cacheend <= l:
if cachestart == 0 and cacheend == l:
return d # avoid a copy
return util.buffer(d, cachestart, cacheend - cachestart)
return self._loadchunk(offset, length)
def _chunkraw(self, startrev, endrev):
start = self.start(startrev)
length = self.end(endrev) - start
if self._inline:
start += (startrev + 1) * self._io.size
return self._getchunk(start, length)
def _chunk(self, rev):
return decompress(self._chunkraw(rev, rev))
def _chunkbase(self, rev):
return self._chunk(rev)
def _chunkclear(self):
self._chunkcache = (0, '')
def deltaparent(self, rev):
"""return deltaparent of the given revision"""
base = self.index[rev][3]
if base == rev:
return nullrev
elif self._generaldelta:
return base
else:
return rev - 1
def revdiff(self, rev1, rev2):
"""return or calculate a delta between two revisions"""
if rev1 != nullrev and self.deltaparent(rev2) == rev1:
return str(self._chunk(rev2))
return mdiff.textdiff(self.revision(rev1),
self.revision(rev2))
def revision(self, nodeorrev):
"""return an uncompressed revision of a given node or revision
number.
"""
if isinstance(nodeorrev, int):
rev = nodeorrev
node = self.node(rev)
else:
node = nodeorrev
rev = None
cachedrev = None
if node == nullid:
return ""
if self._cache:
if self._cache[0] == node:
return self._cache[2]
cachedrev = self._cache[1]
# look up what we need to read
text = None
if rev is None:
rev = self.rev(node)
# check rev flags
if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
raise RevlogError(_('incompatible revision flag %x') %
(self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
# build delta chain
chain = []
index = self.index # for performance
generaldelta = self._generaldelta
iterrev = rev
e = index[iterrev]
while iterrev != e[3] and iterrev != cachedrev:
chain.append(iterrev)
if generaldelta:
iterrev = e[3]
else:
iterrev -= 1
e = index[iterrev]
chain.reverse()
base = iterrev
if iterrev == cachedrev:
# cache hit
text = self._cache[2]
# drop cache to save memory
self._cache = None
self._chunkraw(base, rev)
if text is None:
text = str(self._chunkbase(base))
bins = [self._chunk(r) for r in chain]
text = mdiff.patches(text, bins)
text = self._checkhash(text, node, rev)
self._cache = (node, rev, text)
return text
def _checkhash(self, text, node, rev):
p1, p2 = self.parents(node)
if node != hash(text, p1, p2):
raise RevlogError(_("integrity check failed on %s:%d")
% (self.indexfile, rev))
return text
def checkinlinesize(self, tr, fp=None):
if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
return
trinfo = tr.find(self.indexfile)
if trinfo is None:
raise RevlogError(_("%s not found in the transaction")
% self.indexfile)
trindex = trinfo[2]
dataoff = self.start(trindex)
tr.add(self.datafile, dataoff)
if fp:
fp.flush()
fp.close()
df = self.opener(self.datafile, 'w')
try:
for r in self:
df.write(self._chunkraw(r, r))
finally:
df.close()
fp = self.opener(self.indexfile, 'w', atomictemp=True)
self.version &= ~(REVLOGNGINLINEDATA)
self._inline = False
for i in self:
e = self._io.packentry(self.index[i], self.node, self.version, i)
fp.write(e)
# if we don't call close, the temp file will never replace the
# real index
fp.close()
tr.replace(self.indexfile, trindex * self._io.size)
self._chunkclear()
def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
"""add a revision to the log
text - the revision data to add
transaction - the transaction object used for rollback
link - the linkrev data to add
p1, p2 - the parent nodeids of the revision
cachedelta - an optional precomputed delta
"""
node = hash(text, p1, p2)
if node in self.nodemap:
return node
dfh = None
if not self._inline:
dfh = self.opener(self.datafile, "a")
ifh = self.opener(self.indexfile, "a+")
try:
return self._addrevision(node, text, transaction, link, p1, p2,
cachedelta, ifh, dfh)
finally:
if dfh:
dfh.close()
ifh.close()
def _addrevision(self, node, text, transaction, link, p1, p2,
cachedelta, ifh, dfh):
"""internal function to add revisions to the log
see addrevision for argument descriptions.
invariants:
- text is optional (can be None); if not set, cachedelta must be set.
if both are set, they must correspond to eachother.
"""
btext = [text]
def buildtext():
if btext[0] is not None:
return btext[0]
# flush any pending writes here so we can read it in revision
if dfh:
dfh.flush()
ifh.flush()
basetext = self.revision(self.node(cachedelta[0]))
btext[0] = mdiff.patch(basetext, cachedelta[1])
chk = hash(btext[0], p1, p2)
if chk != node:
raise RevlogError(_("consistency error in delta"))
return btext[0]
def builddelta(rev):
# can we use the cached delta?
if cachedelta and cachedelta[0] == rev:
delta = cachedelta[1]
else:
t = buildtext()
ptext = self.revision(self.node(rev))
delta = mdiff.textdiff(ptext, t)
data = compress(delta)
l = len(data[1]) + len(data[0])
if basecache[0] == rev:
chainbase = basecache[1]
else:
chainbase = self.chainbase(rev)
dist = l + offset - self.start(chainbase)
if self._generaldelta:
base = rev
else:
base = chainbase
return dist, l, data, base, chainbase
curr = len(self)
prev = curr - 1
base = chainbase = curr
offset = self.end(prev)
flags = 0
d = None
basecache = self._basecache
p1r, p2r = self.rev(p1), self.rev(p2)
# should we try to build a delta?
if prev != nullrev:
if self._generaldelta:
if p1r >= basecache[1]:
d = builddelta(p1r)
elif p2r >= basecache[1]:
d = builddelta(p2r)
else:
d = builddelta(prev)
else:
d = builddelta(prev)
dist, l, data, base, chainbase = d
# full versions are inserted when the needed deltas
# become comparable to the uncompressed text
if text is None:
textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
cachedelta[1])
else:
textlen = len(text)
if d is None or dist > textlen * 2:
text = buildtext()
data = compress(text)
l = len(data[1]) + len(data[0])
base = chainbase = curr
e = (offset_type(offset, flags), l, textlen,
base, link, p1r, p2r, node)
self.index.insert(-1, e)
self.nodemap[node] = curr
entry = self._io.packentry(e, self.node, self.version, curr)
if not self._inline:
transaction.add(self.datafile, offset)
transaction.add(self.indexfile, curr * len(entry))
if data[0]:
dfh.write(data[0])
dfh.write(data[1])
dfh.flush()
ifh.write(entry)
else:
offset += curr * self._io.size
transaction.add(self.indexfile, offset, curr)
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
self.checkinlinesize(transaction, ifh)
if type(text) == str: # only accept immutable objects
self._cache = (node, curr, text)
self._basecache = (curr, chainbase)
return node
def group(self, nodelist, bundler, reorder=None):
"""Calculate a delta group, yielding a sequence of changegroup chunks
(strings).
Given a list of changeset revs, return a set of deltas and
metadata corresponding to nodes. The first delta is
first parent(nodelist[0]) -> nodelist[0], the receiver is
guaranteed to have this parent as it has all history before
these changesets. In the case firstparent is nullrev the
changegroup starts with a full revision.
"""
# if we don't have any revisions touched by these changesets, bail
if len(nodelist) == 0:
yield bundler.close()
return
# for generaldelta revlogs, we linearize the revs; this will both be
# much quicker and generate a much smaller bundle
if (self._generaldelta and reorder is not False) or reorder:
dag = dagutil.revlogdag(self)
revs = set(self.rev(n) for n in nodelist)
revs = dag.linearize(revs)
else:
revs = sorted([self.rev(n) for n in nodelist])
# add the parent of the first rev
p = self.parentrevs(revs[0])[0]
revs.insert(0, p)
# build deltas
for r in xrange(len(revs) - 1):
prev, curr = revs[r], revs[r + 1]
for c in bundler.revchunk(self, curr, prev):
yield c
yield bundler.close()
def addgroup(self, bundle, linkmapper, transaction):
"""
add a delta group
given a set of deltas, add them to the revision log. the
first delta is against its parent, which should be in our
log, the rest are against the previous delta.
"""
# track the base of the current delta log
content = []
node = None
r = len(self)
end = 0
if r:
end = self.end(r - 1)
ifh = self.opener(self.indexfile, "a+")
isize = r * self._io.size
if self._inline:
transaction.add(self.indexfile, end + isize, r)
dfh = None
else:
transaction.add(self.indexfile, isize, r)
transaction.add(self.datafile, end)
dfh = self.opener(self.datafile, "a")
try:
# loop through our set of deltas
chain = None
while True:
chunkdata = bundle.deltachunk(chain)
if not chunkdata:
break
node = chunkdata['node']
p1 = chunkdata['p1']
p2 = chunkdata['p2']
cs = chunkdata['cs']
deltabase = chunkdata['deltabase']
delta = chunkdata['delta']
content.append(node)
link = linkmapper(cs)
if node in self.nodemap:
# this can happen if two branches make the same change
chain = node
continue
for p in (p1, p2):
if not p in self.nodemap:
raise LookupError(p, self.indexfile,
_('unknown parent'))
if deltabase not in self.nodemap:
raise LookupError(deltabase, self.indexfile,
_('unknown delta base'))
baserev = self.rev(deltabase)
chain = self._addrevision(node, None, transaction, link,
p1, p2, (baserev, delta), ifh, dfh)
if not dfh and not self._inline:
# addrevision switched from inline to conventional
# reopen the index
ifh.close()
dfh = self.opener(self.datafile, "a")
ifh = self.opener(self.indexfile, "a")
finally:
if dfh:
dfh.close()
ifh.close()
return content
def strip(self, minlink, transaction):
    """truncate the revlog on the first revision with a linkrev >= minlink

    This function is called when we're stripping revision minlink and
    its descendants from the repository.

    We have to remove all revisions with linkrev >= minlink, because
    the equivalent changelog revisions will be renumbered after the
    strip.

    So we truncate the revlog on the first of these revisions, and
    trust that the caller has saved the revisions that shouldn't be
    removed and that it'll re-add them after this truncation.
    """
    if len(self) == 0:
        return

    # find the first revision to strip (index field 4 is the linkrev)
    for rev in self:
        if self.index[rev][4] >= minlink:
            break
    else:
        # no revision has linkrev >= minlink: nothing to do
        return

    # first truncate the files on disk
    end = self.start(rev)
    if not self._inline:
        # separate data file: truncate it at the revision's data start,
        # then compute the index truncation point from the entry size
        transaction.add(self.datafile, end)
        end = rev * self._io.size
    else:
        # inline: data is interleaved after each index entry, so the
        # truncation point accounts for both entries and data
        end += rev * self._io.size

    transaction.add(self.indexfile, end)

    # then reset internal state in memory to forget those revisions
    self._cache = None
    self._chunkclear()

    for x in xrange(rev, len(self)):
        del self.nodemap[self.node(x)]

    # the slice stops at -1 to preserve the sentinel null entry
    del self.index[rev:-1]
def checksize(self):
expected = 0
if len(self):
expected = max(0, self.end(len(self) - 1))
try:
f = self.opener(self.datafile)
f.seek(0, 2)
actual = f.tell()
f.close()
dd = actual - expected
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
dd = 0
try:
f = self.opener(self.indexfile)
f.seek(0, 2)
actual = f.tell()
f.close()
s = self._io.size
i = max(0, actual // s)
di = actual - (i * s)
if self._inline:
databytes = 0
for r in self:
databytes += max(0, self.length(r))
dd = 0
di = actual - len(self) * s - databytes
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
di = 0
return (dd, di)
def files(self):
    """Return the list of on-disk files backing this revlog."""
    # an inline revlog stores everything in the index file; otherwise
    # revision data lives in a separate data file
    if self._inline:
        return [self.indexfile]
    return [self.indexfile, self.datafile]
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/mercurial/revlog.py | Python | gpl-3.0 | 44,332 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets
from view.analysis_widget import AnalysisWidget
# noinspection PyPep8Naming
class TemporalAnalysisWidget(AnalysisWidget):
    """Temporal Analysis page of the main window."""

    # noinspection PyArgumentList
    def __init__(self, mplCanvas):
        """
        Construct the Temporal Analysis page in the main window. |br|
        A ``ScatterPlot.mplCanvas`` will be shown on this page.

        :param mplCanvas: The ``ScatterPlot.mplCanvas`` widget.
        """
        super().__init__()

        graphLabel = QtWidgets.QLabel("Temporal Distribution &Graph:")
        graphLabel.setMargin(1)
        graphLabel.setBuddy(mplCanvas)

        quotientLabel = QtWidgets.QLabel("Temporal Correlation &Quotient:")
        quotientLabel.setMargin(1)
        quotientLabel.setBuddy(self.tableWidget)

        # stack the graph above the correlation table, each with its label
        layout = QtWidgets.QVBoxLayout()
        for widget in (graphLabel, mplCanvas, quotientLabel, self.tableWidget):
            layout.addWidget(widget)
        self.setLayout(layout)
| yuwen41200/biodiversity-analysis | src/view/temporal_analysis_widget.py | Python | gpl-3.0 | 1,068 |
# Nested cross-validation of an SVM with a precomputed kernel, reporting AUROC.
#
# Usage: python cross_validation_from_matrix_norm.py inputMatrix.libsvm C outfile
#
# The input is a kernel (Gram) matrix in libsvm format whose first column is
# an example index; the matrix is cosine-normalized before use.  For each
# random seed 42..52 a stratified 10-fold CV is run; per fold, the inner
# 10-fold AUROC and the outer test AUROC are written to
# "<outfile>.seed<rs>.c<C>".  (Python 2 syntax: print statements, xrange.)
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '',''))
import numpy as np
#from skgraph import datasets
from sklearn import svm
#from skgraph.ioskgraph import *
from math import sqrt
import sys
from sklearn.metrics import roc_auc_score
#"sys.path.append('..\\..\\Multiple Kernel Learning\\Framework')"
if len(sys.argv)<4:
    sys.exit("python cross_validation_from_matrix_norm.py inputMatrix.libsvm C outfile")

# SVM regularization constant from the command line
c=float(sys.argv[2])

##TODO read from libsvm format
from sklearn.datasets import load_svmlight_file
km, target_array = load_svmlight_file(sys.argv[1])
#print km
# drop the index column (column 0) and densify
kmgood=km[:,1:].todense()
gram=km[:,1:].todense()
# cosine normalization: gram[i,j] = k(i,j) / sqrt(k(i,i) * k(j,j));
# kmgood keeps the unnormalized values so the diagonal stays intact
for i in xrange(len(target_array)):
    for j in xrange(0,len(target_array)):
        #AUC cross validationprint i,j,kmgood[i,j],kmgood[i,i],kmgood[j,j]
        gram[i,j]=kmgood[i,j]/sqrt(kmgood[i,i]*kmgood[j,j])

#print gram

from sklearn import cross_validation
for rs in range(42,53):
    # one output file per (seed, C) combination
    f=open(str(sys.argv[3]+".seed"+str(rs)+".c"+str(c)),'w')

    kf = cross_validation.StratifiedKFold(target_array, n_folds=10, shuffle=True,random_state=rs)
    #print kf
    #remove column zero because
    #first entry of each line is the index

    #gram=km[:,1:].todense()
    f.write("Total examples "+str(len(gram))+"\n")
    f.write("CV\t test_AUROC\n")
    #print gram
    # normalization
    #for i in range(len(gram)):
    #    for j in range(len(gram)):
    #        gram[i,j]=gram[i,j]/sqrt(gram[i,i]+gram[j,j])

    sc=[]

    for train_index, test_index in kf:
        #print("TRAIN:", train_index, "TEST:", test_index)
        #generated train and test lists, incuding indices of the examples in training/test
        #for the specific fold. Indices starts from 0 now
        clf = svm.SVC(C=c, kernel='precomputed',probability=True)
        train_gram = [] #[[] for x in xrange(0,len(train))]
        test_gram = []# [[] for x in xrange(0,len(test))]
        #generate train matrix and test matrix
        # slice the Gram matrix: every row restricted to training columns;
        # 'row' itself is unused, only the running index matters
        index=-1
        for row in gram:
            index+=1
            if index in train_index:
                train_gram.append([gram[index,i] for i in train_index])
            else:
                test_gram.append([gram[index,i] for i in train_index])

        #print gram

        X_train, X_test, y_train, y_test = np.array(train_gram), np.array(test_gram), target_array[train_index], target_array[test_index]

        #COMPUTE INNERKFOLD
        # inner CV on the training fold only (model assessment signal)
        kf = cross_validation.StratifiedKFold(y_train, n_folds=10, shuffle=True,random_state=rs)
        inner_scores= cross_validation.cross_val_score(
        clf, X_train, y_train, cv=kf, scoring='roc_auc')
        #print "inner scores", inner_scores
        print "Inner AUROC: %0.4f (+/- %0.4f)" % (inner_scores.mean(), inner_scores.std() / 2)

        f.write(str(inner_scores.mean())+"\t")

        clf.fit(X_train, y_train)

        # predict on test examples
        y_test_predicted=clf.predict_proba(X_test)
        #print y_test_predicted
        # column 1 holds the probability of the positive class
        sc.append(roc_auc_score(y_test, y_test_predicted[:,1]))
        f.write(str(roc_auc_score(y_test, y_test_predicted[:,1]))+"\n")

    f.close()
    scores=np.array(sc)
    print "AUROC: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() / 2)
| nickgentoo/scikit-learn-graph | scripts/cross_validation_from_matrix_AUC_norm.py | Python | gpl-3.0 | 3,283 |
# Copyright 2008 Dan Smith <[email protected]>
# Copyright 2012 Tom Hayward <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
import urllib
from glob import glob
import shutil
import time
import gtk
import gobject
gobject.threads_init()
if __name__ == "__main__":
    # When run directly from a source checkout, make the parent
    # directory importable so the chirp/chirpui packages resolve.
    import sys
    sys.path.insert(0, "..")
from chirpui import inputdialog, common
try:
import serial
except ImportError,e:
common.log_exception()
common.show_error("\nThe Pyserial module is not installed!")
from chirp import platform, generic_xml, generic_csv, directory, util
from chirp import ic9x, kenwood_live, idrp, vx7, vx5, vx6
from chirp import CHIRP_VERSION, chirp_common, detect, errors
from chirp import icf, ic9x_icf
from chirpui import editorset, clone, miscwidgets, config, reporting, fips
from chirpui import bandplans
CONF = config.get()
KEEP_RECENT = 8  # maximum number of entries kept in the File->Recent menu
# RepeaterBook band-filter labels mapped to the service's band codes
# (0 = no band filter).  Displayed sorted by key_bands() below.
# NOTE(review): the values look like RepeaterBook band ids rather than
# frequencies -- confirm against the chirp.php query interface.
RB_BANDS = {
    "--All--"                 : 0,
    "10 meters (29MHz)"       : 29,
    "6 meters (54MHz)"        : 5,
    "2 meters (144MHz)"       : 14,
    "1.25 meters (220MHz)"    : 22,
    "70 centimeters (440MHz)" : 4,
    "33 centimeters (900MHz)" : 9,
    "23 centimeters (1.2GHz)" : 12,
}
def key_bands(band):
    """Sort key for RB_BANDS labels: "--All--" first, then bands in
    descending wavelength order.

    Labels look like "<amount> <units> (<freq>)"; meter bands are scaled
    by 100 so every meter band sorts ahead of every centimeter band.
    """
    if band.startswith("-"):
        # "--All--" always sorts first
        return -1

    amount, units, mhz = band.split(" ")
    # Replaced the fragile Python 2 "cond and a or b" idiom (which breaks
    # whenever the true-branch value is falsy) with a real conditional.
    scale = 100 if units == "meters" else 1

    return 100000 - (float(amount) * scale)
class ModifiedError(Exception):
    """Raised by do_close() when the user cancels closing a modified tab."""
    pass
class ChirpMain(gtk.Window):
def get_current_editorset(self):
page = self.tabs.get_current_page()
if page is not None:
return self.tabs.get_nth_page(page)
else:
return None
    def ev_tab_switched(self, pagenum=None):
        """Adjust menu-action sensitivity to match the radio type of the
        newly selected tab (or the current tab when pagenum is None)."""
        def set_action_sensitive(action, sensitive):
            self.menu_ag.get_action(action).set_sensitive(sensitive)
        if pagenum is not None:
            eset = self.tabs.get_nth_page(pagenum)
        else:
            eset = self.get_current_editorset()
        # Upload only applies to clone-mode (image-based) radios
        upload_sens = bool(eset and
                           isinstance(eset.radio, chirp_common.CloneModeRadio))
        # Live and network-sourced radios have nothing savable
        if not eset or isinstance(eset.radio, chirp_common.LiveRadio):
            save_sens = False
        elif isinstance(eset.radio, chirp_common.NetworkSourceRadio):
            save_sens = False
        else:
            save_sens = True
        for i in ["import", "importsrc", "stock"]:
            set_action_sensitive(i,
                                 eset is not None and not eset.get_read_only())
        for i in ["save", "saveas"]:
            set_action_sensitive(i, save_sens)
        for i in ["upload"]:
            set_action_sensitive(i, upload_sens)
        for i in ["cancelq"]:
            # cancel-queue only makes sense for non-savable (live) radios
            set_action_sensitive(i, eset is not None and not save_sens)
        for i in ["export", "close", "columns", "irbook", "irfinder",
                  "move_up", "move_dn", "exchange", "iradioreference",
                  "cut", "copy", "paste", "delete", "viewdeveloper"]:
            set_action_sensitive(i, eset is not None)
    def ev_status(self, editorset, msg):
        """Show an editor-set status message on the radio statusbar."""
        self.sb_radio.pop(0)
        self.sb_radio.push(0, msg)
    def ev_usermsg(self, editorset, msg):
        """Show a user-facing message on the general statusbar."""
        self.sb_general.pop(0)
        self.sb_general.push(0, msg)
    def ev_editor_selected(self, editorset, editortype):
        """Toggle editor-specific menu actions when the active editor
        inside a tab changes (only memedit has any, currently)."""
        mappings = {
            "memedit" : ["view", "edit"],
            }
        for _editortype, actions in mappings.items():
            for _action in actions:
                action = self.menu_ag.get_action(_action)
                action.set_sensitive(editortype.startswith(_editortype))
def _connect_editorset(self, eset):
eset.connect("want-close", self.do_close)
eset.connect("status", self.ev_status)
eset.connect("usermsg", self.ev_usermsg)
eset.connect("editor-selected", self.ev_editor_selected)
    def do_diff_radio(self):
        """Diff two open tabs: a single raw memory from each radio (via
        chained RadioJobs), or the whole image when both channel numbers
        are negative and both radios are clone-mode."""
        if self.tabs.get_n_pages() < 2:
            common.show_error("Diff tabs requires at least two open tabs!")
            return
        esets = []
        for i in range(0, self.tabs.get_n_pages()):
            esets.append(self.tabs.get_nth_page(i))
        d = gtk.Dialog(title="Diff Radios",
                       buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK,
                                gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL),
                       parent=self)
        choices = []
        for eset in esets:
            choices.append("%s %s (%s)" % (eset.rthread.radio.VENDOR,
                                           eset.rthread.radio.MODEL,
                                           eset.filename))
        # Chooser + channel spinner for radio A (-1 means "whole image")
        choice_a = miscwidgets.make_choice(choices, False, choices[0])
        choice_a.show()
        chan_a = gtk.SpinButton()
        chan_a.get_adjustment().set_all(1, -1, 999, 1, 10, 0)
        chan_a.show()
        hbox = gtk.HBox(False, 3)
        hbox.pack_start(choice_a, 1, 1, 1)
        hbox.pack_start(chan_a, 0, 0, 0)
        hbox.show()
        d.vbox.pack_start(hbox, 0, 0, 0)
        # Chooser + channel spinner for radio B
        choice_b = miscwidgets.make_choice(choices, False, choices[1])
        choice_b.show()
        chan_b = gtk.SpinButton()
        chan_b.get_adjustment().set_all(1, -1, 999, 1, 10, 0)
        chan_b.show()
        hbox = gtk.HBox(False, 3)
        hbox.pack_start(choice_b, 1, 1, 1)
        hbox.pack_start(chan_b, 0, 0, 0)
        hbox.show()
        d.vbox.pack_start(hbox, 0, 0, 0)
        r = d.run()
        sel_a = choice_a.get_active_text()
        sel_chan_a = chan_a.get_value()
        sel_b = choice_b.get_active_text()
        sel_chan_b = chan_b.get_value()
        d.destroy()
        if r == gtk.RESPONSE_CANCEL:
            return
        if sel_a == sel_b:
            common.show_error("Can't diff the same tab!")
            return
        print "Selected %s@%i and %s@%i" % (sel_a, sel_chan_a,
                                            sel_b, sel_chan_b)
        eset_a = esets[choices.index(sel_a)]
        eset_b = esets[choices.index(sel_b)]
        def _show_diff(mem_b, mem_a):
            # Step 3: Show the diff
            diff = common.simple_diff(mem_a, mem_b)
            common.show_diff_blob("Differences", diff)
        def _get_mem_b(mem_a):
            # Step 2: Get memory b
            job = common.RadioJob(_show_diff, "get_raw_memory", int(sel_chan_b))
            job.set_cb_args(mem_a)
            eset_b.rthread.submit(job)
        if sel_chan_a >= 0 and sel_chan_b >= 0:
            # Diff numbered memory
            # Step 1: Get memory a
            job = common.RadioJob(_get_mem_b, "get_raw_memory", int(sel_chan_a))
            eset_a.rthread.submit(job)
        elif isinstance(eset_a.rthread.radio, chirp_common.CloneModeRadio) and\
                isinstance(eset_b.rthread.radio, chirp_common.CloneModeRadio):
            # Diff whole (can do this without a job, since both are clone-mode)
            a = util.hexprint(eset_a.rthread.radio._mmap.get_packed())
            b = util.hexprint(eset_b.rthread.radio._mmap.get_packed())
            common.show_diff_blob("Differences", common.simple_diff(a, b))
        else:
            common.show_error("Cannot diff whole live-mode radios!")
def do_new(self):
eset = editorset.EditorSet(_("Untitled") + ".csv", self)
self._connect_editorset(eset)
eset.prime()
eset.show()
tab = self.tabs.append_page(eset, eset.get_tab_label())
self.tabs.set_current_page(tab)
def _do_manual_select(self, filename):
radiolist = {}
for drv, radio in directory.DRV_TO_RADIO.items():
if not issubclass(radio, chirp_common.CloneModeRadio):
continue
radiolist["%s %s" % (radio.VENDOR, radio.MODEL)] = drv
lab = gtk.Label("""<b><big>Unable to detect model!</big></b>
If you think that it is valid, you can select a radio model below to force an open attempt. If selecting the model manually works, please file a bug on the website and attach your image. If selecting the model does not work, it is likely that you are trying to open some other type of file.
""")
lab.set_justify(gtk.JUSTIFY_FILL)
lab.set_line_wrap(True)
lab.set_use_markup(True)
lab.show()
choice = miscwidgets.make_choice(sorted(radiolist.keys()), False,
sorted(radiolist.keys())[0])
d = gtk.Dialog(title="Detection Failed",
buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
d.vbox.pack_start(lab, 0, 0, 0)
d.vbox.pack_start(choice, 0, 0, 0)
d.vbox.set_spacing(5)
choice.show()
d.set_default_size(400, 200)
#d.set_resizable(False)
r = d.run()
d.destroy()
if r != gtk.RESPONSE_OK:
return
try:
rc = directory.DRV_TO_RADIO[radiolist[choice.get_active_text()]]
return rc(filename)
except:
return
def do_open(self, fname=None, tempname=None):
if not fname:
types = [(_("CHIRP Radio Images") + " (*.img)", "*.img"),
(_("CHIRP Files") + " (*.chirp)", "*.chirp"),
(_("CSV Files") + " (*.csv)", "*.csv"),
(_("EVE Files (VX5)") + " (*.eve)", "*.eve"),
(_("ICF Files") + " (*.icf)", "*.icf"),
(_("VX5 Commander Files") + " (*.vx5)", "*.vx5"),
(_("VX6 Commander Files") + " (*.vx6)", "*.vx6"),
(_("VX7 Commander Files") + " (*.vx7)", "*.vx7"),
]
fname = platform.get_platform().gui_open_file(types=types)
if not fname:
return
self.record_recent_file(fname)
if icf.is_icf_file(fname):
a = common.ask_yesno_question(\
_("ICF files cannot be edited, only displayed or imported "
"into another file. Open in read-only mode?"),
self)
if not a:
return
read_only = True
else:
read_only = False
if icf.is_9x_icf(fname):
# We have to actually instantiate the IC9xICFRadio to get its
# sub-devices
radio = ic9x_icf.IC9xICFRadio(fname)
else:
try:
radio = directory.get_radio_by_image(fname)
except errors.ImageDetectFailed:
radio = self._do_manual_select(fname)
if not radio:
return
print "Manually selected %s" % radio
except Exception, e:
common.log_exception()
common.show_error(os.path.basename(fname) + ": " + str(e))
return
first_tab = False
try:
eset = editorset.EditorSet(radio, self,
filename=fname,
tempname=tempname)
except Exception, e:
common.log_exception()
common.show_error(
_("There was an error opening {fname}: {error}").format(
fname=fname,
error=e))
return
eset.set_read_only(read_only)
self._connect_editorset(eset)
eset.show()
self.tabs.append_page(eset, eset.get_tab_label())
if hasattr(eset.rthread.radio, "errors") and \
eset.rthread.radio.errors:
msg = _("{num} errors during open:").format(
num=len(eset.rthread.radio.errors))
common.show_error_text(msg,
"\r\n".join(eset.rthread.radio.errors))
    def do_live_warning(self, radio):
        """Explain live-mode behavior in a dialog; a checkbox lets the
        user suppress it permanently (noconfirm/live_mode)."""
        d = gtk.MessageDialog(parent=self, buttons=gtk.BUTTONS_OK)
        d.set_markup("<big><b>" + _("Note:") + "</b></big>")
        msg = _("The {vendor} {model} operates in <b>live mode</b>. "
                "This means that any changes you make are immediately sent "
                "to the radio. Because of this, you cannot perform the "
                "<u>Save</u> or <u>Upload</u> operations. If you wish to "
                "edit the contents offline, please <u>Export</u> to a CSV "
                "file, using the <b>File menu</b>.").format(vendor=radio.VENDOR,
                                                            model=radio.MODEL)
        d.format_secondary_markup(msg)
        again = gtk.CheckButton(_("Don't show this again"))
        again.show()
        d.vbox.pack_start(again, 0, 0, 0)
        d.run()
        CONF.set_bool("live_mode", again.get_active(), "noconfirm")
        d.destroy()
    def do_open_live(self, radio, tempname=None, read_only=False):
        """Open an already-instantiated radio object in a new tab, with
        usage reporting and a one-time warning for live radios."""
        eset = editorset.EditorSet(radio, self, tempname=tempname)
        eset.connect("want-close", self.do_close)
        eset.connect("status", self.ev_status)
        eset.set_read_only(read_only)
        eset.show()
        self.tabs.append_page(eset, eset.get_tab_label())
        if isinstance(radio, chirp_common.LiveRadio):
            reporting.report_model_usage(radio, "live", True)
            if not CONF.get_bool("live_mode", "noconfirm"):
                self.do_live_warning(radio)
def do_save(self, eset=None):
if not eset:
eset = self.get_current_editorset()
# For usability, allow Ctrl-S to short-circuit to Save-As if
# we are working on a yet-to-be-saved image
if not os.path.exists(eset.filename):
return self.do_saveas()
eset.save()
    def do_saveas(self):
        """Prompt for a filename (offering model-specific file types) and
        save the current editor set there, confirming overwrites."""
        eset = self.get_current_editorset()
        label = _("{vendor} {model} image file").format(\
            vendor=eset.radio.VENDOR,
            model=eset.radio.MODEL)
        types = [(label + " (*.%s)" % eset.radio.FILE_EXTENSION,
                  eset.radio.FILE_EXTENSION)]
        # Offer third-party formats for the Yaesu models that support them
        if isinstance(eset.radio, vx7.VX7Radio):
            types += [(_("VX7 Commander") + " (*.vx7)", "vx7")]
        elif isinstance(eset.radio, vx6.VX6Radio):
            types += [(_("VX6 Commander") + " (*.vx6)", "vx6")]
        elif isinstance(eset.radio, vx5.VX5Radio):
            types += [(_("EVE") + " (*.eve)", "eve")]
            types += [(_("VX5 Commander") + " (*.vx5)", "vx5")]
        # Re-prompt until the user picks a fresh name, confirms an
        # overwrite, or cancels
        while True:
            fname = platform.get_platform().gui_save_file(types=types)
            if not fname:
                return
            if os.path.exists(fname):
                dlg = inputdialog.OverwriteDialog(fname)
                owrite = dlg.run()
                dlg.destroy()
                if owrite == gtk.RESPONSE_OK:
                    break
            else:
                break
        try:
            eset.save(fname)
        except Exception,e:
            d = inputdialog.ExceptionDialog(e)
            d.run()
            d.destroy()
    def cb_clonein(self, radio, emsg=None):
        """Download-complete callback: close the port, then open the
        cloned radio in a tab or show the error dialog."""
        radio.pipe.close()
        # NOTE(review): bool(emsg) is True exactly when the clone FAILED;
        # confirm the third argument's meaning against
        # reporting.report_model_usage() (cb_cloneout passes True always).
        reporting.report_model_usage(radio, "download", bool(emsg))
        if not emsg:
            self.do_open_live(radio, tempname="(" + _("Untitled") + ")")
        else:
            d = inputdialog.ExceptionDialog(emsg)
            d.run()
            d.destroy()
    def cb_cloneout(self, radio, emsg= None):
        """Upload-complete callback: close the port, report usage, and
        show any error dialog."""
        radio.pipe.close()
        reporting.report_model_usage(radio, "upload", True)
        if emsg:
            d = inputdialog.ExceptionDialog(emsg)
            d.run()
            d.destroy()
def _get_recent_list(self):
recent = []
for i in range(0, KEEP_RECENT):
fn = CONF.get("recent%i" % i, "state")
if fn:
recent.append(fn)
return recent
def _set_recent_list(self, recent):
for fn in recent:
CONF.set("recent%i" % recent.index(fn), fn, "state")
    def update_recent_files(self):
        """Rebuild the File->recent menu entries from the stored list."""
        i = 0
        for fname in self._get_recent_list():
            action_name = "recent%i" % i
            path = "/MenuBar/file/recent"
            old_action = self.menu_ag.get_action(action_name)
            if old_action:
                self.menu_ag.remove_action(old_action)
            # Double the underscores so they are not eaten as mnemonics
            file_basename = os.path.basename(fname).replace("_", "__")
            action = gtk.Action(action_name,
                                "_%i. %s" % (i+1, file_basename),
                                _("Open recent file {name}").format(name=fname),
                                "")
            # fname is passed as the signal's user-data argument
            action.connect("activate", lambda a,f: self.do_open(f), fname)
            mid = self.menu_uim.new_merge_id()
            self.menu_uim.add_ui(mid, path,
                                 action_name, action_name,
                                 gtk.UI_MANAGER_MENUITEM, False)
            self.menu_ag.add_action(action)
            i += 1
    def record_recent_file(self, filename):
        """Add filename to the front of the recent list (capped at
        KEEP_RECENT entries) and refresh the menu."""
        recent_files = self._get_recent_list()
        if filename not in recent_files:
            if len(recent_files) == KEEP_RECENT:
                del recent_files[-1]
            recent_files.insert(0, filename)
            self._set_recent_list(recent_files)
        self.update_recent_files()
    def import_stock_config(self, action, config):
        """Menu handler: import a stock config CSV into the current tab."""
        eset = self.get_current_editorset()
        count = eset.do_import(config)
    def copy_shipped_stock_configs(self, stock_dir):
        """Copy the stock config CSVs shipped with CHIRP into stock_dir,
        skipping files already present.  Returns False on the first copy
        failure (aborting the remainder), True otherwise."""
        execpath = platform.get_platform().executable_path()
        basepath = os.path.abspath(os.path.join(execpath, "stock_configs"))
        if not os.path.exists(basepath):
            # Fall back to the system-wide install location
            basepath = "/usr/share/chirp/stock_configs"
        files = glob(os.path.join(basepath, "*.csv"))
        for fn in files:
            if os.path.exists(os.path.join(stock_dir, os.path.basename(fn))):
                print "Skipping existing stock config"
                continue
            try:
                shutil.copy(fn, stock_dir)
                print "Copying %s -> %s" % (fn, stock_dir)
            except Exception, e:
                print "ERROR: Unable to copy %s to %s: %s" % (fn, stock_dir, e)
                return False
        return True
    def update_stock_configs(self):
        """Ensure the user's stock_configs directory exists and is
        populated, then add an Import and an Open menu entry for each
        stock CSV found there."""
        stock_dir = platform.get_platform().config_file("stock_configs")
        if not os.path.isdir(stock_dir):
            try:
                os.mkdir(stock_dir)
            except Exception, e:
                print "ERROR: Unable to create directory: %s" % stock_dir
                return
        if not self.copy_shipped_stock_configs(stock_dir):
            return
        def _do_import_action(config):
            # Radio->stock entry: import the CSV into the current tab
            name = os.path.splitext(os.path.basename(config))[0]
            action_name = "stock-%i" % configs.index(config)
            path = "/MenuBar/radio/stock"
            action = gtk.Action(action_name,
                                name,
                                _("Import stock "
                                  "configuration {name}").format(name=name),
                                "")
            action.connect("activate", self.import_stock_config, config)
            mid = self.menu_uim.new_merge_id()
            mid = self.menu_uim.add_ui(mid, path,
                                       action_name, action_name,
                                       gtk.UI_MANAGER_MENUITEM, False)
            self.menu_ag.add_action(action)
        def _do_open_action(config):
            # File->openstock entry: open the CSV directly in a tab
            name = os.path.splitext(os.path.basename(config))[0]
            action_name = "openstock-%i" % configs.index(config)
            path = "/MenuBar/file/openstock"
            action = gtk.Action(action_name,
                                name,
                                _("Open stock "
                                  "configuration {name}").format(name=name),
                                "")
            action.connect("activate", lambda a,c: self.do_open(c), config)
            mid = self.menu_uim.new_merge_id()
            mid = self.menu_uim.add_ui(mid, path,
                                       action_name, action_name,
                                       gtk.UI_MANAGER_MENUITEM, False)
            self.menu_ag.add_action(action)
        configs = glob(os.path.join(stock_dir, "*.csv"))
        for config in configs:
            _do_import_action(config)
            _do_open_action(config)
    def _confirm_experimental(self, rclass):
        """Warn (once per driver class, unless squelched) that a driver
        is experimental.  Returns True when the user wants to proceed."""
        sql_key = "warn_experimental_%s" % directory.radio_class_id(rclass)
        if CONF.is_defined(sql_key, "state") and \
                not CONF.get_bool(sql_key, "state"):
            # User previously squelched the warning for this driver
            return True
        title = _("Proceed with experimental driver?")
        text = rclass.get_prompts().experimental
        msg = _("This radio's driver is experimental. "
                "Do you want to proceed?")
        resp, squelch = common.show_warning(msg, text,
                                            title=title,
                                            buttons=gtk.BUTTONS_YES_NO,
                                            can_squelch=True)
        if resp == gtk.RESPONSE_YES:
            # Remember the squelch choice only on a positive answer
            CONF.set_bool(sql_key, not squelch, "state")
        return resp == gtk.RESPONSE_YES
    def _show_instructions(self, radio, message):
        """Show model-specific clone instructions, with a checkbox to
        suppress them for all radios (noconfirm/clone_instructions)."""
        if message is None:
            return
        if CONF.get_bool("clone_instructions", "noconfirm"):
            return
        d = gtk.MessageDialog(parent=self, buttons=gtk.BUTTONS_OK)
        d.set_markup("<big><b>" + _("{name} Instructions").format(
                name=radio.get_name()) + "</b></big>")
        msg = _("{instructions}").format(instructions=message)
        d.format_secondary_markup(msg)
        again = gtk.CheckButton(_("Don't show instructions for any radio again"))
        again.show()
        d.vbox.pack_start(again, 0, 0, 0)
        h_button_box = d.vbox.get_children()[2]
        try:
            ok_button = h_button_box.get_children()[0]
            ok_button.grab_default()
            ok_button.grab_focus()
        except AttributeError:
            # don't grab focus on GTK+ 2.0
            pass
        d.run()
        d.destroy()
        CONF.set_bool("clone_instructions", again.get_active(), "noconfirm")
def do_download(self, port=None, rtype=None):
d = clone.CloneSettingsDialog(parent=self)
settings = d.run()
d.destroy()
if not settings:
return
rclass = settings.radio_class
if issubclass(rclass, chirp_common.ExperimentalRadio) and \
not self._confirm_experimental(rclass):
# User does not want to proceed with experimental driver
return
self._show_instructions(rclass, rclass.get_prompts().pre_download)
print "User selected %s %s on port %s" % (rclass.VENDOR,
rclass.MODEL,
settings.port)
try:
ser = serial.Serial(port=settings.port,
baudrate=rclass.BAUD_RATE,
rtscts=rclass.HARDWARE_FLOW,
timeout=0.25)
ser.flushInput()
except serial.SerialException, e:
d = inputdialog.ExceptionDialog(e)
d.run()
d.destroy()
return
radio = settings.radio_class(ser)
fn = tempfile.mktemp()
if isinstance(radio, chirp_common.CloneModeRadio):
ct = clone.CloneThread(radio, "in", cb=self.cb_clonein, parent=self)
ct.start()
else:
self.do_open_live(radio)
    def do_upload(self, port=None, rtype=None):
        """Upload (clone) the current tab's image back to its radio in a
        background thread; cb_cloneout reports the result."""
        eset = self.get_current_editorset()
        radio = eset.radio
        settings = clone.CloneSettings()
        settings.radio_class = radio.__class__
        d = clone.CloneSettingsDialog(settings, parent=self)
        settings = d.run()
        d.destroy()
        if not settings:
            return
        if isinstance(radio, chirp_common.ExperimentalRadio) and \
                not self._confirm_experimental(radio.__class__):
            # User does not want to proceed with experimental driver
            return
        try:
            ser = serial.Serial(port=settings.port,
                                baudrate=radio.BAUD_RATE,
                                rtscts=radio.HARDWARE_FLOW,
                                timeout=0.25)
            ser.flushInput()
        except serial.SerialException, e:
            d = inputdialog.ExceptionDialog(e)
            d.run()
            d.destroy()
            return
        self._show_instructions(radio, radio.get_prompts().pre_upload)
        radio.set_pipe(ser)
        ct = clone.CloneThread(radio, "out", cb=self.cb_cloneout, parent=self)
        ct.start()
    def do_close(self, tab_child=None):
        """Close a tab (the current one when tab_child is None).

        Prompts to save modified data; raises ModifiedError when the
        user cancels.  Stops the radio thread, closes the pipe, and
        removes the notebook page.  Returns True when closed, False if
        there was nothing to close."""
        if tab_child:
            eset = tab_child
        else:
            eset = self.get_current_editorset()
        if not eset:
            return False
        if eset.is_modified():
            dlg = miscwidgets.YesNoDialog(title=_("Save Changes?"),
                                          parent=self,
                                          buttons=(gtk.STOCK_YES, gtk.RESPONSE_YES,
                                                   gtk.STOCK_NO, gtk.RESPONSE_NO,
                                                   gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
            dlg.set_text(_("File is modified, save changes before closing?"))
            res = dlg.run()
            dlg.destroy()
            if res == gtk.RESPONSE_YES:
                self.do_save(eset)
            elif res == gtk.RESPONSE_CANCEL:
                raise ModifiedError()
        eset.rthread.stop()
        eset.rthread.join()
        eset.prepare_close()
        if eset.radio.pipe:
            eset.radio.pipe.close()
        if isinstance(eset.radio, chirp_common.LiveRadio):
            # Allow another live connection once this one is gone
            action = self.menu_ag.get_action("openlive")
            if action:
                action.set_sensitive(True)
        page = self.tabs.page_num(eset)
        # NOTE(review): gtk.Notebook.page_num() returns -1 (not None)
        # when the child is absent, so this check is always true; verify
        # before relying on it.
        if page is not None:
            self.tabs.remove_page(page)
        return True
    def do_import(self):
        """Prompt for a file and import its memories into the current
        tab, reporting success to the usage service."""
        types = [(_("CHIRP Files") + " (*.chirp)", "*.chirp"),
                 (_("CHIRP Radio Images") + " (*.img)", "*.img"),
                 (_("CSV Files") + " (*.csv)", "*.csv"),
                 (_("EVE Files (VX5)") + " (*.eve)", "*.eve"),
                 (_("ICF Files") + " (*.icf)", "*.icf"),
                 (_("Kenwood HMK Files") + " (*.hmk)", "*.hmk"),
                 (_("Kenwood ITM Files") + " (*.itm)", "*.itm"),
                 (_("Travel Plus Files") + " (*.tpe)", "*.tpe"),
                 (_("VX5 Commander Files") + " (*.vx5)", "*.vx5"),
                 (_("VX6 Commander Files") + " (*.vx6)", "*.vx6"),
                 (_("VX7 Commander Files") + " (*.vx7)", "*.vx7")]
        filen = platform.get_platform().gui_open_file(types=types)
        if not filen:
            return
        eset = self.get_current_editorset()
        count = eset.do_import(filen)
        reporting.report_model_usage(eset.rthread.radio, "import", count > 0)
    def do_repeaterbook_prompt(self):
        """Prompt for a RepeaterBook state/county/band query, persisting
        the chosen codes in the config.  Returns True to proceed, False
        when the dialog is cancelled."""
        if not CONF.get_bool("has_seen_credit", "repeaterbook"):
            # One-time attribution dialog for the free service
            d = gtk.MessageDialog(parent=self, buttons=gtk.BUTTONS_OK)
            d.set_markup("<big><big><b>RepeaterBook</b></big>\r\n" + \
                             "<i>North American Repeater Directory</i></big>")
            d.format_secondary_markup("For more information about this " +\
                                          "free service, please go to\r\n" +\
                                          "http://www.repeaterbook.com")
            d.run()
            d.destroy()
            CONF.set_bool("has_seen_credit", True, "repeaterbook")
        default_state = "Oregon"
        default_county = "--All--"
        default_band = "--All--"
        try:
            # Map previously stored FIPS/band codes back to display names
            try:
                code = int(CONF.get("state", "repeaterbook"))
            except:
                code = CONF.get("state", "repeaterbook")
            for k,v in fips.FIPS_STATES.items():
                if code == v:
                    default_state = k
                    break
            code = CONF.get("county", "repeaterbook")
            for k,v in fips.FIPS_COUNTIES[fips.FIPS_STATES[default_state]].items():
                if code == v:
                    default_county = k
                    break
            code = int(CONF.get("band", "repeaterbook"))
            for k,v in RB_BANDS.items():
                if code == v:
                    default_band = k
                    break
        except:
            pass
        state = miscwidgets.make_choice(sorted(fips.FIPS_STATES.keys()),
                                        False, default_state)
        county = miscwidgets.make_choice(sorted(fips.FIPS_COUNTIES[fips.FIPS_STATES[default_state]].keys()),
                                         False, default_county)
        band = miscwidgets.make_choice(sorted(RB_BANDS.keys(), key=key_bands),
                                       False, default_band)
        def _changed(box, county):
            # Repopulate the county chooser when the state changes
            state = fips.FIPS_STATES[box.get_active_text()]
            county.get_model().clear()
            for fips_county in sorted(fips.FIPS_COUNTIES[state].keys()):
                county.append_text(fips_county)
            county.set_active(0)
        state.connect("changed", _changed, county)
        d = inputdialog.FieldDialog(title=_("RepeaterBook Query"), parent=self)
        d.add_field("State", state)
        d.add_field("County", county)
        d.add_field("Band", band)
        r = d.run()
        d.destroy()
        if r != gtk.RESPONSE_OK:
            return False
        code = fips.FIPS_STATES[state.get_active_text()]
        county_id = fips.FIPS_COUNTIES[code][county.get_active_text()]
        freq = RB_BANDS[band.get_active_text()]
        CONF.set("state", str(code), "repeaterbook")
        CONF.set("county", str(county_id), "repeaterbook")
        CONF.set("band", str(freq), "repeaterbook")
        return True
    def do_repeaterbook(self, do_import):
        """Query RepeaterBook with the saved settings, download the CSV,
        and either import it into the current tab or open it read-only."""
        self.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
        if not self.do_repeaterbook_prompt():
            self.window.set_cursor(None)
            return
        try:
            code = "%02i" % int(CONF.get("state", "repeaterbook"))
        except:
            try:
                code = CONF.get("state", "repeaterbook")
            except:
                code = '41' # Oregon default
        try:
            county = CONF.get("county", "repeaterbook")
        except:
            county = '%' # --All-- default
        try:
            band = int(CONF.get("band", "repeaterbook"))
        except:
            band = 14 # 2m default
        # "%%" survives the %-formatting below as a literal "%", the
        # service's wildcard character
        query = "http://www.repeaterbook.com/repeaters/downloads/chirp.php?" + \
            "func=default&state_id=%s&band=%s&freq=%%&band6=%%&loc=%%" + \
            "&county_id=%s&status_id=%%&features=%%&coverage=%%&use=%%"
        query = query % (code, band and band or "%%", county and county or "%%")
        # Do this in case the import process is going to take a while
        # to make sure we process events leading up to this
        gtk.gdk.window_process_all_updates()
        while gtk.events_pending():
            gtk.main_iteration(False)
        fn = tempfile.mktemp(".csv")
        filename, headers = urllib.urlretrieve(query, fn)
        if not os.path.exists(filename):
            print "Failed, headers were:"
            print str(headers)
            common.show_error(_("RepeaterBook query failed"))
            self.window.set_cursor(None)
            return
        class RBRadio(generic_csv.CSVRadio,
                      chirp_common.NetworkSourceRadio):
            VENDOR = "RepeaterBook"
            MODEL = ""
        try:
            # Validate CSV
            radio = RBRadio(filename)
            if radio.errors:
                reporting.report_misc_error("repeaterbook",
                                            ("query=%s\n" % query) +
                                            ("\n") +
                                            ("\n".join(radio.errors)))
        except errors.InvalidDataError, e:
            common.show_error(str(e))
            self.window.set_cursor(None)
            return
        except Exception, e:
            # NOTE(review): if RBRadio() itself raised here, 'radio' is
            # unbound and the report call below will NameError -- verify.
            common.log_exception()
        reporting.report_model_usage(radio, "import", True)
        self.window.set_cursor(None)
        if do_import:
            eset = self.get_current_editorset()
            count = eset.do_import(filename)
        else:
            self.do_open_live(radio, read_only=True)
def do_przemienniki_prompt(self):
d = inputdialog.FieldDialog(title='przemienniki.net query',
parent=self)
fields = {
"Country":
(miscwidgets.make_choice(['by', 'cz', 'de', 'lt', 'pl',
'sk', 'uk'], False),
lambda x: str(x.get_active_text())),
"Band":
(miscwidgets.make_choice(['10m', '4m', '6m', '2m', '70cm',
'23cm', '13cm', '3cm'], False, '2m'),
lambda x: str(x.get_active_text())),
"Mode":
(miscwidgets.make_choice(['fm', 'dv'], False),
lambda x: str(x.get_active_text())),
"Only Working":
(miscwidgets.make_choice(['', 'yes'], False),
lambda x: str(x.get_active_text())),
"Latitude": (gtk.Entry(), lambda x: float(x.get_text())),
"Longitude": (gtk.Entry(), lambda x: float(x.get_text())),
"Range": (gtk.Entry(), lambda x: int(x.get_text())),
}
for name in sorted(fields.keys()):
value, fn = fields[name]
d.add_field(name, value)
while d.run() == gtk.RESPONSE_OK:
query = "http://przemienniki.net/export/chirp.csv?"
args = []
for name, (value, fn) in fields.items():
if isinstance(value, gtk.Entry):
contents = value.get_text()
else:
contents = value.get_active_text()
if contents:
try:
_value = fn(value)
except ValueError:
common.show_error(_("Invalid value for %s") % name)
query = None
continue
args.append("=".join((name.replace(" ", "").lower(),
contents)))
query += "&".join(args)
print query
d.destroy()
return query
d.destroy()
return query
    def do_przemienniki(self, do_import):
        """Run a przemienniki.net query, download the CSV, and either
        import it into the current tab or open it read-only."""
        url = self.do_przemienniki_prompt()
        if not url:
            return
        fn = tempfile.mktemp(".csv")
        filename, headers = urllib.urlretrieve(url, fn)
        if not os.path.exists(filename):
            print "Failed, headers were:"
            print str(headers)
            common.show_error(_("Query failed"))
            return
        class PRRadio(generic_csv.CSVRadio,
                      chirp_common.NetworkSourceRadio):
            VENDOR = "przemienniki.net"
            MODEL = ""
        try:
            radio = PRRadio(filename)
        except Exception, e:
            common.show_error(str(e))
            return
        if do_import:
            eset = self.get_current_editorset()
            count = eset.do_import(filename)
        else:
            self.do_open_live(radio, read_only=True)
    def do_rfinder_prompt(self):
        """Collect and validate RFinder credentials/location, storing
        them in the config.  Returns True when all fields validate.
        The digit prefix on each key only controls display order;
        k[1:] is the real config key."""
        fields = {"1Email" : (gtk.Entry(),
                              lambda x: "@" in x),
                  "2Password" : (gtk.Entry(),
                                 lambda x: x),
                  "3Latitude" : (gtk.Entry(),
                                 lambda x: float(x) < 90 and \
                                     float(x) > -90),
                  "4Longitude": (gtk.Entry(),
                                 lambda x: float(x) < 180 and \
                                     float(x) > -180),
                  "5Range_in_Miles": (gtk.Entry(),
                                      lambda x: int(x) > 0 and int(x) < 5000),
                  }
        d = inputdialog.FieldDialog(title="RFinder Login", parent=self)
        for k in sorted(fields.keys()):
            d.add_field(k[1:].replace("_", " "), fields[k][0])
            fields[k][0].set_text(CONF.get(k[1:], "rfinder") or "")
            # Hide typed characters for the password entry only
            fields[k][0].set_visibility(k != "2Password")
        while d.run() == gtk.RESPONSE_OK:
            valid = True
            for k in sorted(fields.keys()):
                widget, validator = fields[k]
                try:
                    if validator(widget.get_text()):
                        CONF.set(k[1:], widget.get_text(), "rfinder")
                        continue
                except Exception:
                    pass
                common.show_error("Invalid value for %s" % k[1:])
                valid = False
                break
            if valid:
                d.destroy()
                return True
        d.destroy()
        return False
    def do_rfinder(self, do_import):
        """Query RFinder with the stored credentials and either import
        via the rfinder:// pseudo-URL or open the live radio read-only."""
        self.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
        if not self.do_rfinder_prompt():
            self.window.set_cursor(None)
            return
        lat = CONF.get_float("Latitude", "rfinder")
        lon = CONF.get_float("Longitude", "rfinder")
        passwd = CONF.get("Password", "rfinder")
        email = CONF.get("Email", "rfinder")
        miles = CONF.get_int("Range_in_Miles", "rfinder")
        # Do this in case the import process is going to take a while
        # to make sure we process events leading up to this
        gtk.gdk.window_process_all_updates()
        while gtk.events_pending():
            gtk.main_iteration(False)
        if do_import:
            eset = self.get_current_editorset()
            count = eset.do_import("rfinder://%s/%s/%f/%f/%i" % (email, passwd, lat, lon, miles))
        else:
            from chirp import rfinder
            radio = rfinder.RFinderRadio(None)
            radio.set_params((lat, lon), miles, email, passwd)
            self.do_open_live(radio, read_only=True)
        self.window.set_cursor(None)
    def do_radioreference_prompt(self):
        """Collect RadioReference.com credentials and zipcode, storing
        them in the config.  Digit prefixes on the keys only control
        display order; k[1:] is the real config key."""
        fields = {"1Username" : (gtk.Entry(), lambda x: x),
                  "2Password" : (gtk.Entry(), lambda x: x),
                  "3Zipcode" : (gtk.Entry(), lambda x: x),
                  }
        d = inputdialog.FieldDialog(title=_("RadioReference.com Query"),
                                    parent=self)
        for k in sorted(fields.keys()):
            d.add_field(k[1:], fields[k][0])
            fields[k][0].set_text(CONF.get(k[1:], "radioreference") or "")
            # Hide typed characters for the password entry only
            fields[k][0].set_visibility(k != "2Password")
        while d.run() == gtk.RESPONSE_OK:
            valid = True
            for k in sorted(fields.keys()):
                widget, validator = fields[k]
                try:
                    if validator(widget.get_text()):
                        CONF.set(k[1:], widget.get_text(), "radioreference")
                        continue
                except Exception:
                    pass
                common.show_error("Invalid value for %s" % k[1:])
                valid = False
                break
            if valid:
                d.destroy()
                return True
        d.destroy()
        return False
    def do_radioreference(self, do_import):
        """Query RadioReference.com with the stored credentials and
        either import via the radioreference:// pseudo-URL or open the
        live radio read-only."""
        self.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
        if not self.do_radioreference_prompt():
            self.window.set_cursor(None)
            return
        username = CONF.get("Username", "radioreference")
        passwd = CONF.get("Password", "radioreference")
        zipcode = CONF.get("Zipcode", "radioreference")
        # Do this in case the import process is going to take a while
        # to make sure we process events leading up to this
        gtk.gdk.window_process_all_updates()
        while gtk.events_pending():
            gtk.main_iteration(False)
        if do_import:
            eset = self.get_current_editorset()
            count = eset.do_import("radioreference://%s/%s/%s" % (zipcode, username, passwd))
        else:
            try:
                from chirp import radioreference
                radio = radioreference.RadioReferenceRadio(None)
                radio.set_params(zipcode, username, passwd)
                self.do_open_live(radio, read_only=True)
            except errors.RadioError, e:
                common.show_error(e)
        self.window.set_cursor(None)
    def do_export(self):
        """Export the current tab to CSV or .chirp, defaulting the name
        to the current file's basename (sans extension)."""
        types = [(_("CSV Files") + " (*.csv)", "csv"),
                 (_("CHIRP Files") + " (*.chirp)", "chirp"),
                 ]
        eset = self.get_current_editorset()
        if os.path.exists(eset.filename):
            base = os.path.basename(eset.filename)
            if "." in base:
                base = base[:base.rindex(".")]
            defname = base
        else:
            defname = "radio"
        filen = platform.get_platform().gui_save_file(default_name=defname,
                                                      types=types)
        if not filen:
            return
        if os.path.exists(filen):
            dlg = inputdialog.OverwriteDialog(filen)
            owrite = dlg.run()
            dlg.destroy()
            if owrite != gtk.RESPONSE_OK:
                return
            os.remove(filen)
        count = eset.do_export(filen)
        reporting.report_model_usage(eset.rthread.radio, "export", count > 0)
    def do_about(self):
        """Show the About dialog with version info and credits."""
        d = gtk.AboutDialog()
        d.set_transient_for(self)
        import sys
        verinfo = "GTK %s\nPyGTK %s\nPython %s\n" % ( \
            ".".join([str(x) for x in gtk.gtk_version]),
            ".".join([str(x) for x in gtk.pygtk_version]),
            sys.version.split()[0])
        d.set_name("CHIRP")
        d.set_version(CHIRP_VERSION)
        d.set_copyright("Copyright 2013 Dan Smith (KK7DS)")
        d.set_website("http://chirp.danplanet.com")
        d.set_authors(("Dan Smith KK7DS <[email protected]>",
                       _("With significant contributions from:"),
                       "Tom KD7LXL",
                       "Marco IZ3GME",
                       "Jim KC9HI"
                       ))
        d.set_translator_credits("Polish: Grzegorz SQ2RBY" +
                                 os.linesep +
                                 "Italian: Fabio IZ2QDH" +
                                 os.linesep +
                                 "Dutch: Michael PD4MT" +
                                 os.linesep +
                                 "German: Benjamin HB9EUK" +
                                 os.linesep +
                                 "Hungarian: Attila HA7JA" +
                                 os.linesep +
                                 "Russian: Dmitry Slukin" +
                                 os.linesep +
                                 "Portuguese (BR): Crezivando PP7CJ")
        d.set_comments(verinfo)
        d.run()
        d.destroy()
    def do_documentation(self):
        """Show a dialog pointing the user at the online documentation."""
        d = gtk.MessageDialog(buttons=gtk.BUTTONS_OK, parent=self,
                              type=gtk.MESSAGE_INFO)
        d.set_markup("<b><big>" + _("CHIRP Documentation") + "</big></b>\r\n")
        msg = _("Documentation for CHIRP, including FAQs, and help for common "
                "problems is available on the CHIRP web site, please go to\n\n"
                "<a href=\"http://chirp.danplanet.com/projects/chirp/wiki/"
                "Documentation\">"
                "http://chirp.danplanet.com/projects/chirp/wiki/"
                "Documentation</a>\n")
        # CRLF keeps line breaks visible in the Pango markup rendering.
        d.format_secondary_markup(msg.replace("\n","\r\n"))
        d.run()
        d.destroy()
    def do_columns(self):
        """Let the user choose which memory-editor columns are visible.

        Builds a checkbox list of the current editor's supported columns,
        applies the selection immediately, and persists it per radio driver
        in the "memedit_columns" config section.
        """
        eset = self.get_current_editorset()
        driver = directory.get_driver(eset.rthread.radio.__class__)
        radio_name = "%s %s %s" % (eset.rthread.radio.VENDOR,
                                   eset.rthread.radio.MODEL,
                                   eset.rthread.radio.VARIANT)
        d = gtk.Dialog(title=_("Select Columns"),
                       parent=self,
                       buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK,
                                gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
        vbox = gtk.VBox()
        vbox.show()
        sw = gtk.ScrolledWindow()
        sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
        sw.add_with_viewport(vbox)
        sw.show()
        d.vbox.pack_start(sw, 1, 1, 1)
        d.set_size_request(-1, 300)
        d.set_resizable(False)
        label = gtk.Label(_("Visible columns for {radio}").format(radio=radio_name))
        label.show()
        vbox.pack_start(label)
        fields = []
        memedit = eset.get_current_editor() #.editors["memedit"]
        unsupported = memedit.get_unsupported_columns()
        for colspec in memedit.cols:
            # Skip internal ("_"-prefixed) and radio-unsupported columns.
            if colspec[0].startswith("_"):
                continue
            elif colspec[0] in unsupported:
                continue
            label = colspec[0]
            visible = memedit.get_column_visible(memedit.col(label))
            widget = gtk.CheckButton(label)
            widget.set_active(visible)
            fields.append(widget)
            vbox.pack_start(widget, 1, 1, 1)
            widget.show()
        res = d.run()
        selected_columns = []
        if res == gtk.RESPONSE_OK:
            for widget in fields:
                colnum = memedit.col(widget.get_label())
                memedit.set_column_visible(colnum, widget.get_active())
                if widget.get_active():
                    selected_columns.append(widget.get_label())
        d.destroy()
        # NOTE(review): on Cancel this persists an empty column list for the
        # driver -- confirm whether the previous setting should be kept.
        CONF.set(driver, ",".join(selected_columns), "memedit_columns")
    def do_hide_unused(self, action):
        """Toggle hiding of unused fields in memory editors.

        With no editor set open, only the preference is persisted; otherwise
        it is applied to every memedit-type editor of the current set.
        """
        eset = self.get_current_editorset()
        if eset is None:
            conf = config.get("memedit")
            conf.set_bool("hide_unused", action.get_active())
        else:
            for editortype, editor in eset.editors.iteritems():
                if "memedit" in editortype:
                    editor.set_hide_unused(action.get_active())
def do_clearq(self):
eset = self.get_current_editorset()
eset.rthread.flush()
def do_copy(self, cut):
eset = self.get_current_editorset()
eset.get_current_editor().copy_selection(cut)
def do_paste(self):
eset = self.get_current_editorset()
eset.get_current_editor().paste_selection()
def do_delete(self):
eset = self.get_current_editorset()
eset.get_current_editor().copy_selection(True)
    def do_toggle_report(self, action):
        """Toggle anonymous usage reporting, confirming before disabling.

        If the user answers "No" to the confirmation, the toggle is reverted.
        The setting is stored inverted as "no_report".
        """
        if not action.get_active():
            d = gtk.MessageDialog(buttons=gtk.BUTTONS_YES_NO,
                                  parent=self)
            d.set_markup("<b><big>" + _("Reporting is disabled") + "</big></b>")
            msg = _("The reporting feature of CHIRP is designed to help "
                    "<u>improve quality</u> by allowing the authors to focus "
                    "on the radio drivers used most often and errors "
                    "experienced by the users. The reports contain no "
                    "identifying information and are used only for statistical "
                    "purposes by the authors. Your privacy is extremely "
                    "important, but <u>please consider leaving this feature "
                    "enabled to help make CHIRP better!</u>\n\n<b>Are you "
                    "sure you want to disable this feature?</b>")
            d.format_secondary_markup(msg.replace("\n", "\r\n"))
            r = d.run()
            d.destroy()
            if r == gtk.RESPONSE_NO:
                # User changed their mind: flip the toggle back on.
                action.set_active(not action.get_active())
        conf = config.get()
        conf.set_bool("no_report", not action.get_active())
def do_toggle_no_smart_tmode(self, action):
CONF.set_bool("no_smart_tmode", not action.get_active(), "memedit")
def do_toggle_developer(self, action):
conf = config.get()
conf.set_bool("developer", action.get_active(), "state")
for name in ["viewdeveloper", "loadmod"]:
devaction = self.menu_ag.get_action(name)
devaction.set_visible(action.get_active())
    def do_change_language(self):
        """Let the user pick a UI language; takes effect after restart."""
        langs = ["Auto", "English", "Polish", "Italian", "Dutch", "German",
                 "Hungarian", "Russian", "Portuguese (BR)"]
        d = inputdialog.ChoiceDialog(langs, parent=self,
                                     title="Choose Language")
        d.label.set_text(_("Choose a language or Auto to use the "
                           "operating system default. You will need to "
                           "restart the application before the change "
                           "will take effect"))
        d.label.set_line_wrap(True)
        r = d.run()
        if r == gtk.RESPONSE_OK:
            print "Chose language %s" % d.choice.get_active_text()
            conf = config.get()
            conf.set("language", d.choice.get_active_text(), "state")
        d.destroy()
    def load_module(self):
        """Load and execute a Python driver module picked by the user.

        Developer-mode feature: executes the chosen file in this module's
        global namespace so the driver it defines registers itself.
        SECURITY: this runs arbitrary code by design; it is only reachable
        when developer functions are enabled.
        """
        types = [(_("Python Modules") + "*.py", "*.py")]
        filen = platform.get_platform().gui_open_file(types=types)
        if not filen:
            return
        # We're in development mode, so we need to tell the directory to
        # allow a loaded module to override an existing driver, against
        # its normal better judgement
        directory.enable_reregistrations()
        try:
            module = file(filen)
            code = module.read()
            module.close()
            pyc = compile(code, filen, 'exec')
            # See this for why:
            # http://stackoverflow.com/questions/2904274/globals-and-locals-in-python-exec
            exec(pyc, globals(), globals())
        except Exception, e:
            common.log_exception()
            common.show_error("Unable to load module: %s" % e)
    def mh(self, _action, *args):
        """Central menu handler: dispatch on the gtk.Action's name.

        All menu items funnel through here; the action name selects the
        operation. After any handled action the tab state is refreshed.
        """
        action = _action.get_name()
        if action == "quit":
            gtk.main_quit()
        elif action == "new":
            self.do_new()
        elif action == "open":
            self.do_open()
        elif action == "save":
            self.do_save()
        elif action == "saveas":
            self.do_saveas()
        elif action.startswith("download"):
            self.do_download(*args)
        elif action.startswith("upload"):
            self.do_upload(*args)
        elif action == "close":
            self.do_close()
        elif action == "import":
            self.do_import()
        # "i"-prefixed actions import into the current tab; "q"-prefixed
        # ones open the query result as a new live tab.
        elif action in ["qrfinder", "irfinder"]:
            self.do_rfinder(action[0] == "i")
        elif action in ["qradioreference", "iradioreference"]:
            self.do_radioreference(action[0] == "i")
        elif action == "export":
            self.do_export()
        elif action in ["qrbook", "irbook"]:
            self.do_repeaterbook(action[0] == "i")
        elif action in ["qpr", "ipr"]:
            self.do_przemienniki(action[0] == "i")
        elif action == "about":
            self.do_about()
        elif action == "documentation":
            self.do_documentation()
        elif action == "columns":
            self.do_columns()
        elif action == "hide_unused":
            self.do_hide_unused(_action)
        elif action == "cancelq":
            self.do_clearq()
        elif action == "report":
            self.do_toggle_report(_action)
        elif action == "channel_defaults":
            # The memedit thread also has an instance of bandplans.
            bp = bandplans.BandPlans(CONF)
            bp.select_bandplan(self)
        elif action == "no_smart_tmode":
            self.do_toggle_no_smart_tmode(_action)
        elif action == "developer":
            self.do_toggle_developer(_action)
        elif action in ["cut", "copy", "paste", "delete",
                        "move_up", "move_dn", "exchange",
                        "devshowraw", "devdiffraw"]:
            # Editor-local actions are forwarded to the focused editor.
            self.get_current_editorset().get_current_editor().hotkey(_action)
        elif action == "devdifftab":
            self.do_diff_radio()
        elif action == "language":
            self.do_change_language()
        elif action == "loadmod":
            self.load_module()
        else:
            # Unknown action: do nothing, and skip the tab refresh below.
            return
        self.ev_tab_switched()
    def make_menubar(self):
        """Build the application menu bar via gtk.UIManager.

        Defines the menu layout as UIManager XML, registers the plain and
        toggle actions (toggles initialized from saved configuration),
        wires everything to self.mh, and returns the menubar widget.
        """
        menu_xml = """
<ui>
  <menubar name="MenuBar">
    <menu action="file">
      <menuitem action="new"/>
      <menuitem action="open"/>
      <menu action="openstock" name="openstock"/>
      <menu action="recent" name="recent"/>
      <menuitem action="save"/>
      <menuitem action="saveas"/>
      <menuitem action="loadmod"/>
      <separator/>
      <menuitem action="import"/>
      <menuitem action="export"/>
      <separator/>
      <menuitem action="close"/>
      <menuitem action="quit"/>
    </menu>
    <menu action="edit">
      <menuitem action="cut"/>
      <menuitem action="copy"/>
      <menuitem action="paste"/>
      <menuitem action="delete"/>
      <separator/>
      <menuitem action="move_up"/>
      <menuitem action="move_dn"/>
      <menuitem action="exchange"/>
    </menu>
    <menu action="view">
      <menuitem action="columns"/>
      <menuitem action="hide_unused"/>
      <menuitem action="no_smart_tmode"/>
      <menu action="viewdeveloper">
        <menuitem action="devshowraw"/>
        <menuitem action="devdiffraw"/>
        <menuitem action="devdifftab"/>
      </menu>
      <menuitem action="language"/>
    </menu>
    <menu action="radio" name="radio">
      <menuitem action="download"/>
      <menuitem action="upload"/>
      <menu action="importsrc" name="importsrc">
        <menuitem action="iradioreference"/>
        <menuitem action="irbook"/>
        <menuitem action="ipr"/>
        <menuitem action="irfinder"/>
      </menu>
      <menu action="querysrc" name="querysrc">
        <menuitem action="qradioreference"/>
        <menuitem action="qrbook"/>
        <menuitem action="qpr"/>
        <menuitem action="qrfinder"/>
      </menu>
      <menu action="stock" name="stock"/>
      <separator/>
      <menuitem action="channel_defaults"/>
      <separator/>
      <menuitem action="cancelq"/>
    </menu>
    <menu action="help">
      <menuitem action="about"/>
      <menuitem action="documentation"/>
      <menuitem action="report"/>
      <menuitem action="developer"/>
    </menu>
  </menubar>
</ui>
"""
        # (name, stock-id, label, accelerator, tooltip, callback)
        actions = [\
            ('file', None, _("_File"), None, None, self.mh),
            ('new', gtk.STOCK_NEW, None, None, None, self.mh),
            ('open', gtk.STOCK_OPEN, None, None, None, self.mh),
            ('openstock', None, _("Open stock config"), None, None, self.mh),
            ('recent', None, _("_Recent"), None, None, self.mh),
            ('save', gtk.STOCK_SAVE, None, None, None, self.mh),
            ('saveas', gtk.STOCK_SAVE_AS, None, None, None, self.mh),
            ('loadmod', None, _("Load Module"), None, None, self.mh),
            ('close', gtk.STOCK_CLOSE, None, None, None, self.mh),
            ('quit', gtk.STOCK_QUIT, None, None, None, self.mh),
            ('edit', None, _("_Edit"), None, None, self.mh),
            ('cut', None, _("_Cut"), "<Ctrl>x", None, self.mh),
            ('copy', None, _("_Copy"), "<Ctrl>c", None, self.mh),
            ('paste', None, _("_Paste"), "<Ctrl>v", None, self.mh),
            ('delete', None, _("_Delete"), "Delete", None, self.mh),
            ('move_up', None, _("Move _Up"), "<Control>Up", None, self.mh),
            ('move_dn', None, _("Move Dow_n"), "<Control>Down", None, self.mh),
            ('exchange', None, _("E_xchange"), "<Control><Shift>x", None, self.mh),
            ('view', None, _("_View"), None, None, self.mh),
            ('columns', None, _("Columns"), None, None, self.mh),
            ('viewdeveloper', None, _("Developer"), None, None, self.mh),
            ('devshowraw', None, _('Show raw memory'), "<Control><Shift>r", None, self.mh),
            ('devdiffraw', None, _("Diff raw memories"), "<Control><Shift>d", None, self.mh),
            ('devdifftab', None, _("Diff tabs"), "<Control><Shift>t", None, self.mh),
            ('language', None, _("Change language"), None, None, self.mh),
            ('radio', None, _("_Radio"), None, None, self.mh),
            ('download', None, _("Download From Radio"), "<Alt>d", None, self.mh),
            ('upload', None, _("Upload To Radio"), "<Alt>u", None, self.mh),
            ('import', None, _("Import"), "<Alt>i", None, self.mh),
            ('export', None, _("Export"), "<Alt>x", None, self.mh),
            ('importsrc', None, _("Import from data source"), None, None, self.mh),
            ('iradioreference', None, _("RadioReference.com"), None, None, self.mh),
            ('irfinder', None, _("RFinder"), None, None, self.mh),
            ('irbook', None, _("RepeaterBook"), None, None, self.mh),
            ('ipr', None, _("przemienniki.net"), None, None, self.mh),
            ('querysrc', None, _("Query data source"), None, None, self.mh),
            ('qradioreference', None, _("RadioReference.com"), None, None, self.mh),
            ('qrfinder', None, _("RFinder"), None, None, self.mh),
            ('qpr', None, _("przemienniki.net"), None, None, self.mh),
            ('qrbook', None, _("RepeaterBook"), None, None, self.mh),
            ('export_chirp', None, _("CHIRP Native File"), None, None, self.mh),
            ('export_csv', None, _("CSV File"), None, None, self.mh),
            ('stock', None, _("Import from stock config"), None, None, self.mh),
            ('channel_defaults', None, _("Channel defaults"), None, None, self.mh),
            ('cancelq', gtk.STOCK_STOP, None, "Escape", None, self.mh),
            ('help', None, _('Help'), None, None, self.mh),
            ('about', gtk.STOCK_ABOUT, None, None, None, self.mh),
            ('documentation', None, _("Documentation"), None, None, self.mh),
            ]
        conf = config.get()
        # Initial states for the toggle items, read from saved config.
        re = not conf.get_bool("no_report");
        hu = conf.get_bool("hide_unused", "memedit")
        dv = conf.get_bool("developer", "state")
        st = not conf.get_bool("no_smart_tmode", "memedit")
        toggles = [\
            ('report', None, _("Report statistics"), None, None, self.mh, re),
            ('hide_unused', None, _("Hide Unused Fields"), None, None, self.mh, hu),
            ('no_smart_tmode', None, _("Smart Tone Modes"), None, None, self.mh, st),
            ('developer', None, _("Enable Developer Functions"), None, None, self.mh, dv),
            ]
        self.menu_uim = gtk.UIManager()
        self.menu_ag = gtk.ActionGroup("MenuBar")
        self.menu_ag.add_actions(actions)
        self.menu_ag.add_toggle_actions(toggles)
        self.menu_uim.insert_action_group(self.menu_ag, 0)
        self.menu_uim.add_ui_from_string(menu_xml)
        self.add_accel_group(self.menu_uim.get_accel_group())
        self.recentmenu = self.menu_uim.get_widget("/MenuBar/file/recent")
        # Initialize
        self.do_toggle_developer(self.menu_ag.get_action("developer"))
        return self.menu_uim.get_widget("/MenuBar")
def make_tabs(self):
self.tabs = gtk.Notebook()
return self.tabs
    def close_out(self):
        """Close every open tab, then quit the main loop.

        Returns False (and aborts the shutdown) if any tab refuses to
        close because of unsaved modifications; True otherwise.
        """
        num = self.tabs.get_n_pages()
        # Close from the last tab backwards so page indices stay valid.
        while num > 0:
            num -= 1
            print "Closing %i" % num
            try:
                self.do_close(self.tabs.get_nth_page(num))
            except ModifiedError:
                return False
        gtk.main_quit()
        return True
def make_status_bar(self):
box = gtk.HBox(False, 2)
self.sb_general = gtk.Statusbar()
self.sb_general.set_has_resize_grip(False)
self.sb_general.show()
box.pack_start(self.sb_general, 1,1,1)
self.sb_radio = gtk.Statusbar()
self.sb_radio.set_has_resize_grip(True)
self.sb_radio.show()
box.pack_start(self.sb_radio, 1,1,1)
box.show()
return box
def ev_delete(self, window, event):
if not self.close_out():
return True # Don't exit
def ev_destroy(self, window):
if not self.close_out():
return True # Don't exit
    def setup_extra_hotkeys(self):
        """Register additional accelerators not defined in the menu XML.

        The actions list is currently an empty placeholder; add
        (action_name, key, function) tuples to bind extra hotkeys.
        """
        accelg = self.menu_uim.get_accel_group()
        # Helper forwarding an action to the memory editor's hotkey handler.
        memedit = lambda a: self.get_current_editorset().editors["memedit"].hotkey(a)
        actions = [
            # ("action_name", "key", function)
            ]
        for name, key, fn in actions:
            a = gtk.Action(name, name, name, "")
            a.connect("activate", fn)
            self.menu_ag.add_action_with_accel(a, key)
            a.set_accel_group(accelg)
            a.connect_accelerator()
    def _set_icon(self):
        """Set the window icon from the install tree or the system path."""
        execpath = platform.get_platform().executable_path()
        path = os.path.abspath(os.path.join(execpath, "share", "chirp.png"))
        if not os.path.exists(path):
            # Fall back to the packaged system-wide icon location.
            path = "/usr/share/pixmaps/chirp.png"
        if os.path.exists(path):
            self.set_icon_from_file(path)
        else:
            print "Icon %s not found" % path
    def _updates(self, version):
        """Notify the user when the server reports a different version.

        Rate-limited to one dialog per seven days via the
        "last_update_check" timestamp in the state config.
        NOTE(review): any version != CHIRP_VERSION triggers the dialog,
        including older ones -- confirm that is intended.
        """
        if not version:
            return
        if version == CHIRP_VERSION:
            return
        print "Server reports version %s is available" % version
        # Report new updates every seven days
        intv = 3600 * 24 * 7
        if CONF.is_defined("last_update_check", "state") and \
                (time.time() - CONF.get_int("last_update_check", "state")) < intv:
            return
        CONF.set_int("last_update_check", int(time.time()), "state")
        d = gtk.MessageDialog(buttons=gtk.BUTTONS_OK, parent=self,
                              type=gtk.MESSAGE_INFO)
        d.set_property("text",
                       _("A new version of CHIRP is available: " +
                         "{ver}. ".format(ver=version) +
                         "It is recommended that you upgrade, so " +
                         "go to http://chirp.danplanet.com soon!"))
        d.run()
        d.destroy()
    def _init_macos(self, menu_bar):
        """Integrate with the macOS global menu bar when available.

        Silently degrades (with a console note) if the optional
        gtk_osxapplication module is not installed.
        """
        try:
            import gtk_osxapplication
            macapp = gtk_osxapplication.OSXApplication()
        except ImportError, e:
            print "No MacOS support: %s" % e
            return
        # Move the in-window menu bar to the system menu bar.
        menu_bar.hide()
        macapp.set_menu_bar(menu_bar)
        # Quit/About/Documentation live in the application menu on macOS.
        quititem = self.menu_uim.get_widget("/MenuBar/file/quit")
        quititem.hide()
        aboutitem = self.menu_uim.get_widget("/MenuBar/help/about")
        macapp.insert_app_menu_item(aboutitem, 0)
        documentationitem = self.menu_uim.get_widget("/MenuBar/help/documentation")
        macapp.insert_app_menu_item(documentationitem, 0)
        macapp.set_use_quartz_accelerators(False)
        macapp.ready()
        print "Initialized MacOS support"
    def __init__(self, *args, **kwargs):
        """Build the main window: menus, tabs, status bar, saved geometry.

        Also restores the last working directory, warns once about usage
        reporting, populates recent/stock menus, and schedules an update
        check unless disabled in the state config.
        """
        gtk.Window.__init__(self, *args, **kwargs)
        # Persist window geometry on every redraw so the size survives
        # an unclean shutdown.
        def expose(window, event):
            allocation = window.get_allocation()
            CONF.set_int("window_w", allocation.width, "state")
            CONF.set_int("window_h", allocation.height, "state")
        self.connect("expose_event", expose)
        def state_change(window, event):
            CONF.set_bool(
                "window_maximized",
                event.new_window_state == gtk.gdk.WINDOW_STATE_MAXIMIZED,
                "state")
        self.connect("window-state-event", state_change)
        d = CONF.get("last_dir", "state")
        if d and os.path.isdir(d):
            platform.get_platform().set_last_dir(d)
        vbox = gtk.VBox(False, 2)
        self._recent = []
        self.menu_ag = None
        mbar = self.make_menubar()
        if os.name != "nt":
            self._set_icon()  # Windows gets the icon from the exe
            if os.uname()[0] == "Darwin":
                self._init_macos(mbar)
        vbox.pack_start(mbar, 0, 0, 0)
        self.tabs = None
        tabs = self.make_tabs()
        tabs.connect("switch-page", lambda n, _, p: self.ev_tab_switched(p))
        tabs.connect("page-removed", lambda *a: self.ev_tab_switched())
        tabs.show()
        self.ev_tab_switched()
        vbox.pack_start(tabs, 1, 1, 1)
        vbox.pack_start(self.make_status_bar(), 0, 0, 0)
        vbox.show()
        self.add(vbox)
        # Restore saved geometry, falling back to a sane default.
        try:
            width = CONF.get_int("window_w", "state")
            height = CONF.get_int("window_h", "state")
        except Exception:
            width = 800
            height = 600
        self.set_default_size(width, height)
        if CONF.get_bool("window_maximized", "state"):
            self.maximize()
        self.set_title("CHIRP")
        self.connect("delete_event", self.ev_delete)
        self.connect("destroy", self.ev_destroy)
        # One-time notice that anonymous usage reporting is on by default.
        if not CONF.get_bool("warned_about_reporting") and \
                not CONF.get_bool("no_report"):
            d = gtk.MessageDialog(buttons=gtk.BUTTONS_OK, parent=self)
            d.set_markup("<b><big>" +
                         _("Error reporting is enabled") +
                         "</big></b>")
            d.format_secondary_markup(\
                _("If you wish to disable this feature you may do so in "
                  "the <u>Help</u> menu"))
            d.run()
            d.destroy()
            CONF.set_bool("warned_about_reporting", True)
        self.update_recent_files()
        self.update_stock_configs()
        self.setup_extra_hotkeys()
        # Deliver the async update-check result on the GTK main loop.
        def updates_callback(ver):
            gobject.idle_add(self._updates, ver)
        if not CONF.get_bool("skip_update_check", "state"):
            reporting.check_for_updates(updates_callback)
| cl4u2/chirp | chirpui/mainapp.py | Python | gpl-3.0 | 67,000 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '0.1.3'
| sciunto-org/scifig | libscifig/__init__.py | Python | gpl-3.0 | 69 |
# the problem described below was fixed in 9758!
# keep_htpsit=False fails since 9473,
# on some installations (?) with:
# case A (see below in the code):
# RuntimeError: Could not locate the Fermi level!
# or the energies from the 2nd one behave strange, no convergence:
# iter: 1 18:21:49 +1.7 -3608.512512 0 19
# iter: 2 18:22:31 +1.9 -3148.936317 0
# iter: 3 18:23:13 +2.1 -2375.137532 0
# iter: 4 18:23:58 +2.4 -0.9 -1040.851545 216 11
# iter: 5 18:24:43 +2.6 -1.0 822.569589 597 14
# case B (see below in the code):
# No convergence when starting from a converged (keep_htpsit=True) run!
# WFS error grows to positive values!
# Is it an extreme case of https://trac.fysik.dtu.dk/projects/gpaw/ticket/51 ?
import os
import sys
from ase import Atoms
from gpaw import GPAW
from gpaw import ConvergenceError
from gpaw.mpi import rank
from gpaw.eigensolvers.rmm_diis_old import RMM_DIIS
from gpaw import setup_paths
# Select which regression scenario to run: 'A' (default, direct
# keep_htpsit=False run) or 'B' (converge first, then switch solver).
if len(sys.argv) == 1:
    run = 'A'
else:
    run = sys.argv[1]
assert run in ['A', 'B']
# Use setups from the $PWD and $PWD/.. first
setup_paths.insert(0, '.')
setup_paths.insert(0, '../')
positions=[
(-0.069, 0.824,-1.295), ( 0.786, 0.943,-0.752), (-0.414,-0.001,-0.865),
(-0.282,-0.674,-3.822), ( 0.018,-0.147,-4.624), (-0.113,-0.080,-3.034),
( 2.253, 1.261, 0.151), ( 2.606, 0.638,-0.539), ( 2.455, 0.790, 1.019),
( 3.106,-0.276,-1.795), ( 2.914, 0.459,-2.386), ( 2.447,-1.053,-1.919),
( 6.257,-0.625,-0.626), ( 7.107,-1.002,-0.317), ( 5.526,-1.129,-0.131),
( 5.451,-1.261,-2.937), ( 4.585,-0.957,-2.503), ( 6.079,-0.919,-2.200),
(-0.515, 3.689, 0.482), (-0.218, 3.020,-0.189), ( 0.046, 3.568, 1.382),
(-0.205, 2.640,-3.337), (-1.083, 2.576,-3.771), (-0.213, 1.885,-2.680),
( 0.132, 6.301,-0.278), ( 1.104, 6.366,-0.068), (-0.148, 5.363,-0.112),
(-0.505, 6.680,-3.285), (-0.674, 7.677,-3.447), (-0.965, 6.278,-2.517),
( 4.063, 3.342,-0.474), ( 4.950, 2.912,-0.663), ( 3.484, 2.619,-0.125),
( 2.575, 2.404,-3.170), ( 1.694, 2.841,-3.296), ( 3.049, 2.956,-2.503),
( 6.666, 2.030,-0.815), ( 7.476, 2.277,-0.316), ( 6.473, 1.064,-0.651),
( 6.860, 2.591,-3.584), ( 6.928, 3.530,-3.176), ( 6.978, 2.097,-2.754),
( 2.931, 6.022,-0.243), ( 3.732, 6.562,-0.004), ( 3.226, 5.115,-0.404),
( 2.291, 7.140,-2.455), ( 1.317, 6.937,-2.532), ( 2.586, 6.574,-1.669),
( 6.843, 5.460, 1.065), ( 7.803, 5.290, 0.852), ( 6.727, 5.424, 2.062),
( 6.896, 4.784,-2.130), ( 6.191, 5.238,-2.702), ( 6.463, 4.665,-1.259),
( 0.398, 0.691, 4.098), ( 0.047, 1.567, 3.807), ( 1.268, 0.490, 3.632),
( 2.687, 0.272, 2.641), ( 3.078, 1.126, 3.027), ( 3.376,-0.501, 2.793),
( 6.002,-0.525, 4.002), ( 6.152, 0.405, 3.660), ( 5.987,-0.447, 4.980),
( 0.649, 3.541, 2.897), ( 0.245, 4.301, 3.459), ( 1.638, 3.457, 3.084),
(-0.075, 5.662, 4.233), (-0.182, 6.512, 3.776), (-0.241, 5.961, 5.212),
( 3.243, 2.585, 3.878), ( 3.110, 2.343, 4.817), ( 4.262, 2.718, 3.780),
( 5.942, 2.582, 3.712), ( 6.250, 3.500, 3.566), ( 6.379, 2.564, 4.636),
( 2.686, 5.638, 5.164), ( 1.781, 5.472, 4.698), ( 2.454, 6.286, 5.887),
( 6.744, 5.276, 3.826), ( 6.238, 5.608, 4.632), ( 7.707, 5.258, 4.110),
( 8.573, 8.472, 0.407), ( 9.069, 7.656, 0.067), ( 8.472, 8.425, 1.397),
( 8.758, 8.245, 2.989), ( 9.294, 9.091, 3.172), ( 7.906, 8.527, 3.373),
( 4.006, 7.734, 3.021), ( 4.685, 8.238, 3.547), ( 3.468, 7.158, 3.624),
( 5.281, 6.089, 6.035), ( 5.131, 7.033, 6.378), ( 4.428, 5.704, 5.720),
( 5.067, 7.323, 0.662), ( 5.785, 6.667, 0.703), ( 4.718, 7.252, 1.585)]
prefix = 'b256H2O'
# Cubic box edge length in Angstrom for the 32-molecule water cell.
L = 9.8553729
atoms = Atoms('32(OH2)',
              positions=positions)
atoms.set_cell((L,L,L),scale_atoms=False)
atoms.set_pbc(1)
# Replicate the cell along z to get 64 molecules (256 atoms) total.
r = [1, 1, 2]
atoms = atoms.repeat(r)
n = [56 * ri for ri in r]
# nbands (>=128) is the number of bands per 32 water molecules
nbands = 2*6*11 # 132
for ri in r: nbands = nbands*ri
# the next line decreases memory usage
es = RMM_DIIS(keep_htpsit=False)
calc = GPAW(nbands=nbands,
            # uncomment next two lines to use lcao/sz
            #mode='lcao',
            #basis='sz',
            gpts=tuple(n),
            #maxiter=5,
            width = 0.01,
            eigensolver = es,
            txt=prefix + '.txt',
            )
if run == 'A':
    # Case A: run directly with keep_htpsit=False.
    atoms.set_calculator(calc)
    pot = atoms.get_potential_energy()
elif run == 'B':
    # converge first with keep_htpsit=True
    calc.set(eigensolver='rmm-diis')
    calc.set(txt=prefix + '_True.txt')
    atoms.set_calculator(calc)
    pot = atoms.get_potential_energy()
    # fails to converge with keep_htpsit=False
    calc.set(eigensolver=es)
    calc.set(maxiter=200)
    calc.set(txt=prefix + '_False.txt')
    atoms.set_calculator(calc)
    pot = atoms.get_potential_energy()
| robwarm/gpaw-symm | gpaw/test/big/scf/b256H2O/b256H2O.py | Python | gpl-3.0 | 4,905 |
#!/usr/bin/python3
# Copyright (C) 2014-2016, 2018 Rafael Senties Martinelli
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import configparser
import os
import traceback
__version__ = '18.10.16'
class CCParser(object):
    """Forgiving wrapper around ``configparser``.

    Getters never raise on missing files, sections or options: they return
    a configurable per-type default instead. ``write`` creates the file
    (and its parent directories) on demand.
    """

    def __init__(self, ini_path='', section='', debug=False):
        """
        To init CCParser you can enter a path
        and a section. If you doesn't know them yet
        you can leave them empty.

        If debug is set to True, all the exceptions
        will print its traceback.
        """
        self._debug = debug
        self._config = configparser.ConfigParser()

        # Bug fix: always initialize the path and section. Previously,
        # constructing CCParser with empty arguments (which the docstring
        # explicitly allows) left `ini_path` and `__section` unset, so
        # check_value(), every getter, get_section() and
        # get_configuration_path() raised AttributeError.
        self.ini_path = ''
        self.__section = ''

        if ini_path != '':
            self.set_configuration_path(ini_path)

        if section != '':
            self.set_section(section)

        self.__default_bool = False
        self.__default_string = ''
        self.__default_int = 0
        self.__default_float = 0.0
        self.__default_list = []

        self._accepted_true_bool = ('true', 'yes')    # must be lower case
        self._accepted_false_bool = ('false', 'no')   # must be lower case

    def __str__(self):
        return '''
CCParser instance: {}
 Configuration Path: {}
 Section: {}
 Default boolean: {}
 Default float: {}
 Default integer: {}
 Default string: {}
 Default list: {}
'''.format( repr(self),
            self.get_configuration_path(),
            self.get_section(),
            self.get_default_bool(),
            self.get_default_float(),
            self.get_default_int(),
            self.get_default_str(),
            self.get_default_list())

    def check_value(self, value):
        """
        return False if the value don't exists,
        return True if the value exists
        """
        if not os.path.exists(self.ini_path):
            return False

        try:
            self._config.read(self.ini_path)
        except Exception:
            # A damaged file (or one without sections) is treated the same
            # as a missing value so the getters fall back to the defaults.
            print("CCParser Warning: reading damaged file or file without section")
            print(traceback.format_exc())
            print()
            return False

        if not self._config.has_section(self.__section):
            return False
        elif self._config.has_option(self.__section, value):
            return True

        return False

    def get_bool(self, value):
        """
        If the value exists, return the boolean
        corresponding to the string. If it does
        not exists, or the value can not be converted
        to a boolean, return the default boolean.
        """
        if self.check_value(value):
            val = self._config.get(self.__section, value).lower()
            if val in self._accepted_false_bool:
                return False
            elif val in self._accepted_true_bool:
                return True

        return self.__default_bool

    def get_float(self, value):
        """
        If the value exists, return the float
        corresponding to the string. If it does
        not exists, or the value can not be converted
        to a float, return the default float.
        """
        if self.check_value(value):
            val = self._config.get(self.__section, value)
            try:
                return float(val)
            except ValueError:
                if self._debug:
                    print(traceback.format_exc())

        return self.__default_float

    def get_int(self, value):
        """
        If the value exists, return the integer
        corresponding to the string. If it does
        not exists, or the value can not be converted
        to a integer, return the default integer.
        """
        if self.check_value(value):
            val = self._config.get(self.__section, value)
            try:
                return int(val)
            except ValueError:
                if self._debug:
                    print(traceback.format_exc())

        return self.__default_int

    def get_list(self, value):
        """
        If the value exists, return the list corresponding
        to the string (items separated by "|"). If it does
        not exists, return the default list.
        """
        if self.check_value(value):
            val = self._config.get(self.__section, value)
            try:
                return val.split("|")
            except Exception:
                if self._debug:
                    print(traceback.format_exc())

        return self.__default_list

    def get_str(self, value):
        """
        If the value exists, return the string,
        other wise return the default string.
        """
        if self.check_value(value):
            return self._config.get(self.__section, value)

        return self.__default_string

    def get_bool_defval(self, value, default):
        """
        If the value exists, return the boolean
        corresponding to the string. If it does
        not exists, or the value can not be converted
        to a boolean, return the the second argument.
        """
        if self.check_value(value):
            val = self._config.get(self.__section, value).lower()
            if val in self._accepted_false_bool:
                return False
            elif val in self._accepted_true_bool:
                return True

        return default

    def get_float_defval(self, value, default):
        """
        If the value exists, return the float
        corresponding to the string. If it does
        not exists, or the value can not be converted
        to a float, return the the second argument.
        """
        if self.check_value(value):
            val = self._config.get(self.__section, value)
            try:
                return float(val)
            except ValueError:
                if self._debug:
                    print(traceback.format_exc())

        return default

    def get_int_defval(self, value, default):
        """
        If the value exists, return the integer
        corresponding to the string. If it does
        not exists, or the value can not be converted
        to a integer, return the the second argument.
        """
        if self.check_value(value):
            val = self._config.get(self.__section, value)
            try:
                return int(val)
            except ValueError:
                if self._debug:
                    print(traceback.format_exc())

        return default

    def get_str_defval(self, value, default):
        """
        If the value exists, return the string,
        if it does not exists, return the the
        second argument.
        """
        if self.check_value(value):
            return self._config.get(self.__section, value)

        return default

    def set_configuration_path(self, ini_path):
        """
        Set the path to the configuration file.
        """
        if isinstance(ini_path, str):
            self.ini_path = ini_path
            if not os.path.exists(ini_path) and self._debug:
                print("CCParser Warning: the path to the configuration file does not exists\n")
        else:
            print("CCParser Warning: The path is not valid.\n")
            self.ini_path = ''

    def set_section(self, section):
        """
        Set the section to check for values.
        """
        self.__section = str(section)

    def set_default_float(self, value):
        """
        Set the default float to return when
        a value does not exists. By default
        it returns 0.0
        """
        self.__default_float = value

    def set_default_string(self, value):
        """
        Set the default string to return when
        a value does not exists. By default
        it returns an empty string.
        """
        self.__default_string = value

    def set_default_bool(self, value):
        """
        Set the default boolean to return when
        a value does not exists. By default
        it returns false
        """
        self.__default_bool = value

    def set_default_int(self, value):
        """
        Set the default integer to return when
        a value does not exists. By default
        it returns 0
        """
        self.__default_int = value

    def set_default_list(self, value):
        """
        Set the default list to return when
        a value does not exists. By default
        it returns an empty list.
        """
        self.__default_list = value

    def write(self, value_name, value):
        """
        Write the value name and its value.

        If the config file does not exists,
        or the directories to the path, they
        will be created.
        """
        if self.ini_path != '' and isinstance(self.ini_path, str):
            if not os.path.exists(os.path.dirname(self.ini_path)):
                os.makedirs(os.path.dirname(self.ini_path))

            if not os.path.exists(self.ini_path):
                open(self.ini_path, 'wt').close()

            try:
                self._config.read(self.ini_path)
            except Exception:
                print("CCParser Warning: reading damaged file or file without section")
                print(traceback.format_exc())
                print()
                return False

            if not self._config.has_section(self.__section):
                self._config.add_section(self.__section)

            # Lists/tuples are serialized with "|" (see get_list).
            if isinstance(value, list) or isinstance(value, tuple):
                values = '|'.join(item for item in value)
                self._config.set(self.__section, value_name, values)
            else:
                self._config.set(self.__section, value_name, str(value))

            with open(self.ini_path, 'w') as f:
                self._config.write(f)
        else:
            print(
                "CCParser Error: Trying to write the configuration without an ini path.")
            print("Configuration Path: " + str(self.get_configuration_path()))
            print()

    def get_default_bool(self):
        return self.__default_bool

    def get_default_float(self):
        return self.__default_float

    def get_default_str(self):
        return self.__default_string

    def get_default_int(self):
        return self.__default_int

    def get_default_list(self):
        return self.__default_list

    def get_section(self):
        return self.__section

    def get_configuration_path(self):
        return self.ini_path
if __name__ == '__main__':

    def test(path):
        """Smoke-test CCParser round-trips against the given ini path."""
        if os.path.exists(path):
            os.remove(path)

        cp = CCParser(path, 'test')
        print('section:', cp.get_section())
        cp.write('bool', False)
        print(cp.get_bool('bool'))
        cp.write('bool', True)
        print(cp.get_bool('bool'))
        cp.write('string1', 'this is a test')
        print(cp.get_str('string1'))
        print(cp)

    # Bug fix: use a throw-away temporary file instead of a hard-coded
    # path under a specific user's home directory, which failed on any
    # other machine and left files on that user's desktop.
    import tempfile
    test(os.path.join(tempfile.mkdtemp(), 'test.ini'))
| rsm-gh/alienware-kbl | usr/lib/python3/AKBL/CCParser.py | Python | gpl-3.0 | 12,434 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-01 20:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Switch feeding periods from per-room to per-flock: creates the
    FeedingPeriodForFlock model and removes FeedingPeriodForRoom."""
    dependencies = [
        ('flocks', '0015_auto_20170624_1312'),
        ('feeding', '0005_auto_20170625_1129'),
    ]
    operations = [
        migrations.CreateModel(
            name='FeedingPeriodForFlock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_date', models.DateField()),
                # end_date is nullable: an open-ended (ongoing) period.
                ('end_date', models.DateField(null=True)),
                ('feed_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feeding.FeedType')),
                ('flock', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='flocks.Flock')),
            ],
        ),
        # Drop the FKs first, then the old model itself.
        migrations.RemoveField(
            model_name='feedingperiodforroom',
            name='feed_type',
        ),
        migrations.RemoveField(
            model_name='feedingperiodforroom',
            name='room',
        ),
        migrations.DeleteModel(
            name='FeedingPeriodForRoom',
        ),
    ]
| forcaeluz/easy-fat | feeding/migrations/0006_auto_20170701_2013.py | Python | gpl-3.0 | 1,270 |
# -*- coding: utf-8 -*-
from ptools import *
# Download PDB entry 1F88 and write it out to a local file.
pdb1f88 = getPDB("1F88")
WritePDB(pdb1f88, "1F88.pdb")
| glamothe/ptools | Tests/get1F88.py | Python | gpl-3.0 | 102 |
from node import models
from django.forms import ModelForm
from . import cdmsportalfunc as cpf
from django.core.exceptions import ValidationError
from django import forms
class MoleculeForm(ModelForm):
    """ModelForm exposing every field of the Molecules model."""
    class Meta:
        model = models.Molecules
        fields = '__all__'
class SpecieForm(ModelForm):
    """ModelForm for Species.

    The archive/activation dates are rendered read-only because they
    are maintained by the application, not entered by the user.
    """
    datearchived = forms.DateField(
        widget=forms.TextInput(attrs={'readonly': 'readonly'})
    )
    dateactivated = forms.DateField(
        widget=forms.TextInput(attrs={'readonly': 'readonly'})
    )
    class Meta:
        model = models.Species
        fields = '__all__'
class FilterForm(ModelForm):
    """ModelForm exposing every field of the QuantumNumbersFilter model."""
    class Meta:
        model = models.QuantumNumbersFilter
        fields = '__all__'
class XsamsConversionForm(forms.Form):
    """Form that accepts an XSAMS document — either uploaded as a file or
    fetched from a URL, but not both — and converts it with an XSL
    stylesheet. The converted output ends up in ``cleaned_data['result']``.
    """
    inurl = forms.URLField(
        label='Input URL',
        required=False,
        widget=forms.TextInput(
            attrs={'size': 50,
                   'title': 'Paste here a URL that delivers an XSAMS '
                            'document.',
                   }))
    # Bug fix: FileField defaults to required=True, which made the
    # "either file OR URL" rule in clean() impossible to satisfy with a
    # URL alone. Both fields are optional; clean() enforces exactly one.
    infile = forms.FileField(required=False)
    format = forms.ChoiceField(
        choices=[("RAD 3D", "RAD 3D"), ("CSV", "CSV")], )

    def clean(self):
        """Validate that exactly one input source was supplied and run
        the stylesheet transformation.

        Raises ValidationError when both/neither source is given, when
        the URL cannot be opened, or when the transformation fails.
        """
        infile = self.cleaned_data.get('infile')
        inurl = self.cleaned_data.get('inurl')
        if (infile and inurl):
            raise ValidationError('Give either input file or URL!')
        if inurl:
            try:
                data = cpf.urlopen(inurl)
            except Exception as err:
                raise ValidationError('Could not open given URL: %s' % err)
        elif infile:
            data = infile
        else:
            raise ValidationError('Give either input file or URL!')
        try:
            self.cleaned_data['result'] = cpf.applyStylesheet2File(data)
        except Exception as err:
            raise ValidationError('Could not transform XML file: %s' % err)
        return self.cleaned_data
| cpe/VAMDC-VALD | nodes/cdms/node/forms.py | Python | gpl-3.0 | 1,963 |
"""Clean db
Revision ID: 4f8bd7cac829
Revises: 3f249e0d2769
Create Date: 2014-01-09 14:03:13.997656
"""
# revision identifiers, used by Alembic.
revision = '4f8bd7cac829'
down_revision = '3f249e0d2769'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ''' Drop the columns calendar_multiple_meetings and
    calendar_regional_meetings and rename meeting_region into
    meeting_location.
    '''
    op.drop_column('calendars', 'calendar_multiple_meetings')
    op.drop_column('calendars', 'calendar_regional_meetings')
    # Rename the column and widen its type: String(100) -> Text.
    op.alter_column(
        'meetings',
        column_name='meeting_region',
        name='meeting_location',
        type_=sa.Text,
        existing_type=sa.String(100))
def downgrade():
    ''' Add the columns calendar_multiple_meetings and
    calendar_regional_meetings and rename meeting_location into
    meeting_region.
    '''
    # Restore the boolean flags dropped by upgrade().
    op.add_column(
        'calendars',
        sa.Column(
            'calendar_multiple_meetings',
            sa.Boolean, default=False,
            nullable=False
        )
    )
    op.add_column(
        'calendars',
        sa.Column(
            'calendar_regional_meetings',
            sa.Boolean, default=False,
            nullable=False
        )
    )
    # Reverse of upgrade(): narrow Text back to String(100).
    # NOTE(review): values longer than 100 chars would be truncated or
    # rejected here, depending on the backend.
    op.alter_column(
        'meetings',
        column_name='meeting_location',
        name='meeting_region',
        type_=sa.String(100),
        existing_type=sa.Text)
| fedora-infra/fedocal | alembic/versions/4f8bd7cac829_clean_db.py | Python | gpl-3.0 | 1,420 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Takes Google's json encoded spreadsheet and prints a python dictionary keyed by
the values in the first column of the SS. ©2017 J. J. Crump, GNU general public
license
"""
import urllib2
from pprint import pprint
import re
import json
# This is the url of a sample google spreadsheet that I've published to the web. The url returns a prettyprinted json string:
ssURL = "https://spreadsheets.google.com/feeds/list/1OPNQC3xBp3iQTpjVfd6cpvvA0BpHWhb3QiNOvGFZ9z8/od6/public/basic?prettyprint=true&alt=json"
# Fetch the feed and decode it into a Python dict.
response = urllib2.urlopen(ssURL)
jsonIn = response.read()
pyDict = json.loads(jsonIn)
# One feed entry per spreadsheet row.
entryList = pyDict['feed']['entry']
# Expected column names; the first ("name") is the row key column.
fields = ["name", "city", "state", "zip"]
SSdict = {}
def parsestring(rowstring, fields):
    """Yield ``(fieldname, fieldvalue)`` tuples parsed from one row string.

    ``rowstring`` is the flattened cell content of a spreadsheet row,
    e.g. ``"city: Boston, state: MA, zip: 02101"``. ``fields`` is the
    ordered list of expected field names; the first entry (the row key
    column) is skipped. Fields absent from the string are silently
    skipped.
    """
    i = iter(fields[1:])
    # next(i) instead of i.next(): identical behavior on Python 2.6+
    # and also valid on Python 3.
    field = next(i)
    start = end = 0
    try:
        while True:
            lastfield = field
            field = next(i)
            if rowstring.find(field) == -1:
                # This field is missing from the row; keep scanning for
                # the next expected field name.
                field = lastfield
                continue
            end = rowstring.find(field)
            # Slice "label: value", drop the label up to the first ':'
            # and trim the trailing comma/whitespace separators.
            yield lastfield, re.sub('^.*?:', '', rowstring[start:end].strip().strip(',')).strip()
            start = end
    except StopIteration:
        # No more field names: emit the final field, which runs to the
        # end of the string. (lastfield == field at this point.)
        start = rowstring.find(field)
        yield lastfield, re.sub('^.*?:', '', rowstring[start:].strip().strip(',')).strip()
# Build {row key: {field: value}} for every spreadsheet entry and
# pretty-print the result.
for e in entryList:
    # dict() can consume the generator's (field, value) pairs directly;
    # no need to materialize them in an intermediate list first.
    entrydict = dict(parsestring(e['content']['$t'], fields))
    entrykey = e['title']['$t']
    SSdict[entrykey] = entrydict
pprint(SSdict)
| jjon/Google-Spreadsheet-python-scripts | GSheet2Python.py | Python | gpl-3.0 | 1,622 |
#-*- encoding: utf-8 -*-
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, RequestContext, render
from membro_profile.forms import MembroForm, MembroProfileForm, EditProfileForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from membro_profile.models import MembroProfile
from submissao.models import Submissao
def some_view(request):
    """Report whether the requesting user is logged in.

    Bug fix: the original condition was inverted — an unauthenticated
    user was told "You are logged in." and vice versa.
    """
    if request.user.is_authenticated():
        return HttpResponse("You are logged in.")
    else:
        return HttpResponse("You are not logged in.")
# Create your views here.
def register(request):
    """Handle new member registration.

    POST: validate both the user form and the profile form; on success
    create the User (hashing the raw password), attach the profile (and
    optional avatar upload) to it. GET: render blank forms.
    """
    context = RequestContext(request)
    registered = False
    if request.method == 'POST':
        membro_form = MembroForm(data=request.POST)
        membro_profile_form = MembroProfileForm(data=request.POST)
        if membro_form.is_valid() and membro_profile_form.is_valid():
            membro = membro_form.save()
            # The form stored the raw password; hash it before saving.
            membro.set_password(membro.password)
            membro.save()
            # Defer saving the profile until its user FK is set.
            membro_profile = membro_profile_form.save(commit=False)
            membro_profile.user = membro
            if 'avatar' in request.FILES:
                membro_profile.picture = request.FILES['avatar']
            membro_profile.save()
            registered = True
        else:
            print (membro_form.errors, membro_profile_form.errors)
    else:
        membro_form = MembroForm()
        membro_profile_form = MembroProfileForm()
    return render_to_response(
        'profile/register.html',
        # {'membro_form': membro_form, 'registered': registered},
        {'membro_form': membro_form, 'membro_profile_form': membro_profile_form, 'registered': registered},
        context)
def membro_login(request):
    """Authenticate a member from POSTed username/password.

    Redirects to '/' on success; inactive accounts and bad credentials
    get plain-text responses. GET renders the login page.
    """
    context = RequestContext(request)
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        membro = authenticate(username=username,password=password)
        if membro:
            if membro.is_active:
                login(request, membro)
                return HttpResponseRedirect('/')
            else:
                return HttpResponse('Sua conta ainda não foi liberada.')
        else:
            # NOTE(review): this prints the raw password to stdout —
            # a security risk; consider logging only the username.
            print ("Login e senha invalidos: {0}, {1}".format(username, password))
            return HttpResponse("Login ou Senha, Invalidos")
    else:
        # return render_to_response('profile/404.html', {}, context)
        return render_to_response('profile/login.html', {}, context)
@login_required
def user_logout(request):
    """End the current session and send the user back to the homepage."""
    logout(request)
    return HttpResponseRedirect('/')
@login_required
def profile(request):
    """Render the logged-in member's profile page."""
    context = RequestContext(request)
    print (context)
    usuario = User.objects.get(username=request.user)
    membro = MembroProfile.objects.get(user=usuario)
    # NOTE(review): .get() raises DoesNotExist instead of returning a
    # falsy value, so the else branch below appears unreachable.
    if membro:
        return render_to_response('profile/profile.html', {'m':membro}, context)
    else:
        return HttpResponse('Inscrição não encontrado')
@login_required
def edit_profile(request):
    """Let the logged-in user edit first/last name and the CPF stored on
    the related MembroProfile.

    Bug fix: the CPF was previously assigned to the User instance, which
    has no such field, so it was silently never persisted; it is now
    written to membro.membroprofile and that profile is saved.
    """
    membro = request.user
    form = EditProfileForm(
        request.POST or None,
        initial={
            'first_name': membro.first_name,
            'last_name': membro.last_name,
            'cpf': membro.membroprofile.cpf,
        }
    )
    if form.is_valid():
        membro.first_name = request.POST['first_name']
        membro.last_name = request.POST['last_name']
        membro.save()
        # Persist the CPF on the profile model, where the field lives.
        membro.membroprofile.cpf = request.POST['cpf']
        membro.membroprofile.save()
        return HttpResponseRedirect('%s'%(reverse('profile')))
    context = {
        "form": form
    }
    return render(request, 'profile/editar.html', context)
#from submissao.models import Submissao
def index(request):
    """Landing page: anonymous users get the login page; authenticated
    members get their profile index with their submitted abstracts."""
    context = RequestContext(request)
    print (str(request.user) == 'AnonymousUser')
    # NOTE(review): comparing str(request.user) against 'AnonymousUser'
    # duplicates is_authenticated(); confirm both checks are wanted.
    if str(request.user) == 'AnonymousUser':
        return render_to_response('profile/login.html', context)
    else:
        queryset = Submissao.objects.filter(autor_id=request.user.membroprofile.id or None)
        if request.user.is_authenticated():
            membro = MembroProfile.objects.filter(user__username=request.user).latest('user').user
            context["membro"] = membro
            context['lista_resumos'] = queryset
            return render_to_response('profile/index.html', context)
        else:
return render_to_response('profile/login.html', context) | pixies/academic | membro_profile/views.py | Python | gpl-3.0 | 4,644 |
# -*- coding: utf-8 -*-
class ImproperlyConfigured(Exception):
    """Raised when the application configuration is missing or invalid."""
    pass
class TaskHandlingError(Exception):
    """Raised when processing of a background task fails."""
    pass
| Outernet-Project/artexinweb | artexinweb/exceptions.py | Python | gpl-3.0 | 121 |
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime, timedelta
import json
import csv
import pytz
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template.response import TemplateResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.models import Group, Permission
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from django.views.decorators.cache import cache_page
from Map.models import *
from Map import utils, signals
from core.utils import get_config
from POS.models import POS
# Decorator to check map permissions. Takes request and map_id
# Permissions are 0 = None, 1 = View, 2 = Change
# When used without a permission=x specification, requires Change access
def require_map_permission(permission=2):
    """Decorator factory: allow the wrapped view only when the user
    holds at least *permission* (1 = view, 2 = change) on the map named
    by the view's ``map_id`` argument; raises PermissionDenied otherwise.
    """
    def _dec(view_func):
        def _view(request, map_id, *args, **kwargs):
            current_map = get_object_or_404(Map, pk=map_id)
            if current_map.get_permission(request.user) < permission:
                raise PermissionDenied
            else:
                return view_func(request, map_id, *args, **kwargs)
        # Manually copy metadata (functools.wraps equivalent) so the
        # decorated view keeps its name/docs for URLconf and debugging.
        _view.__name__ = view_func.__name__
        _view.__doc__ = view_func.__doc__
        _view.__dict__ = view_func.__dict__
        return _view
    return _dec
@login_required
@require_map_permission(permission=1)
def get_map(request, map_id):
    """Get the map and determine if we have permissions to see it.
    If we do, then return a TemplateResponse for the map. If map does not
    exist, return 404. If we don't have permission, return PermissionDenied.
    """
    current_map = get_object_or_404(Map, pk=map_id)
    context = {
        'map': current_map,
        # 1 = view-only, 2 = can edit; the template uses this value to
        # enable or hide editing controls.
        'access': current_map.get_permission(request.user),
    }
    return TemplateResponse(request, 'map.html', context)
@login_required
@require_map_permission(permission=1)
def map_checkin(request, map_id):
    """AJAX heartbeat: return map log entries newer than the client's
    posted ``loadtime`` and, for trusted in-game browsers, any
    system-add dialog HTML triggered by the pilot's movement."""
    # Initialize json return dict
    json_values = {}
    current_map = get_object_or_404(Map, pk=map_id)
    # AJAX requests should post a JSON datetime called loadtime
    # back that we use to get recent logs.
    if 'loadtime' not in request.POST:
        return HttpResponse(json.dumps({'error': "No loadtime"}),
                            mimetype="application/json")
    time_string = request.POST['loadtime']
    if time_string == 'null':
        return HttpResponse(json.dumps({'error': "No loadtime"}),
                            mimetype="application/json")
    load_time = datetime.strptime(time_string, "%Y-%m-%d %H:%M:%S.%f")
    # Timestamps are stored timezone-aware in UTC.
    load_time = load_time.replace(tzinfo=pytz.utc)
    if request.is_igb_trusted:
        # Trusted IGB: may yield dialog HTML asking to add the pilot's
        # current system to the map (or 'silent' for auto-add).
        dialog_html = _checkin_igb_trusted(request, current_map)
        if dialog_html is not None:
            json_values.update({'dialogHTML': dialog_html})
    log_list = MapLog.objects.filter(timestamp__gt=load_time,
                                     visible=True,
                                     map=current_map)
    log_string = render_to_string('log_div.html', {'logs': log_list})
    json_values.update({'logs': log_string})
    return HttpResponse(json.dumps(json_values), mimetype="application/json")
@login_required
@require_map_permission(permission=1)
def map_refresh(request, map_id):
    """
    Returns an HttpResponse with the updated systemJSON for an asynchronous
    map refresh.
    """
    if not request.is_ajax():
        raise PermissionDenied
    current_map = get_object_or_404(Map, pk=map_id)
    if request.is_igb:
        # In-game browser: refresh the pilot's active-location marker.
        char_cache_key = 'char_%s_location' % request.eve_charid
        old_location = cache.get(char_cache_key)
        if old_location:
            my_sys = get_object_or_404(System, pk=old_location[0])
            my_sys.remove_active_pilot(request.eve_charid)
        # NOTE(review): my_sys is only bound inside the branch above; if
        # the cache held no old location this line raises NameError.
        my_sys.add_active_pilot(request.user.username, request.eve_charid, request.eve_charname, request.eve_shipname, request.eve_shiptypename)
    # NOTE(review): the assignment below is a dead store, immediately
    # overwritten by the list literal.
    result = None
    result = [
        datetime.strftime(datetime.now(pytz.utc),
                          "%Y-%m-%d %H:%M:%S.%f"),
        utils.MapJSONGenerator(current_map,
                               request.user).get_systems_json()
    ]
    # TODO update active pilots
    # get users current system
    #map_sys = get_object_or_404(MapSystem, pk=ms_id)
    # if this system is on the map, update. Otherwise, don't..
    #remove_active_pilot(request.eve_charid)
    #map_sys.remove_active_pilot(request.eve_charid)
    #map_sys.add_active_pilot(request.user.username, request.eve_charid,
    #                        request.eve_charname, request.eve_shipname,
    #                        request.eve_shiptypename)
    return HttpResponse(json.dumps(result))
def log_movement(oldSys, newSys, charName, shipType, current_map, user):
    """Record a pilot jump from *oldSys* to *newSys*.

    When both systems are on *current_map* and linked by a wormhole,
    the jumping ship's mass is added to that wormhole's used-mass
    counter. A JumpLog row is always written, mapped or not.
    """
    try:
        oldSysMapId = current_map.systems.filter(system=oldSys).all()[0]
        newSysMapId = current_map.systems.filter(system=newSys).all()[0]
        wh = Wormhole.objects.filter(top__in=[oldSysMapId, newSysMapId],
                                     bottom__in=[oldSysMapId, newSysMapId]).all()[0]
        # Mass of the hull type the pilot is flying.
        shipSize = Ship.objects.get(shipname=shipType).shipmass
        if wh.mass_amount is not None:
            wh.mass_amount = (wh.mass_amount + shipSize)
        else:
            wh.mass_amount = shipSize
        wh.save()
    except Exception:
        # Narrowed from a bare except: still best-effort (either system
        # or the connecting wormhole may not be on the map yet), but no
        # longer swallows SystemExit/KeyboardInterrupt.
        print("Hole didn't exist yet")
    # Always record the jump itself.
    jl = JumpLog.objects.create(user_id=user.id, char_name=charName,
                                src=oldSys, dest=newSys)
    jl.save()
def _checkin_igb_trusted(request, current_map):
    """
    Runs the specific code for the case that the request came from an igb that
    trusts us, returns None if no further action is required, returns a string
    containing the html for a system add dialog if we detect that a new system
    needs to be added
    """
    # XXX possibly where the logging needs to happen
    can_edit = current_map.get_permission(request.user) == 2
    current_location = (request.eve_systemid, request.eve_charname,
                        request.eve_shipname, request.eve_shiptypename)
    char_cache_key = 'char_%s_location' % request.eve_charid
    old_location = cache.get(char_cache_key)
    result = None
    #print old_location
    if old_location != current_location:
        # Location (system, char, ship) changed since last check-in.
        current_system = get_object_or_404(System, pk=current_location[0])
        if old_location:
            old_system = get_object_or_404(System, pk=old_location[0])
            old_system.remove_active_pilot(request.eve_charid)
            log_movement(old_system, current_system, request.eve_charname, request.eve_shiptypename, current_map, request.user) #XXX vtadd
        current_system.add_active_pilot(request.user.username,
                request.eve_charid, request.eve_charname, request.eve_shipname,
                request.eve_shiptypename)
        request.user.get_profile().update_location(current_system.pk,
                request.eve_charid, request.eve_charname, request.eve_shipname,
                request.eve_shiptypename)
        # Remember the new location for 5 minutes.
        cache.set(char_cache_key, current_location, 60 * 5)
        #Conditions for the system to be automagically added to the map.
        if (can_edit and
            old_location and
            old_system in current_map
            and current_system not in current_map
            and not _is_moving_from_kspace_to_kspace(old_system, current_system)
        ):
            context = {
                'oldsystem': current_map.systems.filter(
                    system=old_system).all()[0],
                'newsystem': current_system,
                'wormholes': utils.get_possible_wh_types(old_system,
                                                         current_system),
            }
            if request.POST.get('silent', 'false') != 'true':
                # Ask the user via a dialog.
                result = render_to_string('igb_system_add_dialog.html', context,
                                          context_instance=RequestContext(request))
            else:
                # Silent mode: add the system with a K162/K162 connection.
                new_ms = current_map.add_system(request.user, current_system, '',
                                                context['oldsystem'])
                k162_type = WormholeType.objects.get(name="K162")
                new_ms.connect_to(context['oldsystem'], k162_type, k162_type)
                result = 'silent'
    # maybe fixes
    else:
        # Unchanged location: just refresh the cache TTL.
        cache.set(char_cache_key, current_location, 60 * 5)
    return result
def _is_moving_from_kspace_to_kspace(old_system, current_system):
"""
returns whether we are moving through kspace
:param old_system:
:param current_system:
:return:
"""
return old_system.is_kspace() and current_system.is_kspace()
def get_system_context(ms_id, user):
    """Build the template context dict for a MapSystem's detail/menu
    views: the concrete K/W system, scan-age warning, interest flag,
    active SiteTracker fleets, cached pilot locations and edit rights.
    """
    map_system = get_object_or_404(MapSystem, pk=ms_id)
    if map_system.map.get_permission(user) == 2:
        can_edit = True
    else:
        can_edit = False
    #If map_system represents a k-space system get the relevant KSystem object
    if map_system.system.is_kspace():
        system = map_system.system.ksystem
    else:
        system = map_system.system.wsystem
    # Systems last scanned before this threshold get a warning flag.
    scan_threshold = datetime.now(pytz.utc) - timedelta(
        hours=int(get_config("MAP_SCAN_WARNING", None).value)
    )
    interest_offset = int(get_config("MAP_INTEREST_TIME", None).value)
    interest_threshold = (datetime.now(pytz.utc)
                          - timedelta(minutes=interest_offset))
    scan_warning = system.lastscanned < scan_threshold
    if interest_offset > 0:
        # Interest expires after the configured number of minutes.
        interest = (map_system.interesttime and
                    map_system.interesttime > interest_threshold)
    else:
        # Offset of 0 (or less): interest never expires on its own.
        interest = map_system.interesttime
    # Include any SiteTracker fleets that are active
    st_fleets = map_system.system.stfleets.filter(ended=None).all()
    locations = cache.get('sys_%s_locations' % map_system.system.pk)
    if not locations:
        locations = {}
    return {'system': system, 'mapsys': map_system,
            'scanwarning': scan_warning, 'isinterest': interest,
            'stfleets': st_fleets, 'locations': locations,
            'can_edit': can_edit}
@login_required
@require_map_permission(permission=2)
def add_system(request, map_id):
    """
    AJAX view to add a system to a current_map. Requires POST containing:
    topMsID: map_system ID of the parent map_system
    bottomSystem: Name of the new system
    topType: WormholeType name of the parent side
    bottomType: WormholeType name of the new side
    timeStatus: Wormhole time status integer value
    massStatus: Wormhole mass status integer value
    topBubbled: 1 if Parent side bubbled
    bottomBubbled: 1 if new side bubbled
    friendlyName: Friendly name for the new map_system
    """
    if not request.is_ajax():
        raise PermissionDenied
    try:
        # Prepare data
        current_map = Map.objects.get(pk=map_id)
        top_ms = MapSystem.objects.get(pk=request.POST.get('topMsID'))
        bottom_sys = System.objects.get(
            name=request.POST.get('bottomSystem')
        )
        top_type = WormholeType.objects.get(
            name=request.POST.get('topType')
        )
        bottom_type = WormholeType.objects.get(
            name=request.POST.get('bottomType')
        )
        time_status = int(request.POST.get('timeStatus'))
        mass_status = int(request.POST.get('massStatus'))
        if request.POST.get('topBubbled', '0') != "0":
            top_bubbled = True
        else:
            top_bubbled = False
        if request.POST.get('bottomBubbled', '0') != "0":
            bottom_bubbled = True
        else:
            bottom_bubbled = False
        # Add System
        bottom_ms = current_map.add_system(
            request.user, bottom_sys,
            request.POST.get('friendlyName'), top_ms
        )
        # Add Wormhole
        bottom_ms.connect_to(top_ms, top_type, bottom_type, top_bubbled,
                             bottom_bubbled, time_status, mass_status)
        current_map.clear_caches()
        return HttpResponse()
    except ObjectDoesNotExist:
        # Any missing/unknown POST value -> 400 rather than a 500.
        return HttpResponse(status=400)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def remove_system(request, map_id, ms_id):
    """
    Delete the given MapSystem from its map, logging who removed it.
    """
    map_sys = get_object_or_404(MapSystem, pk=ms_id)
    map_sys.remove_system(request.user)
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_details(request, map_id, ms_id):
    """
    Render the details div for the system identified by ms_id on map
    map_id (AJAX only).
    """
    if not request.is_ajax():
        raise PermissionDenied
    details_context = get_system_context(ms_id, request.user)
    return render(request, 'system_details.html', details_context)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_menu(request, map_id, ms_id):
    """
    Render the context-menu HTML for the system given by ms_id (AJAX only).
    """
    if not request.is_ajax():
        raise PermissionDenied
    menu_context = get_system_context(ms_id, request.user)
    return render(request, 'system_menu.html', menu_context)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_tooltips(request, map_id):
    """
    Returns the system tooltips for map_id
    """
    if not request.is_ajax():
        raise PermissionDenied
    # Rendered tooltips are cached for 60 seconds per map.
    cache_key = 'map_%s_sys_tooltip' % map_id
    cached_tips = cache.get(cache_key)
    if not cached_tips:
        ms_list = MapSystem.objects.filter(map_id=map_id)\
            .select_related('parent_wormhole', 'system__region')\
            .iterator()
        new_tips = render_to_string('system_tooltip.html',
                                    {'map_systems': ms_list}, RequestContext(request))
        cache.set(cache_key, new_tips, 60)
        return HttpResponse(new_tips)
    else:
        return HttpResponse(cached_tips)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def wormhole_tooltips(request, map_id):
    """Takes a POST request from AJAX with a Wormhole ID and renders the
    wormhole tooltip for that ID to response.
    """
    if not request.is_ajax():
        raise PermissionDenied
    # Rendered tooltips are cached for 60 seconds per map.
    cache_key = 'map_%s_wh_tooltip' % map_id
    cached_tips = cache.get(cache_key)
    if not cached_tips:
        cur_map = get_object_or_404(Map, pk=map_id)
        ms_list = MapSystem.objects.filter(map=cur_map).all()
        whs = Wormhole.objects.filter(top__in=ms_list).all()
        new_tips = render_to_string('wormhole_tooltip.html',
                                    {'wormholes': whs}, RequestContext(request))
        cache.set(cache_key, new_tips, 60)
        return HttpResponse(new_tips)
    else:
        return HttpResponse(cached_tips)
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def collapse_system(request, map_id, ms_id):
    """
    Flag the wormhole leading into this system as collapsed (AJAX only).
    """
    if not request.is_ajax():
        raise PermissionDenied
    wormhole = get_object_or_404(MapSystem, pk=ms_id).parent_wormhole
    wormhole.collapsed = True
    wormhole.save()
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def resurrect_system(request, map_id, ms_id):
    """
    Clear the collapsed flag on the wormhole leading into this system
    (AJAX only).
    """
    if not request.is_ajax():
        raise PermissionDenied
    wormhole = get_object_or_404(MapSystem, pk=ms_id).parent_wormhole
    wormhole.collapsed = False
    wormhole.save()
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def mark_scanned(request, map_id, ms_id):
    """Stamp the system referenced by ms_id as freshly scanned
    (AJAX only)."""
    if not request.is_ajax():
        raise PermissionDenied
    target = get_object_or_404(MapSystem, pk=ms_id).system
    target.lastscanned = datetime.now(pytz.utc)
    target.save()
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
def manual_location(request, map_id, ms_id):
    """Takes a POST request form AJAX with a System ID and marks the user as
    being active in that system.
    """
    if not request.is_ajax():
        raise PermissionDenied
    # Clear the user's previously recorded location, if any.
    user_locations = cache.get('user_%s_locations' % request.user.pk)
    if user_locations:
        old_location = user_locations.pop(request.user.pk, None)
        if old_location:
            old_sys = get_object_or_404(System, pk=old_location[0])
            old_sys.remove_active_pilot(request.user.pk)
    # Record the new location with placeholder char/ship data, since an
    # out-of-game browser cannot report them.
    map_sys = get_object_or_404(MapSystem, pk=ms_id)
    map_sys.system.add_active_pilot(request.user.username, request.user.pk,
                                    'OOG Browser', 'Unknown', 'Unknown')
    request.user.get_profile().update_location(map_sys.system.pk, request.user.pk,
                                               'OOG Browser', 'Unknown', 'Unknown')
    map_sys.map.clear_caches()
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def set_interest(request, map_id, ms_id):
    """Takes a POST request from AJAX with an action and marks that system
    as having either utcnow or None as interesttime. The action can be either
    "set" or "remove".

    Bug fix: the map cache invalidation previously sat after both early
    returns and so never ran for a successful set/remove; it now runs in
    each branch before returning.
    """
    if request.is_ajax():
        action = request.POST.get("action", "none")
        if action == "none":
            raise Http404
        system = get_object_or_404(MapSystem, pk=ms_id)
        if action == "set":
            system.interesttime = datetime.now(pytz.utc)
            system.save()
            # Invalidate cached map JSON so the change shows immediately.
            system.map.clear_caches()
            return HttpResponse()
        if action == "remove":
            system.interesttime = None
            system.save()
            system.map.clear_caches()
            return HttpResponse()
        # Unknown action: keep the original teapot response.
        return HttpResponse(status=418)
    else:
        raise PermissionDenied
def _update_sig_from_tsv(signature, row):
COL_SIG = 0
COL_SIG_TYPE = 3
COL_SIG_GROUP = 2
COL_SIG_SCAN_GROUP = 1
COL_SIG_STRENGTH = 4
COL_DISTANCE = 5
info = row[COL_SIG_TYPE]
updated = False
sig_type = None
if (row[COL_SIG_SCAN_GROUP] == "Cosmic Signature"
or row[COL_SIG_SCAN_GROUP] == "Cosmic Anomaly"
):
try:
sig_type = SignatureType.objects.get(
longname=row[COL_SIG_GROUP])
except:
sig_type = None
else:
sig_type = None
if sig_type:
updated = True
if sig_type:
signature.sigtype = sig_type
signature.updated = updated or signature.updated
if info:
signature.info = info
if signature.info == None:
signature.info = ''
return signature
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def bulk_sig_import(request, map_id, ms_id):
    """
    GET gets a bulk signature import form. POST processes it, creating sigs
    with blank info and type for each sig ID detected.

    At most 75 signatures are imported per paste.
    """
    if not request.is_ajax():
        raise PermissionDenied
    map_system = get_object_or_404(MapSystem, pk=ms_id)
    k = 0
    if request.method == 'POST':
        reader = csv.reader(request.POST.get('paste', '').decode(
            'utf-8').splitlines(), delimiter="\t")
        COL_SIG = 0
        COL_STRENGTH = 4
        for row in reader:
            # To prevent pasting of POSes into the sig importer, make sure
            # the strength column is present
            try:
                test_var = row[COL_STRENGTH]
            except IndexError:
                return HttpResponse('A valid signature paste was not found',
                                    status=400)
            if k < 75:
                sig_id = utils.convert_signature_id(row[COL_SIG])
                # get_or_create: re-pasting updates existing sigs.
                sig = Signature.objects.get_or_create(sigid=sig_id,
                                                      system=map_system.system)[0]
                sig = _update_sig_from_tsv(sig, row)
                sig.modified_by = request.user
                sig.save()
                signals.signature_update.send_robust(sig, user=request.user,
                                                     map=map_system.map,
                                                     signal_strength=row[COL_STRENGTH])
                k += 1
        map_system.map.add_log(request.user,
                               "Imported %s signatures for %s(%s)."
                               % (k, map_system.system.name,
                                  map_system.friendlyname), True)
        map_system.system.lastscanned = datetime.now(pytz.utc)
        map_system.system.save()
        return HttpResponse()
    else:
        return TemplateResponse(request, "bulk_sig_form.html",
                                {'mapsys': map_system})
@login_required
@require_map_permission(permission=2)
def toggle_sig_owner(request, map_id, ms_id, sig_id=None):
    """Flip ownership of the given signature to/from the requesting user
    (AJAX only)."""
    if not request.is_ajax():
        raise PermissionDenied
    get_object_or_404(Signature, pk=sig_id).toggle_ownership(request.user)
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def edit_signature(request, map_id, ms_id, sig_id=None):
    """
    GET gets a pre-filled edit signature form.
    POST updates the signature with the new information and returns a
    blank add form.

    With sig_id=None the view acts as an "add signature" form instead.
    """
    if not request.is_ajax():
        raise PermissionDenied
    map_system = get_object_or_404(MapSystem, pk=ms_id)
    # If the user can't edit signatures, return a blank response
    if map_system.map.get_permission(request.user) != 2:
        return HttpResponse()
    action = None
    if sig_id != None:
        signature = get_object_or_404(Signature, pk=sig_id)
        created = False
        # Claim the signature while editing so others see it locked.
        if not signature.owned_by:
            signature.toggle_ownership(request.user)
    if request.method == 'POST':
        form = SignatureForm(request.POST)
        if form.is_valid():
            ingame_id = utils.convert_signature_id(form.cleaned_data['sigid'])
            if sig_id == None:
                signature, created = Signature.objects.get_or_create(
                    system=map_system.system, sigid=ingame_id)
            signature.sigid = ingame_id
            signature.updated = True
            signature.info = form.cleaned_data['info']
            if request.POST['sigtype'] != '':
                sigtype = form.cleaned_data['sigtype']
            else:
                sigtype = None
            signature.sigtype = sigtype
            signature.modified_by = request.user
            signature.save()
            map_system.system.lastscanned = datetime.now(pytz.utc)
            map_system.system.save()
            if created:
                action = 'Created'
            else:
                action = 'Updated'
            # Release ownership now that the edit is saved.
            if signature.owned_by:
                signature.toggle_ownership(request.user)
            map_system.map.add_log(request.user,
                                   "%s signature %s in %s (%s)" %
                                   (action, signature.sigid, map_system.system.name,
                                    map_system.friendlyname))
            signals.signature_update.send_robust(signature, user=request.user,
                                                 map=map_system.map)
        else:
            return TemplateResponse(request, "edit_sig_form.html",
                                    {'form': form,
                                     'system': map_system, 'sig': signature})
    form = SignatureForm()
    if sig_id == None or action == 'Updated':
        return TemplateResponse(request, "add_sig_form.html",
                                {'form': form, 'system': map_system})
    else:
        return TemplateResponse(request, "edit_sig_form.html",
                                {'form': SignatureForm(instance=signature),
                                 'system': map_system, 'sig': signature})
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=1)
@cache_page(1)
def get_signature_list(request, map_id, ms_id):
    """
    Determines the proper escalationThreshold time and renders
    system_signatures.html

    cache_page(1) coalesces the frequent AJAX polling into at most one
    render per second.
    """
    if not request.is_ajax():
        raise PermissionDenied
    system = get_object_or_404(MapSystem, pk=ms_id)
    escalation_downtimes = int(get_config("MAP_ESCALATION_BURN",
                                          request.user).value)
    return TemplateResponse(request, "system_signatures.html",
                            {'system': system,
                             'downtimes': escalation_downtimes})
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def mark_signature_cleared(request, map_id, ms_id, sig_id):
    """
    Flag the signature's NPCs as cleared (AJAX only).
    """
    if not request.is_ajax():
        raise PermissionDenied
    get_object_or_404(Signature, pk=sig_id).clear_rats()
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def escalate_site(request, map_id, ms_id, sig_id):
    """
    Flag the given site as escalated (AJAX only).
    """
    if not request.is_ajax():
        raise PermissionDenied
    get_object_or_404(Signature, pk=sig_id).escalate()
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def activate_signature(request, map_id, ms_id, sig_id):
    """
    Flag the given site as activated (AJAX only).
    """
    if not request.is_ajax():
        raise PermissionDenied
    get_object_or_404(Signature, pk=sig_id).activate()
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def delete_signature(request, map_id, ms_id, sig_id):
    """
    Deletes a signature.
    """
    if not request.is_ajax():
        raise PermissionDenied
    map_system = get_object_or_404(MapSystem, pk=ms_id)
    sig = get_object_or_404(Signature, pk=sig_id)
    sig.delete()
    # The in-memory instance still carries its fields after delete(),
    # so they can be used in the log message below.
    map_system.map.add_log(request.user, "Deleted signature %s in %s (%s)."
                           % (sig.sigid, map_system.system.name,
                              map_system.friendlyname))
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def manual_add_system(request, map_id, ms_id):
    """
    Render a blank "add system" form with the provided MapSystem as the top
    system. The form is then POSTed to the add_system view.

    When the request comes from a trusted in-game browser, the pilot's
    current system is pre-filled.
    """
    current_system = ""
    if request.is_igb_trusted:
        current_system = System.objects.get(name=request.eve_systemname)
    context = {
        'topMs': get_object_or_404(MapSystem, pk=ms_id),
        'sysList': System.objects.all(),
        'whList': WormholeType.objects.all(),
        'newsystem': current_system,
    }
    return render(request, 'add_system_box.html', context)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def edit_system(request, map_id, ms_id):
    """
    GET: return the edit dialog pre-filled with the system's current data.
    POST: save friendlyName, info and occupied for the system. AJAX-only.
    """
    if not request.is_ajax():
        raise PermissionDenied
    map_system = get_object_or_404(MapSystem, pk=ms_id)
    if request.method == 'GET':
        # <br /> markers are stored in the DB; convert them back to
        # newlines so the values are editable in a plain textarea.
        context = {
            'mapsys': map_system,
            'occupied': map_system.system.occupied.replace("<br />", "\n"),
            'info': map_system.system.info.replace("<br />", "\n"),
        }
        return TemplateResponse(request, 'edit_system.html', context)
    if request.method == 'POST':
        map_system.friendlyname = request.POST.get('friendlyName', '')
        new_info = request.POST.get('info', '')
        new_occupied = request.POST.get('occupied', '')
        # Only touch the shared System row when something really changed.
        if (map_system.system.info != new_info or
                map_system.system.occupied != new_occupied):
            map_system.system.info = new_info
            map_system.system.occupied = new_occupied
            map_system.system.save()
        map_system.save()
        map_system.map.add_log(request.user, "Edited System: %s (%s)"
                               % (map_system.system.name,
                                  map_system.friendlyname))
        return HttpResponse()
    raise PermissionDenied
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def edit_wormhole(request, map_id, wh_id):
    """
    A GET request gets the edit wormhole dialog pre-filled with current info.
    A POST request saves the posted data as the new info.
    POST values are topType, bottomType, massStatus, timeStatus, topBubbled,
    bottomBubbled and (optionally) massAdd, the name of a ship whose mass is
    added to the wormhole's used-mass total.
    """
    if not request.is_ajax():
        raise PermissionDenied
    wormhole = get_object_or_404(Wormhole, pk=wh_id)
    if request.method == 'GET':
        return TemplateResponse(request, 'edit_wormhole.html',
                                {'wormhole': wormhole}
                                )
    if request.method == 'POST':
        # BUG FIX: the default used to be the int 0, so a request without
        # 'massAdd' passed the != "" check and crashed the Ship lookup.
        manual_ship_name = request.POST.get('massAdd', '')
        if manual_ship_name != "":
            added_mass = Ship.objects.get(shipname=manual_ship_name).shipmass
            wormhole.mass_amount += added_mass
        wormhole.mass_status = int(request.POST.get('massStatus', 0))
        wormhole.time_status = int(request.POST.get('timeStatus', 0))
        wormhole.top_type = get_object_or_404(
            WormholeType,
            name=request.POST.get('topType', 'K162')
        )
        wormhole.bottom_type = get_object_or_404(
            WormholeType,
            name=request.POST.get('bottomType', 'K162')
        )
        wormhole.top_bubbled = request.POST.get('topBubbled', '1') == '1'
        wormhole.bottom_bubbled = request.POST.get('bottomBubbled', '1') == '1'
        wormhole.save()
        wormhole.map.add_log(request.user,
                             ("Updated the wormhole between %s(%s) and %s(%s)."
                              % (wormhole.top.system.name,
                                 wormhole.top.friendlyname,
                                 wormhole.bottom.system.name,
                                 wormhole.bottom.friendlyname)))
        return HttpResponse()
    # BUG FIX: was misspelled "PermissiondDenied", which raised NameError
    # for any method other than GET/POST.
    raise PermissionDenied
@permission_required('Map.add_map')
def create_map(request):
    """
    Create a new map from the posted MapForm and redirect to it; on GET
    (or an invalid POST) render the new-map form.
    """
    if request.method == 'POST':
        form = MapForm(request.POST)
        if form.is_valid():
            new_map = form.save()
            new_map.add_log(request.user, "Created the %s map." % new_map.name)
            new_map.add_system(request.user, new_map.root, "Root", None)
            return HttpResponseRedirect(reverse('Map.views.get_map',
                                                kwargs={'map_id': new_map.pk}))
        else:
            return TemplateResponse(request, 'new_map.html', {'form': form})
    else:
        # BUG FIX: previously passed the MapForm class instead of an
        # instance; instantiate it so the template gets a form object.
        form = MapForm()
        return TemplateResponse(request, 'new_map.html', {'form': form, })
def _sort_destinations(destinations):
"""
Takes a list of destination tuples and returns the same list, sorted in order of the jumps.
"""
results = []
onVal = 0
for dest in destinations:
if len(results) == 0:
results.append(dest)
else:
while onVal <= len(results):
if onVal == len(results):
results.append(dest)
onVal = 0
break
else:
if dest[1] > results[onVal][1]:
onVal += 1
else:
results.insert(onVal, dest)
onVal = 0
break
return results
# noinspection PyUnusedLocal
@require_map_permission(permission=1)
def destination_list(request, map_id, ms_id):
    """
    Render the destinations-of-interest list (jumps and light-years) for a
    K-space system; return an empty response for w-space systems. AJAX-only.
    """
    if not request.is_ajax():
        raise PermissionDenied
    destinations = Destination.objects.filter(Q(user=None) |
                                              Q(user=request.user))
    map_system = get_object_or_404(MapSystem, pk=ms_id)
    try:
        system = KSystem.objects.get(pk=map_system.system.pk)
        route_finder = utils.RouteFinder()
        result = []
        for destination in destinations:
            jumps = route_finder.route_length(system,
                                              destination.system) - 1
            light_years = round(route_finder.ly_distance(system,
                                                         destination.system),
                                3)
            result.append((destination.system, jumps, light_years))
    except ObjectDoesNotExist:
        # W-space systems have no KSystem row and no meaningful routes.
        return HttpResponse()
    return render(request, 'system_destinations.html',
                  {'system': system,
                   'destinations': _sort_destinations(result)})
# noinspection PyUnusedLocal
def site_spawns(request, map_id, ms_id, sig_id):
    """
    Render the NPC spawn list for a given signature; when the spawn data is
    class-specific, narrow it to the signature's system class.
    """
    sig = get_object_or_404(Signature, pk=sig_id)
    spawns = SiteSpawn.objects.filter(sigtype=sig.sigtype).all()
    # BUG FIX: guard against an empty queryset before peeking at spawns[0];
    # previously this raised IndexError when no spawns were defined.
    if spawns and spawns[0].sysclass != 0:
        spawns = SiteSpawn.objects.filter(sigtype=sig.sigtype,
                                          sysclass=sig.system.sysclass).all()
    return render(request, 'site_spawns.html', {'spawns': spawns})
#########################
#Settings Views #
#########################
@permission_required('Map.map_admin')
def general_settings(request):
    """
    Render and process the general map settings section.

    POST saves the submitted integer thresholds; GET renders the current
    values.
    """
    npc_threshold = get_config("MAP_NPC_THRESHOLD", None)
    pvp_threshold = get_config("MAP_PVP_THRESHOLD", None)
    scan_threshold = get_config("MAP_SCAN_WARNING", None)
    interest_time = get_config("MAP_INTEREST_TIME", None)
    escalation_burn = get_config("MAP_ESCALATION_BURN", None)
    if request.method == "POST":
        posted = (
            (scan_threshold, 'scanwarn'),
            (interest_time, 'interesttimeout'),
            (pvp_threshold, 'pvpthreshold'),
            (npc_threshold, 'npcthreshold'),
            (escalation_burn, 'escdowntimes'),
        )
        # Parse everything first so nothing is saved when a value is
        # missing or malformed, then persist all settings.
        for setting, field in posted:
            setting.value = int(request.POST[field])
        for setting, _ in posted:
            setting.save()
        return HttpResponse()
    return TemplateResponse(
        request, 'general_settings.html',
        {'npcthreshold': npc_threshold.value,
         'pvpthreshold': pvp_threshold.value,
         'scanwarn': scan_threshold.value,
         'interesttimeout': interest_time.value,
         'escdowntimes': escalation_burn.value}
    )
@permission_required('Map.map_admin')
def sites_settings(request):
    """
    Render the site spawns settings section.
    """
    spawn_list = SiteSpawn.objects.all()
    return TemplateResponse(request, 'spawns_settings.html',
                            {'spawns': spawn_list})
@permission_required('Map.map_admin')
def add_spawns(request):
    """
    Adds a site spawn.

    NOTE(review): stub -- currently performs no work and returns an empty
    200 response.
    """
    return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def delete_spawns(request, spawn_id):
    """
    Deletes a site spawn.

    NOTE(review): stub -- currently performs no work and returns an empty
    200 response.
    """
    return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def edit_spawns(request, spawn_id):
    """
    Alters a site spawn.

    NOTE(review): stub -- currently performs no work and returns an empty
    200 response.
    """
    return HttpResponse()
def destination_settings(request, user=None):
    """
    Render the destinations settings section. Without a user context only
    global (user=None) destinations are listed; otherwise the requesting
    user's personal destinations are included as well.
    """
    if user:
        dest_list = Destination.objects.filter(Q(user=None) |
                                               Q(user=request.user))
    else:
        dest_list = Destination.objects.filter(user=None)
    return TemplateResponse(request, 'dest_settings.html',
                            {'destinations': dest_list,
                             'user_context': user})
def add_destination(request, dest_user=None):
    """
    Add a destination of interest from POST's systemName; global
    destinations (dest_user=None) require the map_admin permission.
    """
    if not dest_user and not request.user.has_perm('Map.map_admin'):
        raise PermissionDenied
    target = get_object_or_404(KSystem, name=request.POST['systemName'])
    Destination(system=target, user=dest_user).save()
    return HttpResponse()
def add_personal_destination(request):
    """
    Add a destination visible only to the requesting user.
    """
    return add_destination(request, dest_user=request.user)
def delete_destination(request, dest_id):
    """
    Delete a destination; global destinations require map_admin, and
    personal ones may only be removed by their owner.
    """
    destination = get_object_or_404(Destination, pk=dest_id)
    if not destination.user:
        # Global destination: admins only.
        if not request.user.has_perm('Map.map_admin'):
            raise PermissionDenied
    elif request.user != destination.user:
        # Personal destination: owner only (even admins may not delete).
        raise PermissionDenied
    destination.delete()
    return HttpResponse()
@permission_required('Map.map_admin')
def sigtype_settings(request):
    """
    Render the signature types settings section.
    """
    sigtype_list = SignatureType.objects.all()
    return TemplateResponse(request, 'sigtype_settings.html',
                            {'sigtypes': sigtype_list})
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def edit_sigtype(request, sigtype_id):
    """
    Alters a signature type.

    NOTE(review): stub -- currently performs no work and returns an empty
    200 response.
    """
    return HttpResponse()
@permission_required('Map.map_admin')
def add_sigtype(request):
    """
    Adds a signature type.

    NOTE(review): stub -- currently performs no work and returns an empty
    200 response.
    """
    return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def delete_sigtype(request, sigtype_id):
    """
    Deletes a signature type.

    NOTE(review): stub -- currently performs no work and returns an empty
    200 response.
    """
    return HttpResponse()
@permission_required('Map.map_admin')
def map_settings(request, map_id):
    """
    Render and process the per-map settings section (name, explicit
    permissions flag, and per-group access levels).
    """
    saved = False
    subject = get_object_or_404(Map, pk=map_id)
    if request.method == 'POST':
        name = request.POST.get('name', None)
        explicit_perms = request.POST.get('explicitperms', False)
        if not name:
            return HttpResponse('The map name cannot be blank', status=400)
        subject.name = name
        subject.explicitperms = explicit_perms
        for group in Group.objects.all():
            MapPermission.objects.filter(group=group, map=subject).delete()
            # BUG FIX: POST values are strings, so the old "setting != 0"
            # check was true even for "0"; coerce to int before comparing
            # so "no access" does not create a permission row.
            setting = int(request.POST.get('map-%s-group-%s-permission' % (
                subject.pk, group.pk), 0))
            if setting != 0:
                MapPermission(group=group, map=subject, access=setting).save()
        subject.save()
        saved = True
    groups = []
    for group in Group.objects.all():
        if MapPermission.objects.filter(map=subject, group=group).exists():
            perm = MapPermission.objects.get(map=subject, group=group).access
        else:
            perm = 0
        groups.append((group, perm))
    return TemplateResponse(request, 'map_settings_single.html',
                            {'map': subject, 'groups': groups, 'saved': saved})
@permission_required('Map.map_admin')
def delete_map(request, map_id):
    """
    Delete the given map entirely.
    """
    doomed_map = get_object_or_404(Map, pk=map_id)
    doomed_map.delete()
    return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def edit_map(request, map_id):
    """
    Alters a map.

    NOTE(review): stub -- performs no work and returns an empty JSON array.
    """
    return HttpResponse('[]')
@permission_required('Map.map_admin')
def global_permissions(request):
    """
    Render and process the global permissions section. AJAX-only.

    POST synchronizes each group's map_unrestricted / add_map / map_admin
    permissions with the submitted checkboxes; GET renders the current
    state.
    """
    if not request.is_ajax():
        raise PermissionDenied
    admin_perm = Permission.objects.get(codename="map_admin")
    unrestricted_perm = Permission.objects.get(codename="map_unrestricted")
    add_map_perm = Permission.objects.get(codename="add_map")

    def _sync_perm(group, perm, wanted):
        # Add or remove `perm` so the group's state matches `wanted`.
        # (Replaces three copies of the same add/remove logic.)
        has_perm = perm in group.permissions.all()
        if wanted and not has_perm:
            group.permissions.add(perm)
        elif not wanted and has_perm:
            group.permissions.remove(perm)

    if request.method == "POST":
        for group in Group.objects.all():
            _sync_perm(group, unrestricted_perm,
                       request.POST.get('%s_unrestricted' % group.pk, None))
            _sync_perm(group, add_map_perm,
                       request.POST.get('%s_add' % group.pk, None))
            _sync_perm(group, admin_perm,
                       request.POST.get('%s_admin' % group.pk, None))
        return HttpResponse()
    group_list = []
    for group in Group.objects.all():
        entry = {
            'group': group, 'admin': admin_perm in group.permissions.all(),
            'unrestricted': unrestricted_perm in group.permissions.all(),
            'add_map': add_map_perm in group.permissions.all()
        }
        group_list.append(entry)
    return TemplateResponse(request, 'global_perms.html',
                            {'groups': group_list})
@require_map_permission(permission=2)
def purge_signatures(request, map_id, ms_id):
    """
    Delete every signature attached to a map system. AJAX POST only;
    any other method gets a 400.
    """
    if not request.is_ajax():
        raise PermissionDenied
    mapsys = get_object_or_404(MapSystem, pk=ms_id)
    if request.method != "POST":
        return HttpResponse(status=400)
    mapsys.system.signatures.all().delete()
    return HttpResponse()
| viarr/eve-wspace | evewspace/Map/views.py | Python | gpl-3.0 | 43,116 |
from datetime import datetime
import uuid
class Torrent(object):
    """
    A single torrent search result scraped from a tracker.

    Attributes are populated by the tracker scrapers; any field may be
    None when the tracker does not supply it.
    """

    def __init__(self):
        self.tracker = None    # name of the source tracker
        self.url = None        # details page URL for the torrent
        self.title = None
        self.magnet = None     # magnet link
        self.seeders = None
        self.leechers = None
        self.size = None       # size in bytes (decimal units used below)
        self.date = None       # upload datetime
        self.details = None
        self.uuid = uuid.uuid4().hex  # stable identifier for this result
        self._remove = False   # internal flag: marked for removal

    @property
    def human_age(self):
        """Age of the torrent in whole days, or "Unknown" if undated."""
        if not self.date:
            return "Unknown"
        age = datetime.now() - self.date
        return "%s days" % int(age.total_seconds() / (60 * 60 * 24))

    @property
    def human_size(self):
        """Size formatted with decimal (SI) units, or "Unknown"."""
        if not self.size:
            # BUG FIX: previously returned None implicitly when size was
            # unset, which rendered as "None"; "Unknown" matches human_age.
            return "Unknown"
        if self.size > 1000000000:
            return "%.2f GB" % (self.size / 1000000000)
        if self.size > 1000000:
            return "%.2f MB" % (self.size / 1000000)
        return "%s KB" % (self.size / 1000)

    @property
    def html_friendly_title(self):
        """Title with zero-width spaces (U+200B) so browsers can wrap it."""
        return (self.title
                .replace('.', '.\u200b')
                .replace('[', '\u200b[')
                .replace(']', ']\u200b'))

    def __unicode__(self):
        return "%s Size: %s Seeders: %s Age: %s %s" % (
            self.title.ljust(60)[0:60], str(self.human_size).ljust(12),
            str(self.seeders).ljust(6), self.human_age, self.tracker)

    def __str__(self):
        return self.__unicode__()
| stopstop/duvet | duvet/objects.py | Python | gpl-3.0 | 1,485 |
import math
import os
import re
import itertools
from types import LambdaType
import pkg_resources
import numpy
from PyQt4 import QtGui, QtCore, QtWebKit
from PyQt4.QtCore import Qt, pyqtSignal as Signal
from PyQt4.QtGui import QCursor, QApplication
import Orange.data
from Orange.widgets.utils import getdeepattr
from Orange.data import ContinuousVariable, StringVariable, DiscreteVariable, Variable
from Orange.widgets.utils import vartype
from Orange.widgets.utils.constants import CONTROLLED_ATTRIBUTES, ATTRIBUTE_CONTROLLERS
from Orange.util import namegen
# Paired no/yes labels usable in either attribute order.
YesNo = NoYes = ("No", "Yes")
# Lazily-loaded "Enter" icon shared by _enterButton(); see that function.
_enter_icon = None
# Matches "%(name)" value references inside label format strings while
# skipping escaped "%%(" sequences; used by label().
__re_label = re.compile(r"(^|[^%])%\((?P<value>[a-zA-Z]\w*)\)")
# Generator of unique Qt item-data roles, starting at Qt.UserRole.
OrangeUserRole = itertools.count(Qt.UserRole)
# Generator of unique names ('_lambda_1', ...) for lambda callbacks.
LAMBDA_NAME = namegen('_lambda_')
def resource_filename(path):
    """
    Return a resource filename (package data) for path.

    The path is resolved relative to this package via pkg_resources, so it
    also works for zipped or installed distributions.
    """
    return pkg_resources.resource_filename(__name__, path)
class TableWidget(QtGui.QTableWidget):
    """ An easy to use, row-oriented table widget """
    # Custom item-data roles: ROW_DATA_ROLE is stored on the first item of
    # each row (see rowData/setRowData), ITEM_DATA_ROLE on every item.
    ROW_DATA_ROLE = QtCore.Qt.UserRole + 1
    ITEM_DATA_ROLE = ROW_DATA_ROLE + 1
    class TableWidgetNumericItem(QtGui.QTableWidgetItem):
        """TableWidgetItem that sorts numbers correctly!"""
        def __lt__(self, other):
            # Compare the raw numeric item data, not the displayed string.
            return (self.data(TableWidget.ITEM_DATA_ROLE) <
                    other.data(TableWidget.ITEM_DATA_ROLE))
    def selectionChanged(self, selected:[QtGui.QItemSelectionRange], deselected:[QtGui.QItemSelectionRange]):
        """Override or monkey-patch this method to catch selection changes"""
        super().selectionChanged(selected, deselected)
    def __setattr__(self, attr, value):
        """
        The following selectionChanged magic ensures selectionChanged
        slot, when monkey-patched, always calls the super's selectionChanged
        first (--> avoids Qt quirks), and the user needs not care about that.
        """
        if attr == 'selectionChanged':
            func = value
            @QtCore.pyqtSlot(QtGui.QItemSelection, QtGui.QItemSelection)
            def _f(selected, deselected):
                super(self.__class__, self).selectionChanged(selected, deselected)
                func(selected, deselected)
            value = _f
        self.__dict__[attr] = value
    def _update_headers(func):
        """Decorator to update certain table features after method calls"""
        # NOTE: deliberately defined without `self` -- it is applied as a
        # plain decorator to methods inside this class body.
        def _f(self, *args, **kwargs):
            func(self, *args, **kwargs)
            if self.col_labels is not None:
                self.setHorizontalHeaderLabels(self.col_labels)
            if self.row_labels is not None:
                self.setVerticalHeaderLabels(self.row_labels)
            if self.stretch_last_section:
                self.horizontalHeader().setStretchLastSection(True)
        return _f
    @_update_headers
    def __init__(self,
                 parent=None,
                 col_labels=None,
                 row_labels=None,
                 stretch_last_section=True,
                 multi_selection=False,
                 select_rows=False):
        """
        Parameters
        ----------
        parent: QObject
            Parent QObject. If parent has layout(), this widget is added to it.
        col_labels: list of str
            Labels or [] (sequential numbers) or None (no horizontal header)
        row_label: list_of_str
            Labels or [] (sequential numbers) or None (no vertical header)
        stretch_last_section: bool
        multi_selection: bool
            Single selection if False
        select_rows: bool
            If True, select whole rows instead of individual cells.
        """
        super().__init__(parent)
        self._column_filter = {}
        self.col_labels = col_labels
        self.row_labels = row_labels
        self.stretch_last_section = stretch_last_section
        # Best-effort insertion into the parent's layout, if it has one.
        try: parent.layout().addWidget(self)
        except (AttributeError, TypeError): pass
        if col_labels is None:
            self.horizontalHeader().setVisible(False)
        if row_labels is None:
            self.verticalHeader().setVisible(False)
        if multi_selection:
            self.setSelectionMode(self.MultiSelection)
        if select_rows:
            self.setSelectionBehavior(self.SelectRows)
        self.setHorizontalScrollMode(self.ScrollPerPixel)
        self.setVerticalScrollMode(self.ScrollPerPixel)
        self.setEditTriggers(self.NoEditTriggers)
        self.setAlternatingRowColors(True)
        self.setShowGrid(False)
        self.setSortingEnabled(True)
    @_update_headers
    def addRow(self, items:tuple, data=None):
        """
        Appends iterable of `items` as the next row, optionally setting row
        data to `data`. Each item of `items` can be a string or tuple
        (item_name, item_data) if individual, cell-data is required.
        """
        row_data = data
        row = self.rowCount()
        self.insertRow(row)
        col_count = max(len(items), self.columnCount())
        if col_count != self.columnCount():
            self.setColumnCount(col_count)
        for col, item_data in enumerate(items):
            # Derive the display text from the item; a two-element
            # iterable is treated as (display_name, raw_data).
            if isinstance(item_data, str):
                name = item_data
            elif hasattr(item_data, '__iter__') and len(item_data) == 2:
                name, item_data = item_data
            elif isinstance(item_data, float):
                name = '{:.4f}'.format(item_data)
            else:
                name = str(item_data)
            # Numeric data gets the numerically-sorting item subclass.
            if isinstance(item_data, (float, int, numpy.number)):
                item = self.TableWidgetNumericItem(name)
            else:
                item = QtGui.QTableWidgetItem(name)
            item.setData(self.ITEM_DATA_ROLE, item_data)
            if col in self._column_filter:
                item = self._column_filter[col](item) or item
            self.setItem(row, col, item)
        self.resizeColumnsToContents()
        self.resizeRowsToContents()
        if row_data is not None:
            self.setRowData(row, row_data)
    def rowData(self, row:int):
        """Return the row data stored on the row's first item."""
        return self.item(row, 0).data(self.ROW_DATA_ROLE)
    def setRowData(self, row:int, data):
        """Store `data` on the row's first item."""
        self.item(row, 0).setData(self.ROW_DATA_ROLE, data)
    def setColumnFilter(self, item_filter_func, columns:int or list):
        """
        Pass item(s) at column(s) through `item_filter_func` before
        insertion. Useful for setting specific columns to bold or similar.
        """
        try: iter(columns)
        except TypeError: columns = [columns]
        for i in columns:
            self._column_filter[i] = item_filter_func
    def clear(self):
        """Clear the table contents and reset its dimensions to 0x0."""
        super().clear()
        self.setRowCount(0)
        self.setColumnCount(0)
    def selectFirstRow(self):
        """Select the first row, if the table is non-empty."""
        if self.rowCount() > 0:
            self.selectRow(0)
    def selectRowsWhere(self, col, value, n_hits=-1,
                        flags=QtCore.Qt.MatchExactly, _select=True):
        """
        Select (also return) at most `n_hits` rows where column `col`
        has value (``data()``) `value`.
        """
        model = self.model()
        matches = model.match(model.index(0, col),
                              self.ITEM_DATA_ROLE,
                              value,
                              n_hits,
                              flags)
        model = self.selectionModel()
        selection_flag = model.Select if _select else model.Deselect
        for index in matches:
            # Only flip rows whose selection state actually differs.
            if _select ^ model.isSelected(index):
                model.select(index, selection_flag | model.Rows)
        return matches
    def deselectRowsWhere(self, col, value, n_hits=-1,
                          flags=QtCore.Qt.MatchExactly):
        """
        Deselect (also return) at most `n_hits` rows where column `col`
        has value (``data()``) `value`.
        """
        return self.selectRowsWhere(col, value, n_hits, flags, False)
class WebviewWidget(QtWebKit.QWebView):
    """WebKit window in a window"""
    def __init__(self, parent=None, bridge=None, html=None, debug=None):
        """
        Parameters
        ----------
        parent: QObject
            Parent QObject. If parent has layout(), this widget is added to it.
        bridge: QObject
            The "bridge" object exposed as ``window.pybridge`` in JavaScript.
            Any bridge methods desired to be accessible from JS need to be
            decorated ``@QtCore.pyqtSlot(<*args>, result=<type>)``.
        html: str
            HTML content to set in the webview.
        debug: bool
            If True, enable context menu and webkit inspector.
        """
        super().__init__(parent)
        self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                             QtGui.QSizePolicy.Expanding))
        self._bridge = bridge
        # Best-effort insertion into the parent's layout, if it has one.
        try: parent.layout().addWidget(self)
        except (AttributeError, TypeError): pass
        settings = self.settings()
        settings.setAttribute(settings.LocalContentCanAccessFileUrls, True)
        if debug is None:
            # Default the inspector on when the app logs at DEBUG level.
            import logging
            debug = logging.getLogger().level <= logging.DEBUG
        if debug:
            settings.setAttribute(settings.DeveloperExtrasEnabled, True)
        else:
            self.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
        if html:
            self.setHtml(html)
    def setContent(self, data, mimetype, url=''):
        """Set raw `data` of the given `mimetype` as the view's content."""
        super().setContent(data, mimetype, QtCore.QUrl(url))
        if self._bridge:
            # The JS window object is reset on new content, so the bridge
            # must be re-exposed after every load.
            self.page().mainFrame().addToJavaScriptWindowObject('pybridge', self._bridge)
    def setHtml(self, html, url=''):
        """Set UTF-8 encoded `html` as the view's content."""
        self.setContent(html.encode('utf-8'), 'text/html', url)
    def sizeHint(self):
        """Preferred size of the webview."""
        return QtCore.QSize(600, 500)
    def evalJS(self, javascript):
        """Evaluate `javascript` in the page's main frame."""
        self.page().mainFrame().evaluateJavaScript(javascript)
class ControlledAttributesDict(dict):
    """
    Dict mapping controlled attribute names to lists of callbacks.

    Assigning to a key appends the callback to the key's list instead of
    overwriting it, and registers the master widget as a controller for
    that attribute.
    """
    def __init__(self, master):
        super().__init__()
        self.master = master

    def __setitem__(self, key, value):
        if key in self:
            dict.__getitem__(self, key).append(value)
        else:
            dict.__setitem__(self, key, [value])
        set_controllers(self.master, key, self.master, "")
# PEP 8 (E731): plain functions instead of assigned lambdas -- same
# one-argument callable interface, but with real names in tracebacks.
def callbacks(obj):
    """Return obj's dict of controlled-attribute callbacks (or {})."""
    return getattr(obj, CONTROLLED_ATTRIBUTES, {})


def subcontrollers(obj):
    """Return obj's dict of (controller, prefix) registrations (or {})."""
    return getattr(obj, ATTRIBUTE_CONTROLLERS, {})
def notify_changed(obj, name, value):
    """
    Invoke the callbacks registered for attribute `name` on `obj`; when
    none exist, propagate the change to controller widgets that reach
    `obj` through a dotted attribute path.
    """
    if name in callbacks(obj):
        for callback in callbacks(obj)[name]:
            callback(value)
        return
    for controller, prefix in list(subcontrollers(obj)):
        if getdeepattr(controller, prefix, None) != obj:
            # The controller no longer reaches obj via this path;
            # drop the stale registration.
            del subcontrollers(obj)[(controller, prefix)]
            continue
        full_name = prefix + "." + name
        if full_name in callbacks(controller):
            for callback in callbacks(controller)[full_name]:
                callback(value)
            continue
        prefix = full_name + "."
        prefix_length = len(prefix)
        for controlled in callbacks(controller):
            if controlled[:prefix_length] == prefix:
                # A deeper attribute path is controlled: re-register the
                # controller on the newly assigned value.
                set_controllers(value, controlled[prefix_length:], controller, full_name)
def set_controllers(obj, controlled_name, controller, prefix):
    """
    Walk the dotted `controlled_name` path starting at `obj`, registering
    `controller` (with the accumulated attribute `prefix`) on each object
    along the path so attribute changes can be propagated back to it.
    """
    while obj:
        if prefix:
            if hasattr(obj, ATTRIBUTE_CONTROLLERS):
                getattr(obj, ATTRIBUTE_CONTROLLERS)[(controller, prefix)] = True
            else:
                setattr(obj, ATTRIBUTE_CONTROLLERS, {(controller, prefix): True})
        # Split off the next path component; stop at the final attribute.
        parts = controlled_name.split(".", 1)
        if len(parts) < 2:
            break
        new_prefix, controlled_name = parts
        obj = getattr(obj, new_prefix, None)
        if prefix:
            prefix += '.'
        prefix += new_prefix
class OWComponent:
    """
    Mixin for objects whose attributes should participate in the
    controlled-attribute callback mechanism and in widget settings.
    """
    def __init__(self, widget):
        # Controlled-attribute registry must exist before settings
        # initialization triggers the first attribute assignments.
        setattr(self, CONTROLLED_ATTRIBUTES, ControlledAttributesDict(self))
        if widget.settingsHandler:
            widget.settingsHandler.initialize(self)

    def __setattr__(self, key, value):
        super().__setattr__(key, value)
        # Propagate every assignment to registered callbacks/controllers.
        notify_changed(self, key, value)
def miscellanea(control, box, parent,
                addToLayout=True, stretch=0, sizePolicy=None, addSpace=False,
                disabled=False, tooltip=None):
    """
    Helper function that sets various properties of the widget using a common
    set of arguments.

    The function
    - sets the `control`'s attribute `box`, if `box` is given and `control.box`
    is not yet set,
    - attaches a tool tip to the `control` if specified,
    - disables the `control`, if `disabled` is set to `True`,
    - adds the `box` to the `parent`'s layout unless `addToLayout` is set to
    `False`; the stretch factor can be specified,
    - adds the control into the box's layout if the box is given (regardless
    of `addToLayout`!)
    - sets the size policy for the box or the control, if the policy is given,
    - adds space in the `parent`'s layout after the `box` if `addSpace` is set
    and `addToLayout` is not `False`.

    If `box` is the same as `parent` it is set to `None`; this is convenient
    because of the way complex controls are inserted.

    :param control: the control, e.g. a `QCheckBox`
    :type control: PyQt4.QtGui.QWidget
    :param box: the box into which the widget was inserted
    :type box: PyQt4.QtGui.QWidget or None
    :param parent: the parent into whose layout the box or the control will be
        inserted
    :type parent: PyQt4.QtGui.QWidget
    :param addSpace: the amount of space to add after the widget
    :type addSpace: bool or int
    :param disabled: If set to `True`, the widget is initially disabled
    :type disabled: bool
    :param addToLayout: If set to `False` the widget is not added to the layout
    :type addToLayout: bool
    :param stretch: the stretch factor for this widget, used when adding to
        the layout (default: 0)
    :type stretch: int
    :param tooltip: tooltip that is attached to the widget
    :type tooltip: str or None
    :param sizePolicy: the size policy for the box or the control
    :type sizePolicy: PyQt4.QtQui.QSizePolicy
    """
    if disabled:
        # setDisabled(False) would *enable* a control that may have been
        # disabled elsewhere, so only call it when actually disabling.
        control.setDisabled(disabled)
    if tooltip is not None:
        control.setToolTip(tooltip)
    if box is parent:
        # Complex controls pass the parent as their box; normalize to None.
        box = None
    elif box and box is not control and not hasattr(control, "box"):
        control.box = box
    if box and box.layout() is not None and \
            isinstance(control, QtGui.QWidget) and \
            box.layout().indexOf(control) == -1:
        box.layout().addWidget(control)
    if sizePolicy is not None:
        (box or control).setSizePolicy(sizePolicy)
    if addToLayout and parent and parent.layout() is not None:
        parent.layout().addWidget(box or control, stretch)
        _addSpace(parent, addSpace)
def setLayout(widget, orientation):
    """
    Set the layout of the widget according to orientation. Argument
    `orientation` can be an instance of :obj:`~PyQt4.QtGui.QLayout`, in which
    case is it used as it is. If `orientation` is `'vertical'` or `True`,
    the layout is set to :obj:`~PyQt4.QtGui.QVBoxLayout`. If it is
    `'horizontal'` or `False`, it is set to :obj:`~PyQt4.QtGui.QHBoxLayout`.

    :param widget: the widget for which the layout is being set
    :type widget: PyQt4.QtGui.QWidget
    :param orientation: orientation for the layout
    :type orientation: str or bool or PyQt4.QtGui.QLayout
    """
    if isinstance(orientation, QtGui.QLayout):
        widget.setLayout(orientation)
    elif orientation == 'horizontal' or not orientation:
        widget.setLayout(QtGui.QHBoxLayout())
    else:
        widget.setLayout(QtGui.QVBoxLayout())
def _enterButton(parent, control, placeholder=True):
    """
    Create a commit ("Enter") tool button sized to match `control` and,
    optionally, an equally sized placeholder to show while the button is
    hidden. Both are added to the parent's layout when it has one; when a
    placeholder is constructed, the button starts hidden.

    :param parent: parent widget into which the button is inserted
    :type parent: PyQt4.QtGui.QWidget
    :param control: a widget whose height determines the button's size
    :type control: PyQt4.QtGui.QWidget
    :param placeholder: whether to construct the placeholder (default: True)
    :type placeholder: bool
    :return: a tuple with the button and the placeholder (or `None`)
    :rtype: tuple
    """
    global _enter_icon
    if not _enter_icon:
        # Load the icon once and cache it at module level.
        _enter_icon = QtGui.QIcon(
            os.path.dirname(__file__) + "/icons/Dlg_enter.png")
    side = control.sizeHint().height()
    button = QtGui.QToolButton(parent)
    button.setFixedSize(side, side)
    button.setIcon(_enter_icon)
    layout = parent.layout()
    if layout is not None:
        layout.addWidget(button)
    holder = None
    if placeholder:
        button.hide()
        holder = QtGui.QWidget(parent)
        holder.setFixedSize(side, side)
        if layout is not None:
            layout.addWidget(holder)
    return button, holder
def _addSpace(widget, space):
    """
    Insert spacing into `widget` when requested by a caller's `addSpace`
    argument. A false-ish value does nothing; an int inserts that many
    pixels; any other truthy value inserts the default separator.

    :param widget: Widget into which to insert the space
    :type widget: PyQt4.QtGui.QWidget
    :param space: amount of space to insert
    :type space: bool or int
    """
    if not space:
        return
    if type(space) == int:  # deliberately excludes bool (True is an int)
        separator(widget, space, space)
    else:
        separator(widget)
def separator(widget, width=4, height=4):
    """
    Add a fixed-size spacer widget into `widget`'s layout and return it.

    :param widget: the widget into whose layout the separator is added
    :type widget: PyQt4.QtGui.QWidget
    :param width: separator width in pixels
    :type width: int
    :param height: separator height in pixels
    :type height: int
    :return: the separator widget
    :rtype: PyQt4.QtGui.QWidget
    """
    spacer = QtGui.QWidget(widget)
    layout = widget.layout()
    if layout is not None:
        layout.addWidget(spacer)
    spacer.setFixedSize(width, height)
    return spacer
def rubber(widget):
    """
    Insert a stretch with factor 100 into the widget's layout, pushing
    the already-added widgets together toward the start.
    """
    widget.layout().addStretch(100)
def widgetBox(widget, box=None, orientation='vertical', margin=None, spacing=4,
              **misc):
    """
    Construct a box with a vertical or horizontal layout and, optionally,
    a border with a label.

    If the box has a frame, space is added after it unless explicitly
    disabled via `addSpace`.

    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param box: whether the box has a border, and its label when a string
    :type box: int or str or None
    :param orientation: layout orientation; a QLayout instance is used as
        is, 'horizontal'/false-ish yields a QHBoxLayout, anything else a
        QVBoxLayout (see :obj:`setLayout`)
    :param margin: layout margin; defaults to 7 with a border, 0 without
    :type margin: int
    :param spacing: layout spacing (default: 4)
    :type spacing: int
    :return: the constructed box
    :rtype: PyQt4.QtGui.QGroupBox or PyQt4.QtGui.QWidget
    """
    bordered = bool(box)
    if bordered:
        container = QtGui.QGroupBox(widget)
        if isinstance(box, str):
            container.setTitle(" " + box.strip() + " ")
        if margin is None:
            margin = 7
    else:
        container = QtGui.QWidget(widget)
        container.setContentsMargins(0, 0, 0, 0)
        if margin is None:
            margin = 0
    setLayout(container, orientation)
    container.layout().setSpacing(spacing)
    container.layout().setMargin(margin)
    # Framed boxes get trailing space unless the caller says otherwise.
    misc.setdefault('addSpace', bordered)
    miscellanea(container, None, widget, **misc)
    return container
def indentedBox(widget, sep=20, orientation="vertical", **misc):
    """
    Create a box indented by `sep` pixels, e.g. for controls aligned under
    a check box. Can also be used inline::

        gui.checkBox(gui.indentedBox(box), self, "spam", "Enable spam")

    To align the control with a check box, use
    :obj:`checkButtonOffsetHint`::

        gui.hSlider(gui.indentedBox(self.interBox), self, "intervals")

    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget
    :param sep: indent size in pixels (default: 20)
    :type sep: int
    :param orientation: layout of the inner box; see :obj:`widgetBox`
    :return: the indented box
    :rtype: PyQt4.QtGui.QGroupBox or PyQt4.QtGui.QWidget
    """
    outer = widgetBox(widget, orientation=False, spacing=0)
    separator(outer, sep, 0)
    inner = widgetBox(outer, orientation=orientation)
    miscellanea(inner, outer, widget, **misc)
    return inner
def widgetLabel(widget, label="", labelWidth=None, **misc):
    """
    Construct a simple, constant label.

    :param widget: the widget into which the label is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param label: the label text (default: "")
    :type label: str
    :param labelWidth: fixed label width, or None for automatic
    :type labelWidth: int
    :return: the constructed label
    :rtype: PyQt4.QtGui.QLabel
    """
    constructed = QtGui.QLabel(label, widget)
    if labelWidth:
        constructed.setFixedSize(labelWidth, constructed.sizeHint().height())
    miscellanea(constructed, None, widget, **misc)
    return constructed
def label(widget, master, label, labelWidth=None, box=None,
          orientation="vertical", **misc):
    """
    Construct a label that contains references to the master widget's
    attributes; when their values change, the label is updated.
    Argument :obj:`label` is a format string following Python's syntax
    (see the corresponding Python documentation): the label's content is
    rendered as `label % master.__dict__`. For instance, if the
    :obj:`label` is given as "There are %(mm)i monkeys", the value of
    `master.mm` (which must be an integer) will be inserted in place of
    `%(mm)i`.
    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param label: The text of the label, including attribute names
    :type label: str
    :param labelWidth: The width of the label (default: None)
    :type labelWidth: int
    :return: label
    :rtype: PyQt4.QtGui.QLabel
    """
    if box:
        b = widgetBox(widget, box, orientation=None, addToLayout=False)
    else:
        b = widget
    lbl = QtGui.QLabel("", b)
    # `reprint` re-renders the format string from the master's current
    # attribute values and pushes the text into the label.
    reprint = CallFrontLabel(lbl, label, master)
    # Register `reprint` as the change-callback for every attribute name
    # that occurs in the format string, so the label refreshes whenever
    # one of those attributes is assigned.
    for mo in __re_label.finditer(label):
        getattr(master, CONTROLLED_ATTRIBUTES)[mo.group("value")] = reprint
    reprint()
    if labelWidth:
        lbl.setFixedSize(labelWidth, lbl.sizeHint().height())
    miscellanea(lbl, b, widget, **misc)
    return lbl
class SpinBoxWFocusOut(QtGui.QSpinBox):
    """
    A class derived from QtGui.QSpinBox, which postpones the synchronization
    of the control's value with the master's attribute until the user presses
    Enter or clicks an icon that appears beside the spin box when the value
    is changed.
    The class overloads :obj:`onChange` event handler to show the commit button,
    and :obj:`onEnter` to commit the change when enter is pressed.
    .. attribute:: enterButton
        A widget (usually an icon) that is shown when the value is changed.
    .. attribute:: placeHolder
        A placeholder which is shown when the button is hidden
    .. attribute:: inSetValue
        A flag that is set when the value is being changed through
        :obj:`setValue` to prevent the programmatic changes from showing the
        commit button.
    """
    def __init__(self, minv, maxv, step, parent=None):
        """
        Construct the object and set the range (`minv`, `maxv`) and the step.
        :param minv: Minimal value
        :type minv: int
        :param maxv: Maximal value
        :type maxv: int
        :param step: Step
        :type step: int
        :param parent: Parent widget
        :type parent: PyQt4.QtGui.QWidget
        """
        super().__init__(parent)
        self.setRange(minv, maxv)
        self.setSingleStep(step)
        self.inSetValue = False
        # enterButton/placeHolder are created later by spin() (via
        # _enterButton) when callbackOnReturn is requested.
        self.enterButton = None
        self.placeHolder = None
    def onChange(self, _):
        """
        Hides the place holder and shows the commit button unless
        :obj:`inSetValue` is set.
        """
        if not self.inSetValue:
            self.placeHolder.hide()
            self.enterButton.show()
    def onEnter(self):
        """
        If the commit button is visible, the overload event handler commits
        the change by calling the appropriate callbacks. It also hides the
        commit button and shows the placeHolder.
        """
        if self.enterButton.isVisible():
            self.enterButton.hide()
            self.placeHolder.show()
            # cback/cfunc are attached externally by spin() through
            # connectControl; cback writes the value into the master's
            # attribute, cfunc invokes the user callback.
            if self.cback:
                self.cback(int(str(self.text())))
            if self.cfunc:
                self.cfunc()
    # doesn't work: it's probably LineEdit's focusOut that we should
    # (but can't) catch
    def focusOutEvent(self, *e):
        """
        This handler was intended to catch the focus out event and reintepret
        it as if enter was pressed. It does not work, though.
        """
        super().focusOutEvent(*e)
        if self.enterButton and self.enterButton.isVisible():
            self.onEnter()
    def setValue(self, value):
        """
        Set the :obj:`inSetValue` flag and call the inherited method.
        """
        # The flag suppresses onChange while the value is set
        # programmatically, so the commit button is not shown.
        self.inSetValue = True
        super().setValue(value)
        self.inSetValue = False
class DoubleSpinBoxWFocusOut(QtGui.QDoubleSpinBox):
    """
    Same as :obj:`SpinBoxWFocusOut`, except that it is derived from
    :obj:`~PyQt4.QtGui.QDoubleSpinBox`"""
    def __init__(self, minv, maxv, step, parent):
        """Set the range (`minv`, `maxv`), the step and the decimals."""
        super().__init__(parent)
        # Number of decimals is derived from the step: e.g. step=0.01
        # yields two decimal places.
        self.setDecimals(math.ceil(-math.log10(step)))
        self.setRange(minv, maxv)
        self.setSingleStep(step)
        self.inSetValue = False
        # enterButton/placeHolder are created later by spin() (via
        # _enterButton) when callbackOnReturn is requested.
        self.enterButton = None
        self.placeHolder = None
    def onChange(self, _):
        """Hide the placeholder and show the commit button unless the value
        is being changed programmatically (see :obj:`setValue`)."""
        if not self.inSetValue:
            self.placeHolder.hide()
            self.enterButton.show()
    def onEnter(self):
        """Commit the change (if pending) via the externally-attached
        cback/cfunc callbacks; hide the button, show the placeholder."""
        if self.enterButton.isVisible():
            self.enterButton.hide()
            self.placeHolder.show()
            # Accept both '.' and ',' as the decimal separator.
            if self.cback:
                self.cback(float(str(self.text()).replace(",", ".")))
            if self.cfunc:
                self.cfunc()
    # doesn't work: it's probably LineEdit's focusOut that we should
    # (and can't) catch
    def focusOutEvent(self, *e):
        """Intended to treat focus-out like Enter; known not to work."""
        super().focusOutEvent(*e)
        if self.enterButton and self.enterButton.isVisible():
            self.onEnter()
    def setValue(self, value):
        """Set the value programmatically without triggering the commit
        button (see :obj:`onChange`)."""
        self.inSetValue = True
        super().setValue(value)
        self.inSetValue = False
def spin(widget, master, value, minv, maxv, step=1, box=None, label=None,
         labelWidth=None, orientation=None, callback=None,
         controlWidth=None, callbackOnReturn=False, checked=None,
         checkCallback=None, posttext=None, disabled=False,
         alignment=Qt.AlignLeft, keyboardTracking=True,
         decimals=None, spinType=int, **misc):
    """
    A spinbox with lots of bells and whistles, such as a checkbox and various
    callbacks. It constructs a control of type :obj:`SpinBoxWFocusOut` or
    :obj:`DoubleSpinBoxWFocusOut`.
    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the master's attribute with which the value is synchronized
    :type value: str
    :param minv: minimal value
    :type minv: int
    :param maxv: maximal value
    :type maxv: int
    :param step: step (default: 1)
    :type step: int
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param label: label that is put in above or to the left of the spin box
    :type label: str
    :param labelWidth: optional label width (default: None)
    :type labelWidth: int
    :param orientation: tells whether to put the label above (`"vertical"` or
        `True`) or to the left (`"horizontal"` or `False`)
    :type orientation: int or bool or str
    :param callback: a function that is called when the value is entered; if
        :obj:`callbackOnReturn` is `True`, the function is called when the
        user commits the value by pressing Enter or clicking the icon
    :type callback: function
    :param controlWidth: the width of the spin box
    :type controlWidth: int
    :param callbackOnReturn: if `True`, the spin box has an associated icon
        that must be clicked to confirm the value (default: False)
    :type callbackOnReturn: bool
    :param checked: if not None, a check box is put in front of the spin box;
        when unchecked, the spin box is disabled. Argument `checked` gives the
        name of the master's attribute given whose value is synchronized with
        the check box's state (default: None).
    :type checked: str
    :param checkCallback: a callback function that is called when the check
        box's state is changed
    :type checkCallback: function
    :param posttext: a text that is put to the right of the spin box
    :type posttext: str
    :param alignment: alignment of the spin box (e.g. `QtCore.Qt.AlignLeft`)
    :type alignment: PyQt4.QtCore.Qt.Alignment
    :param keyboardTracking: If `True`, the valueChanged signal is emitted
        when the user is typing (default: True)
    :type keyboardTracking: bool
    :param spinType: determines whether to use QSpinBox (int) or
        QDoubleSpinBox (float)
    :type spinType: type
    :param decimals: number of decimals (if `spinType` is `float`)
    :type decimals: int
    :return: Tuple `(spin box, check box) if `checked` is `True`, otherwise
        the spin box
    :rtype: tuple or gui.SpinBoxWFocusOut
    """
    # b is the outermost box or the widget if there are no boxes;
    # b is the widget that is inserted into the layout
    # bi is the box that contains the control or the checkbox and the control;
    # bi can be the widget itself, if there are no boxes
    # cbox is the checkbox (or None)
    # sbox is the spinbox itself
    # NOTE: the condition parses as `box or (label and not checked)`; when a
    # check box is requested, the label becomes the check box's text instead
    # of a separate label widget (see the `checked is not None` branch below).
    if box or label and not checked:
        b = widgetBox(widget, box, orientation, addToLayout=False)
        hasHBox = orientation == 'horizontal' or not orientation
    else:
        b = widget
        hasHBox = False
    if not hasHBox and (checked or callback and callbackOnReturn or posttext):
        bi = widgetBox(b, orientation=0, addToLayout=False)
    else:
        bi = b
    cbox = None
    if checked is not None:
        cbox = checkBox(bi, master, checked, label, labelWidth=labelWidth,
                        callback=checkCallback)
    elif label:
        b.label = widgetLabel(b, label, labelWidth)
    if posttext:
        widgetLabel(bi, posttext)
    isDouble = spinType == float
    sbox = bi.control = \
        (SpinBoxWFocusOut, DoubleSpinBoxWFocusOut)[isDouble](minv, maxv,
                                                             step, bi)
    if bi is not widget:
        bi.setDisabled(disabled)
    else:
        sbox.setDisabled(disabled)
    if decimals is not None:
        sbox.setDecimals(decimals)
    sbox.setAlignment(alignment)
    sbox.setKeyboardTracking(keyboardTracking)
    if controlWidth:
        sbox.setFixedWidth(controlWidth)
    if value:
        sbox.setValue(getdeepattr(master, value))
    # connectControl returns (front, back, func) callbacks; the back/func
    # pair is stored on the spin box so that onEnter() can commit the value
    # in deferred (callbackOnReturn) mode. When committing is immediate, the
    # valueChanged signal is wired directly; otherwise no signal is passed
    # (the `not (...) and signal` expression evaluates to False).
    cfront, sbox.cback, sbox.cfunc = connectControl(
        master, value, callback,
        not (callback and callbackOnReturn) and
        sbox.valueChanged[(int, float)[isDouble]],
        (CallFrontSpin, CallFrontDoubleSpin)[isDouble](sbox))
    if checked:
        # The check box enables/disables the spin box.
        cbox.disables = [sbox]
        cbox.makeConsistent()
    if callback and callbackOnReturn:
        # Deferred-commit mode: show a commit icon when the text changes and
        # only invoke the callback when the user confirms the value.
        sbox.enterButton, sbox.placeHolder = _enterButton(bi, sbox)
        sbox.valueChanged[str].connect(sbox.onChange)
        sbox.editingFinished.connect(sbox.onEnter)
        sbox.enterButton.clicked.connect(sbox.onEnter)
        if hasattr(sbox, "upButton"):
            sbox.upButton().clicked.connect(
                lambda c=sbox.editor(): c.setFocus())
            sbox.downButton().clicked.connect(
                lambda c=sbox.editor(): c.setFocus())
    miscellanea(sbox, b if b is not widget else bi, widget, **misc)
    if checked:
        if isDouble and b == widget:
            # TODO Backward compatilibity; try to find and eliminate
            sbox.control = b.control
            return sbox
        return cbox, sbox
    else:
        return sbox
# noinspection PyTypeChecker
def doubleSpin(widget, master, value, minv, maxv, step=1, box=None, label=None,
               labelWidth=None, orientation=None, callback=None,
               controlWidth=None, callbackOnReturn=False, checked=None,
               checkCallback=None, posttext=None,
               alignment=Qt.AlignLeft, keyboardTracking=True,
               decimals=None, **misc):
    """
    Backward-compatibility wrapper: equivalent to calling :obj:`spin` with
    `spinType=float`.
    """
    return spin(
        widget, master, value, minv, maxv, step, spinType=float,
        box=box, label=label, labelWidth=labelWidth,
        orientation=orientation, callback=callback,
        controlWidth=controlWidth, callbackOnReturn=callbackOnReturn,
        checked=checked, checkCallback=checkCallback, posttext=posttext,
        alignment=alignment, keyboardTracking=keyboardTracking,
        decimals=decimals, **misc)
def checkBox(widget, master, value, label, box=None,
             callback=None, getwidget=False, id_=None, labelWidth=None,
             disables=None, **misc):
    """
    A simple checkbox.
    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the master's attribute with which the value is synchronized
    :type value: str
    :param label: label
    :type label: str
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param callback: a function that is called when the check box state is
        changed
    :type callback: function
    :param getwidget: If set `True`, the callback function will get a keyword
        argument `widget` referencing the check box
    :type getwidget: bool
    :param id_: If present, the callback function will get a keyword argument
        `id` with this value
    :type id_: any
    :param labelWidth: the width of the label
    :type labelWidth: int
    :param disables: a list of widgets that are disabled if the check box is
        unchecked
    :type disables: list or PyQt4.QtGui.QWidget or None
    :return: constructed check box; if is is placed within a box, the box is
        return in the attribute `box`
    :rtype: PyQt4.QtGui.QCheckBox
    """
    if box:
        b = widgetBox(widget, box, orientation=None, addToLayout=False)
    else:
        b = widget
    cbox = QtGui.QCheckBox(label, b)
    if labelWidth:
        cbox.setFixedSize(labelWidth, cbox.sizeHint().height())
    cbox.setChecked(getdeepattr(master, value))
    # Two-way sync: toggled[bool] writes into master.<value>; the optional
    # user callback is wrapped in a FunctionCallback so it can receive the
    # widget/id keyword arguments.
    connectControl(master, value, None, cbox.toggled[bool],
                   CallFrontCheckBox(cbox),
                   cfunc=callback and FunctionCallback(
                       master, callback, widget=cbox, getwidget=getwidget,
                       id=id_))
    # Normalize a single widget to a one-element list.
    if isinstance(disables, QtGui.QWidget):
        disables = [disables]
    cbox.disables = disables or []
    # makeConsistent is invoked on every toggle to update the widgets listed
    # in cbox.disables (presumably enabling/disabling them — see Disabler);
    # callers such as spin() append to cbox.disables and re-invoke it.
    cbox.makeConsistent = Disabler(cbox, master, value)
    cbox.toggled[bool].connect(cbox.makeConsistent)
    cbox.makeConsistent(value)
    miscellanea(cbox, b, widget, **misc)
    return cbox
class LineEditWFocusOut(QtGui.QLineEdit):
    """
    A class derived from QtGui.QLineEdit, which postpones the synchronization
    of the control's value with the master's attribute until the user leaves
    the line edit, presses Enter or clicks an icon that appears beside the
    line edit when the value is changed.
    The class also allows specifying a callback function for focus-in event.
    .. attribute:: enterButton
        A widget (usually an icon) that is shown when the value is changed.
    .. attribute:: placeHolder
        A placeholder which is shown when the button is hidden
    .. attribute:: inSetValue
        A flag that is set when the value is being changed through
        :obj:`setValue` to prevent the programmatic changes from showing the
        commit button.
    .. attribute:: callback
        Callback that is called when the change is confirmed
    .. attribute:: focusInCallback
        Callback that is called on the focus-in event
    """
    def __init__(self, parent, callback, focusInCallback=None,
                 placeholder=False):
        """
        :param parent: parent widget; the line edit is added to its layout
            if the parent has one
        :param callback: function called when the change is committed
        :param focusInCallback: optional function called on focus-in
        :param placeholder: if true, reserve space for the commit icon
        """
        super().__init__(parent)
        if parent.layout() is not None:
            parent.layout().addWidget(self)
        self.callback = callback
        self.focusInCallback = focusInCallback
        self.enterButton, self.placeHolder = \
            _enterButton(parent, self, placeholder)
        self.enterButton.clicked.connect(self.returnPressedHandler)
        self.textChanged[str].connect(self.markChanged)
        self.returnPressed.connect(self.returnPressedHandler)
    def markChanged(self, *_):
        """Show the commit icon to indicate an uncommitted edit."""
        if self.placeHolder:
            self.placeHolder.hide()
        self.enterButton.show()
    def markUnchanged(self, *_):
        """Hide the commit icon (the edit has been committed or reverted)."""
        self.enterButton.hide()
        if self.placeHolder:
            self.placeHolder.show()
    def returnPressedHandler(self):
        """Commit a pending edit: write the text into the master's attribute
        (via `cback`, attached externally by lineEdit() through
        connectControl) and invoke the user callback."""
        if self.enterButton.isVisible():
            self.markUnchanged()
            if hasattr(self, "cback") and self.cback:
                self.cback(self.text())
            if self.callback:
                self.callback()
    def setText(self, t):
        """Set the text programmatically without showing the commit icon."""
        super().setText(t)
        if self.enterButton:
            self.markUnchanged()
    def focusOutEvent(self, *e):
        """Leaving the line edit commits a pending edit."""
        super().focusOutEvent(*e)
        self.returnPressedHandler()
    def focusInEvent(self, *e):
        """Invoke the optional focus-in callback before default handling."""
        if self.focusInCallback:
            self.focusInCallback()
        return super().focusInEvent(*e)
def lineEdit(widget, master, value, label=None, labelWidth=None,
             orientation='vertical', box=None, callback=None,
             valueType=str, validator=None, controlWidth=None,
             callbackOnType=False, focusInCallback=None,
             enterPlaceholder=False, **misc):
    """
    Insert a line edit.
    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the master's attribute with which the value is synchronized
    :type value: str
    :param label: label
    :type label: str
    :param labelWidth: the width of the label
    :type labelWidth: int
    :param orientation: tells whether to put the label above (`"vertical"` or
        `True`) or to the left (`"horizontal"` or `False`)
    :type orientation: int or bool or str
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param callback: a function that is called when the check box state is
        changed
    :type callback: function
    :param valueType: the type into which the entered string is converted
        when synchronizing to `value`
    :type valueType: type
    :param validator: the validator for the input
    :type validator: PyQt4.QtGui.QValidator
    :param controlWidth: the width of the line edit
    :type controlWidth: int
    :param callbackOnType: if set to `True`, the callback is called at each
        key press (default: `False`)
    :type callbackOnType: bool
    :param focusInCallback: a function that is called when the line edit
        receives focus
    :type focusInCallback: function
    :param enterPlaceholder: if set to `True`, space of appropriate width is
        left empty to the right for the icon that shows that the value is
        changed but has not been committed yet
    :type enterPlaceholder: bool
    :rtype: PyQt4.QtGui.QLineEdit or a box
    """
    if box or label:
        b = widgetBox(widget, box, orientation, addToLayout=False)
        if label is not None:
            widgetLabel(b, label, labelWidth)
        hasHBox = orientation == 'horizontal' or not orientation
    else:
        b = widget
        hasHBox = False
    # A custom line-edit class may be injected through misc["baseClass"];
    # it is popped so it does not reach miscellanea().
    baseClass = misc.pop("baseClass", None)
    if baseClass:
        ledit = baseClass(b)
        ledit.enterButton = None
        if b is not widget:
            b.layout().addWidget(ledit)
    elif focusInCallback or callback and not callbackOnType:
        # Deferred-commit mode: LineEditWFocusOut synchronizes on focus-out,
        # Enter, or a click on the commit icon (it adds itself to the
        # parent's layout in its constructor).
        if not hasHBox:
            outer = widgetBox(b, "", 0, addToLayout=(b is not widget))
        else:
            outer = b
        ledit = LineEditWFocusOut(outer, callback, focusInCallback,
                                  enterPlaceholder)
    else:
        ledit = QtGui.QLineEdit(b)
        ledit.enterButton = None
        if b is not widget:
            b.layout().addWidget(ledit)
    if value:
        ledit.setText(str(getdeepattr(master, value)))
    if controlWidth:
        ledit.setFixedWidth(controlWidth)
    if validator:
        ledit.setValidator(validator)
    if value:
        # connectControl returns (front, back, func); [1] keeps the "back"
        # callback, which LineEditWFocusOut.returnPressedHandler uses to
        # write the (valueType-converted) text into master.<value>. The
        # textChanged signal is wired only when committing on every key
        # press (callbackOnType).
        ledit.cback = connectControl(
            master, value,
            callbackOnType and callback, ledit.textChanged[str],
            CallFrontLineEdit(ledit), fvcb=value and valueType)[1]
    miscellanea(ledit, b, widget, **misc)
    return ledit
def button(widget, master, label, callback=None, width=None, height=None,
           toggleButton=False, value="", default=False, autoDefault=True,
           buttonType=QtGui.QPushButton, **misc):
    """
    Insert a button (QPushButton, by default)
    :param widget: the widget into which the button is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param label: label
    :type label: str
    :param callback: a function that is called when the button is pressed
    :type callback: function
    :param width: the width of the button
    :type width: int
    :param height: the height of the button
    :type height: int
    :param toggleButton: if set to `True`, the button is checkable, but it is
        not synchronized with any attribute unless the `value` is given
    :type toggleButton: bool
    :param value: the master's attribute with which the value is synchronized
        (the argument is optional; if present, it makes the button "checkable",
        even if `toggleButton` is not set)
    :type value: str
    :param default: if `True` it makes the button the default button; this is
        the button that is activated when the user presses Enter unless some
        auto default button has current focus
    :type default: bool
    :param autoDefault: all buttons are auto default: they are activated if
        they have focus (or are the next in the focus chain) when the user
        presses enter. By setting `autoDefault` to `False`, the button is not
        activated on pressing Return.
    :type autoDefault: bool
    :param buttonType: the button type (default: `QPushButton`)
    :type buttonType: PyQt4.QtGui.QAbstractButton
    :rtype: PyQt4.QtGui.QAbstractButton
    """
    btn = buttonType(widget)
    if label:
        btn.setText(label)
    if height:
        btn.setFixedHeight(height)
    if width:
        btn.setFixedWidth(width)
    # A synchronized value implies a checkable (toggle) button.
    if value or toggleButton:
        btn.setCheckable(True)
    if buttonType == QtGui.QPushButton:
        btn.setDefault(default)
        btn.setAutoDefault(autoDefault)
    if value:
        # Two-way sync between the checked state and master.<value>.
        btn.setChecked(getdeepattr(master, value))
        user_cfunc = callback and FunctionCallback(master, callback,
                                                   widget=btn)
        connectControl(master, value, None, btn.toggled[bool],
                       CallFrontButton(btn), cfunc=user_cfunc)
    elif callback:
        btn.clicked.connect(callback)
    miscellanea(btn, None, widget, **misc)
    return btn
def toolButton(widget, master, label="", callback=None,
               width=None, height=None, tooltip=None):
    """
    Insert a tool button. Thin convenience wrapper around :obj:`button`
    with `buttonType=QToolButton`.
    :param widget: the widget into which the button is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param label: label
    :type label: str
    :param callback: a function that is called when the button is pressed
    :type callback: function
    :param width: the width of the button
    :type width: int
    :param height: the height of the button
    :type height: int
    :rtype: PyQt4.QtGui.QToolButton
    """
    return button(widget, master, label, callback=callback,
                  width=width, height=height,
                  buttonType=QtGui.QToolButton, tooltip=tooltip)
def createAttributePixmap(char, background=Qt.black, color=Qt.white):
    """
    Create a QIcon with a given character. The icon is 13 pixels high and wide.
    :param char: The character that is printed in the icon
    :type char: str
    :param background: the background color (default: black)
    :type background: PyQt4.QtGui.QColor
    :param color: the character color (default: white)
    :type color: PyQt4.QtGui.QColor
    :rtype: PyQt4.QtGui.QIcon
    """
    pm = QtGui.QPixmap(13, 13)
    # Start from a fully transparent pixmap.
    pm.fill(QtGui.QColor(0, 0, 0, 0))
    p = QtGui.QPainter()
    p.begin(pm)
    p.setRenderHints(p.Antialiasing | p.TextAntialiasing |
                     p.SmoothPixmapTransform)
    # Rounded rectangle in the background color...
    p.setPen(background)
    p.setBrush(background)
    p.drawRoundedRect(QtCore.QRectF(0, 0, 13, 13), 4, 4)
    # ...with the character painted on top.
    p.setPen(color)
    p.drawText(2, 11, char)
    p.end()
    return QtGui.QIcon(pm)
class __AttributeIconDict(dict):
    # Maps variable type codes (and Variable instances) to letter icons:
    # C = continuous, D = discrete, S = string, ? = unknown (key -1).
    def __getitem__(self, key):
        """Return the icon for a variable type code or a Variable instance;
        unrecognized keys map to the question-mark icon."""
        # Populate lazily on first access: the pixmaps cannot be created
        # before the application is initialized (see the module-level
        # comment on attributeIconDict).
        if not self:
            for tpe, char, col in ((vartype(ContinuousVariable()),
                                    "C", (202, 0, 32)),
                                   (vartype(DiscreteVariable()),
                                    "D", (26, 150, 65)),
                                   (vartype(StringVariable()),
                                    "S", (0, 0, 0)),
                                   (-1, "?", (128, 128, 128))):
                self[tpe] = createAttributePixmap(char, QtGui.QColor(*col))
        if key not in self:
            # A Variable instance falls back to its type code; anything else
            # to the question-mark entry.
            key = vartype(key) if isinstance(key, Variable) else -1
        return super().__getitem__(key)
#: A dict that returns icons for different attribute types. The dict is
#: constructed on first use since icons cannot be created before initializing
#: the application.
#:
#: Accepted keys are variable type codes and instances
#: of :obj:`Orange.data.variable`: `attributeIconDict[var]` will give the
#: appropriate icon for variable `var` or a question mark if the type is not
#: recognized
attributeIconDict = __AttributeIconDict()  # module-level singleton (used by attributeItem)
def attributeItem(var):
    """
    Return an (icon, name) pair for inserting variable *var* into a combo
    or list box.
    :param var: variable
    :type var: Orange.data.Variable
    :rtype: tuple with PyQt4.QtGui.QIcon and str
    """
    icon = attributeIconDict[var]
    return icon, var.name
def listBox(widget, master, value=None, labels=None, box=None, callback=None,
            selectionMode=QtGui.QListWidget.SingleSelection,
            enableDragDrop=False, dragDropCallback=None,
            dataValidityCallback=None, sizeHint=None, **misc):
    """
    Insert a list box.
    The value with which the box's value synchronizes (`master.<value>`)
    is a list of indices of selected items.
    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the name of the master's attribute with which the value is
        synchronized (list of ints - indices of selected items)
    :type value: str
    :param labels: the name of the master's attribute with the list of items
        (as strings or tuples with icon and string)
    :type labels: str
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param callback: a function that is called when the selection state is
        changed
    :type callback: function
    :param selectionMode: selection mode - single, multiple etc
    :type selectionMode: PyQt4.QtGui.QAbstractItemView.SelectionMode
    :param enableDragDrop: flag telling whether drag and drop is available
    :type enableDragDrop: bool
    :param dragDropCallback: callback function on drop event
    :type dragDropCallback: function
    :param dataValidityCallback: function that check the validity on enter
        and move event; it should return either `ev.accept()` or `ev.ignore()`.
    :type dataValidityCallback: function
    :param sizeHint: size hint
    :type sizeHint: PyQt4.QtGui.QSize
    :rtype: OrangeListBox
    """
    if box:
        bg = widgetBox(widget, box,
                       orientation="horizontal", addToLayout=False)
    else:
        bg = widget
    lb = OrangeListBox(master, enableDragDrop, dragDropCallback,
                       dataValidityCallback, sizeHint, bg)
    lb.setSelectionMode(selectionMode)
    # og* attributes record which master attributes this widget controls.
    lb.ogValue = value
    lb.ogLabels = labels
    lb.ogMaster = master
    if value is not None:
        # Wrap the selection list in a ControlledList bound to this list
        # box, so that mutations of the list are reflected in the widget.
        clist = getdeepattr(master, value)
        if not isinstance(clist, ControlledList):
            clist = ControlledList(clist, lb)
            master.__setattr__(value, clist)
    if labels is not None:
        # Re-assign the attribute to itself to trigger synchronization,
        # then register the label-updating front callback.
        setattr(master, labels, getdeepattr(master, labels))
        if hasattr(master, CONTROLLED_ATTRIBUTES):
            getattr(master, CONTROLLED_ATTRIBUTES)[labels] = CallFrontListBoxLabels(lb)
    if value is not None:
        # Same trick for the selection attribute.
        setattr(master, value, getdeepattr(master, value))
    connectControl(master, value, callback, lb.itemSelectionChanged,
                   CallFrontListBox(lb), CallBackListBox(lb, master))
    misc.setdefault('addSpace', True)
    miscellanea(lb, bg, widget, **misc)
    return lb
# btnLabels is a list of either char strings or pixmaps
def radioButtons(widget, master, value, btnLabels=(), tooltips=None,
                 box=None, label=None, orientation='vertical',
                 callback=None, **misc):
    """
    Construct a button group and add radio buttons, if they are given.
    The value with which the buttons synchronize is the index of selected
    button.
    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the master's attribute with which the value is synchronized
    :type value: str
    :param btnLabels: a list of labels or icons for radio buttons
    :type btnLabels: list of str or pixmaps
    :param tooltips: a list of tool tips of the same length as btnLabels
    :type tooltips: list of str
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param label: a label that is inserted into the box
    :type label: str
    :param callback: a function that is called when the selection is changed
    :type callback: function
    :param orientation: orientation of the layout in the box
    :type orientation: int or str or QLayout
    :rtype: PyQt4.QtQui.QButtonGroup
    """
    bg = widgetBox(widget, box, orientation, addToLayout=False)
    if not label is None:
        widgetLabel(bg, label)
    rb = QtGui.QButtonGroup(bg)
    if bg is not widget:
        bg.group = rb
    # group/buttons/ogValue/ogMaster form the protocol consumed by
    # appendRadioButton when adding buttons to this group.
    bg.buttons = []
    bg.ogValue = value
    bg.ogMaster = master
    for i, lab in enumerate(btnLabels):
        appendRadioButton(bg, lab, tooltip=tooltips and tooltips[i])
    connectControl(master, value, callback, bg.group.buttonClicked[int],
                   CallFrontRadioButtons(bg), CallBackRadioButton(bg, master))
    misc.setdefault('addSpace', bool(box))
    miscellanea(bg.group, bg, widget, **misc)
    return bg
radioButtonsInBox = radioButtons  # backward-compatible alias for the old name
def appendRadioButton(group, label, insertInto=None,
                      disabled=False, tooltip=None, sizePolicy=None,
                      addToLayout=True, stretch=0, addSpace=False):
    """
    Construct a radio button and add it to the group. The group must be
    constructed with :obj:`radioButtonsInBox` since it adds additional
    attributes need for the call backs.
    The radio button is inserted into `insertInto` or, if omitted, into the
    button group. This is useful for more complex groups, like those that have
    radio buttons in several groups, divided by labels and inside indented
    boxes.
    :param group: the button group
    :type group: PyQt4.QtCore.QButtonGroup
    :param label: string label or a pixmap for the button
    :type label: str or PyQt4.QtGui.QPixmap
    :param insertInto: the widget into which the radio button is inserted
    :type insertInto: PyQt4.QtGui.QWidget
    :rtype: PyQt4.QtGui.QRadioButton
    """
    # Index of the new button; it is also the value synchronized with the
    # master's attribute for this button.
    i = len(group.buttons)
    if isinstance(label, str):
        w = QtGui.QRadioButton(label)
    else:
        # Pixmap label: the index string serves as the text, the pixmap
        # becomes the icon.
        w = QtGui.QRadioButton(str(i))
        w.setIcon(QtGui.QIcon(label))
    # NOTE(review): this guard looks unreachable — the `len(group.buttons)`
    # above would already have raised if the attribute were missing; confirm.
    if not hasattr(group, "buttons"):
        group.buttons = []
    group.buttons.append(w)
    group.group.addButton(w)
    # Check the button if the master's current value selects this index.
    w.setChecked(getdeepattr(group.ogMaster, group.ogValue) == i)
    # miscellanea for this case is weird, so we do it here
    if disabled:
        w.setDisabled(disabled)
    if tooltip is not None:
        w.setToolTip(tooltip)
    if sizePolicy:
        w.setSizePolicy(sizePolicy)
    if addToLayout:
        dest = insertInto or group
        dest.layout().addWidget(w, stretch)
        _addSpace(dest, addSpace)
    return w
def hSlider(widget, master, value, box=None, minValue=0, maxValue=10, step=1,
            callback=None, label=None, labelFormat=" %d", ticks=False,
            divideFactor=1.0, vertical=False, createLabel=True, width=None,
            intOnly=True, **misc):
    """
    Construct a slider.
    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the master's attribute with which the value is synchronized
    :type value: str
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param label: a label that is inserted into the box
    :type label: str
    :param callback: a function that is called when the value is changed
    :type callback: function
    :param minValue: minimal value
    :type minValue: int or float
    :param maxValue: maximal value
    :type maxValue: int or float
    :param step: step size
    :type step: int or float
    :param labelFormat: the label format; default is `" %d"`
    :type labelFormat: str
    :param ticks: if set to `True`, ticks are added below the slider
    :type ticks: bool
    :param divideFactor: a factor with which the displayed value is divided
    :type divideFactor: float
    :param vertical: if set to `True`, the slider is vertical
    :type vertical: bool
    :param createLabel: unless set to `False`, labels for minimal, maximal
        and the current value are added to the widget
    :type createLabel: bool
    :param width: the width of the slider
    :type width: int
    :param intOnly: if `True`, the slider value is integer (the slider is
        of type :obj:`PyQt4.QtGui.QSlider`) otherwise it is float
        (:obj:`FloatSlider`, derived in turn from :obj:`PyQt4.QtQui.QSlider`).
    :type intOnly: bool
    :rtype: :obj:`PyQt4.QtGui.QSlider` or :obj:`FloatSlider`
    """
    sliderBox = widgetBox(widget, box, orientation="horizontal",
                          addToLayout=False)
    if label:
        widgetLabel(sliderBox, label)
    sliderOrient = Qt.Vertical if vertical else Qt.Horizontal
    if intOnly:
        slider = QtGui.QSlider(sliderOrient, sliderBox)
        slider.setRange(minValue, maxValue)
        if step:
            # The step drives single/page step and the tick spacing alike.
            slider.setSingleStep(step)
            slider.setPageStep(step)
            slider.setTickInterval(step)
        signal = slider.valueChanged[int]
    else:
        slider = FloatSlider(sliderOrient, minValue, maxValue, step)
        signal = slider.valueChangedFloat[float]
    sliderBox.layout().addWidget(slider)
    slider.setValue(getdeepattr(master, value))
    if width:
        slider.setFixedWidth(width)
    if ticks:
        # Note: `ticks` doubles as the tick interval value here.
        slider.setTickPosition(QtGui.QSlider.TicksBelow)
        slider.setTickInterval(ticks)
    if createLabel:
        label = QtGui.QLabel(sliderBox)
        sliderBox.layout().addWidget(label)
        # Pre-measure the label at both extremes and fix its size to the
        # wider one, so the layout does not jitter while sliding.
        label.setText(labelFormat % minValue)
        width1 = label.sizeHint().width()
        label.setText(labelFormat % maxValue)
        width2 = label.sizeHint().width()
        label.setFixedSize(max(width1, width2), label.sizeHint().height())
        txt = labelFormat % (getdeepattr(master, value) / divideFactor)
        label.setText(txt)
        # Keep the text in sync with the slider; the shown value is divided
        # by divideFactor before formatting.
        label.setLbl = lambda x: \
            label.setText(labelFormat % (x / divideFactor))
        signal.connect(label.setLbl)
    connectControl(master, value, callback, signal, CallFrontHSlider(slider))
    miscellanea(slider, sliderBox, widget, **misc)
    return slider
def labeledSlider(widget, master, value, box=None,
                  label=None, labels=(), labelFormat=" %d", ticks=False,
                  callback=None, vertical=False, width=None, **misc):
    """
    Construct a slider whose positions correspond to the elements of
    `labels`.

    The master attribute `value` stores the *label* at the current slider
    position (not the integer position itself); the initial position is
    found with ``labels.index(getdeepattr(master, value))``.

    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the master's attribute with which the value is synchronized
    :type value: str
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param label: a label that is inserted into the box
    :type label: str
    :param labels: labels shown at different slider positions
    :type labels: tuple of str
    :param labelFormat: format for the value label next to the slider;
        NOTE(review): the default " %d" only works with numeric labels --
        with string labels (as documented above) the ``labelFormat % lb``
        below would raise TypeError; confirm the intended label type
    :type labelFormat: str or func
    :param callback: a function that is called when the value is changed
    :type callback: function
    :param ticks: if set to `True`, ticks are added below the slider
    :type ticks: bool
    :param vertical: if set to `True`, the slider is vertical
    :type vertical: bool
    :param width: the width of the slider
    :type width: int
    :rtype: :obj:`PyQt4.QtGui.QSlider`
    """
    sliderBox = widgetBox(widget, box, orientation="horizontal",
                          addToLayout=False)
    if label:
        widgetLabel(sliderBox, label)
    sliderOrient = Qt.Vertical if vertical else Qt.Horizontal
    slider = QtGui.QSlider(sliderOrient, sliderBox)
    slider.ogValue = value  # attribute name, read by the callback machinery
    slider.setRange(0, len(labels) - 1)
    slider.setSingleStep(1)
    slider.setPageStep(1)
    slider.setTickInterval(1)
    sliderBox.layout().addWidget(slider)
    slider.setValue(labels.index(getdeepattr(master, value)))
    if width:
        slider.setFixedWidth(width)
    if ticks:
        slider.setTickPosition(QtGui.QSlider.TicksBelow)
        slider.setTickInterval(ticks)
    # Size the value label to the widest of all labels so the layout does
    # not jump while the slider moves.
    max_label_size = 0
    slider.value_label = value_label = QtGui.QLabel(sliderBox)
    value_label.setAlignment(Qt.AlignRight)
    sliderBox.layout().addWidget(value_label)
    for lb in labels:
        value_label.setText(labelFormat % lb)
        max_label_size = max(max_label_size, value_label.sizeHint().width())
    value_label.setFixedSize(max_label_size, value_label.sizeHint().height())
    value_label.setText(getdeepattr(master, value))
    if isinstance(labelFormat, str):
        value_label.set_label = lambda x: \
            value_label.setText(labelFormat % x)
    else:
        # labelFormat may also be a callable that maps a value to text
        value_label.set_label = lambda x: value_label.setText(labelFormat(x))
    slider.valueChanged[int].connect(value_label.set_label)
    connectControl(master, value, callback, slider.valueChanged[int],
                   CallFrontLabeledSlider(slider, labels),
                   CallBackLabeledSlider(slider, master, labels))
    miscellanea(slider, sliderBox, widget, **misc)
    return slider
def valueSlider(widget, master, value, box=None, label=None,
                values=(), labelFormat=" %d", ticks=False,
                callback=None, vertical=False, width=None, **misc):
    """
    Construct a slider whose positions correspond to the elements of
    `values`; the master attribute `value` stores the selected element
    (not the position index).

    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the master's attribute with which the value is synchronized
    :type value: str
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param label: a label that is inserted into the box
    :type label: str
    :param values: values at different slider positions
    :type values: list of int
    :param labelFormat: label format; default is `" %d"`; can also be a function
    :type labelFormat: str or func
    :param callback: a function that is called when the value is changed
    :type callback: function
    :param ticks: if set to `True`, ticks are added below the slider
    :type ticks: bool
    :param vertical: if set to `True`, the slider is vertical
    :type vertical: bool
    :param width: the width of the slider
    :type width: int
    :rtype: :obj:`PyQt4.QtGui.QSlider`
    """
    if isinstance(labelFormat, str):
        # Wrap a printf-style format string into a callable so the rest of
        # the function can treat `labelFormat` uniformly.
        # BUGFIX: this used to be `f(x)`, which tried to *call* the format
        # string and raised TypeError for the default " %d".
        labelFormat = lambda x, f=labelFormat: f % x
    sliderBox = widgetBox(widget, box, orientation="horizontal",
                          addToLayout=False)
    if label:
        widgetLabel(sliderBox, label)
    slider_orient = Qt.Vertical if vertical else Qt.Horizontal
    slider = QtGui.QSlider(slider_orient, sliderBox)
    slider.ogValue = value  # attribute name, read by the callback machinery
    slider.setRange(0, len(values) - 1)
    slider.setSingleStep(1)
    slider.setPageStep(1)
    slider.setTickInterval(1)
    sliderBox.layout().addWidget(slider)
    slider.setValue(values.index(getdeepattr(master, value)))
    if width:
        slider.setFixedWidth(width)
    if ticks:
        slider.setTickPosition(QtGui.QSlider.TicksBelow)
        slider.setTickInterval(ticks)
    # Size the value label to the widest rendered value so the layout does
    # not jump while the slider moves.
    max_label_size = 0
    slider.value_label = value_label = QtGui.QLabel(sliderBox)
    value_label.setAlignment(Qt.AlignRight)
    sliderBox.layout().addWidget(value_label)
    for lb in values:
        value_label.setText(labelFormat(lb))
        max_label_size = max(max_label_size, value_label.sizeHint().width())
    value_label.setFixedSize(max_label_size, value_label.sizeHint().height())
    value_label.setText(labelFormat(getdeepattr(master, value)))
    value_label.set_label = lambda x: value_label.setText(labelFormat(values[x]))
    slider.valueChanged[int].connect(value_label.set_label)
    connectControl(master, value, callback, slider.valueChanged[int],
                   CallFrontLabeledSlider(slider, values),
                   CallBackLabeledSlider(slider, master, values))
    miscellanea(slider, sliderBox, widget, **misc)
    return slider
class OrangeComboBox(QtGui.QComboBox):
    """
    A QtGui.QComboBox subclass extended to support a bounded contents
    width hint (an upper limit, expressed in characters, on the widths
    reported by `sizeHint` and `minimumSizeHint`).
    """
    def __init__(self, parent=None, maximumContentsLength=-1, **kwargs):
        super().__init__(parent, **kwargs)
        self.__maximumContentsLength = maximumContentsLength

    def setMaximumContentsLength(self, length):
        """
        Set the maximum contents length hint.

        The hint specifies the upper bound on the `sizeHint` and
        `minimumSizeHint` width specified in character length.
        Set to 0 or a negative value to disable it.

        .. note::
             This property does not affect the widget's `maximumSize`.
             The widget can still grow depending on its sizePolicy.

        Parameters
        ----------
        length : int
            Maximum contents length hint.
        """
        if self.__maximumContentsLength != length:
            self.__maximumContentsLength = length
            self.updateGeometry()

    def maximumContentsLength(self):
        """Return the maximum contents length hint."""
        return self.__maximumContentsLength

    def __boundedSize(self, sh):
        # Clamp a size hint's width to the configured character limit
        # (shared by sizeHint and minimumSizeHint).
        limit = self.__maximumContentsLength
        if limit > 0:
            width = (self.fontMetrics().width("X") * limit
                     + self.iconSize().width() + 4)
            sh = sh.boundedTo(QtCore.QSize(width, sh.height()))
        return sh

    def sizeHint(self):
        # reimplemented
        return self.__boundedSize(super().sizeHint())

    def minimumSizeHint(self):
        # reimplemented
        return self.__boundedSize(super().minimumSizeHint())
# TODO comboBox looks overly complicated:
# - is the argument control2attributeDict needed? doesn't emptyString do the
# job?
# - can valueType be anything else than str?
# - sendSelectedValue is not a great name
def comboBox(widget, master, value, box=None, label=None, labelWidth=None,
             orientation='vertical', items=(), callback=None,
             sendSelectedValue=False, valueType=str,
             control2attributeDict=None, emptyString=None, editable=False,
             contentsLength=None, maximumContentsLength=25,
             **misc):
    """
    Construct a combo box.

    The `value` attribute of the `master` contains either the index of the
    selected row (if `sendSelectedValue` is left at default, `False`) or a
    value converted to `valueType` (`str` by default).

    Furthermore, the value is converted by looking up into dictionary
    `control2attributeDict`.

    :param widget: the widget into which the box is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the master's attribute with which the value is synchronized
    :type value: str
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param orientation: orientation of the layout in the box
    :type orientation: str or int or bool
    :param label: a label that is inserted into the box
    :type label: str
    :param labelWidth: the width of the label
    :type labelWidth: int
    :param callback: a function that is called when the value is changed
    :type callback: function
    :param items: items (optionally with data) that are put into the box
    :type items: tuple of str or tuples
    :param sendSelectedValue: flag telling whether to store/retrieve indices
        or string values from `value`
    :type sendSelectedValue: bool
    :param valueType: the type into which the selected value is converted
        if sentSelectedValue is `False`
    :type valueType: type
    :param control2attributeDict: a dictionary through which the value is
        converted
    :type control2attributeDict: dict or None
    :param emptyString: the string value in the combo box that gets stored as
        an empty string in `value`
    :type emptyString: str
    :param editable: a flag telling whether the combo is editable
    :type editable: bool
    :param int contentsLength: Contents character length to use as a
        fixed size hint. When not None, equivalent to::

            combo.setSizeAdjustPolicy(
                QComboBox.AdjustToMinimumContentsLengthWithIcon)
            combo.setMinimumContentsLength(contentsLength)
    :param int maximumContentsLength: Specifies the upper bound on the
        `sizeHint` and `minimumSizeHint` width specified in character
        length (default: 25, use 0 to disable)
    :rtype: PyQt4.QtGui.QComboBox
    """
    # Put the combo into a labelled box only when a box/label is requested;
    # otherwise insert it directly into `widget`.
    if box or label:
        hb = widgetBox(widget, box, orientation, addToLayout=False)
        if label is not None:
            widgetLabel(hb, label, labelWidth)
    else:
        hb = widget
    combo = OrangeComboBox(
        hb, maximumContentsLength=maximumContentsLength,
        editable=editable)
    if contentsLength is not None:
        combo.setSizeAdjustPolicy(
            QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon)
        combo.setMinimumContentsLength(contentsLength)
    combo.box = hb
    for item in items:
        # A tuple/list item is (text, icon/userData); see QComboBox.addItem.
        if isinstance(item, (tuple, list)):
            combo.addItem(*item)
        else:
            combo.addItem(str(item))
    if value:
        # Initialize the current index from the master attribute; a string
        # value is looked up among the items, anything out of range falls
        # back to index 0.
        cindex = getdeepattr(master, value)
        if isinstance(cindex, str):
            if items and cindex in items:
                cindex = items.index(getdeepattr(master, value))
            else:
                cindex = 0
        if cindex > combo.count() - 1:
            cindex = 0
        combo.setCurrentIndex(cindex)
        if sendSelectedValue:
            if control2attributeDict is None:
                control2attributeDict = {}
            if emptyString:
                control2attributeDict[emptyString] = ""
            connectControl(
                master, value, callback, combo.activated[str],
                CallFrontComboBox(combo, valueType, control2attributeDict),
                ValueCallbackCombo(master, value, valueType,
                                   control2attributeDict))
        else:
            connectControl(
                master, value, callback, combo.activated[int],
                CallFrontComboBox(combo, None, control2attributeDict))
    miscellanea(combo, hb, widget, **misc)
    return combo
class OrangeListBox(QtGui.QListWidget):
    """
    List box with drag and drop functionality. Function :obj:`listBox`
    constructs instances of this class; do not use the class directly.

    .. attribute:: master

        The widget into which the listbox is inserted.

    .. attribute:: ogLabels

        The name of the master's attribute that holds the strings with items
        in the list box. (Assigned externally -- presumably by
        :obj:`listBox` -- not set in this class; confirm against the
        factory function.)

    .. attribute:: ogValue

        The name of the master's attribute that holds the indices of selected
        items. (Assigned externally, like `ogLabels`.)

    .. attribute:: enableDragDrop

        A flag telling whether drag-and-drop is enabled.

    .. attribute:: dragDropCallback

        A callback that is called at the end of drop event.

    .. attribute:: dataValidityCallback

        A callback that is called on dragEnter and dragMove events and returns
        either `ev.accept()` or `ev.ignore()`.

    .. attribute:: defaultSizeHint

        The size returned by the `sizeHint` method.
    """
    def __init__(self, master, enableDragDrop=False, dragDropCallback=None,
                 dataValidityCallback=None, sizeHint=None, *args):
        """
        :param master: the master widget
        :type master: OWWidget or OWComponent
        :param enableDragDrop: flag telling whether drag and drop is enabled
        :type enableDragDrop: bool
        :param dragDropCallback: callback for the end of drop event
        :type dragDropCallback: function
        :param dataValidityCallback: callback that accepts or ignores dragEnter
            and dragMove events
        :type dataValidityCallback: function with one argument (event)
        :param sizeHint: size hint
        :type sizeHint: PyQt4.QtGui.QSize
        :param args: optional arguments for the inherited constructor
        """
        self.master = master
        super().__init__(*args)
        self.drop_callback = dragDropCallback
        self.valid_data_callback = dataValidityCallback
        if not sizeHint:
            self.size_hint = QtCore.QSize(150, 100)
        else:
            self.size_hint = sizeHint
        if enableDragDrop:
            self.setDragEnabled(True)
            self.setAcceptDrops(True)
            self.setDropIndicatorShown(True)

    def sizeHint(self):
        return self.size_hint

    def dragEnterEvent(self, ev):
        super().dragEnterEvent(ev)
        # Let the custom validator decide; otherwise accept only drags that
        # originate from another OrangeListBox, treating them as a move.
        if self.valid_data_callback:
            self.valid_data_callback(ev)
        elif isinstance(ev.source(), OrangeListBox):
            ev.setDropAction(Qt.MoveAction)
            ev.accept()
        else:
            ev.ignore()

    def dropEvent(self, ev):
        ev.setDropAction(Qt.MoveAction)
        super().dropEvent(ev)
        # After a move, resync both the destination and (if different) the
        # source list box with their masters' attributes.
        items = self.update_master()
        if ev.source() is not self:
            ev.source().update_master(exclude=items)
        if self.drop_callback:
            self.drop_callback()

    def update_master(self, exclude=()):
        # Rebuild the master's label list from the items' stored user data,
        # skipping the values in `exclude` (items that were moved away).
        control_list = [self.item(i).data(Qt.UserRole) for i in range(self.count()) if self.item(i).data(Qt.UserRole) not in exclude]
        if self.ogLabels:
            master_list = getattr(self.master, self.ogLabels)
            if master_list != control_list:
                setattr(self.master, self.ogLabels, control_list)
        return control_list

    def updateGeometries(self):
        # A workaround for a bug in Qt
        # (see: http://bugreports.qt.nokia.com/browse/QTBUG-14412)
        if getattr(self, "_updatingGeometriesNow", False):
            return
        self._updatingGeometriesNow = True
        try:
            return super().updateGeometries()
        finally:
            self._updatingGeometriesNow = False
# TODO: SmallWidgetButton is used only in OWkNNOptimization.py. (Re)Move.
# eliminated?
class SmallWidgetButton(QtGui.QPushButton):
    """Small button that pops up an auto-hiding widget box just below
    itself; clicking again (or leaving the popup) hides it."""
    def __init__(self, widget, text="", pixmap=None, box=None,
                 orientation='vertical', autoHideWidget=None, **misc):
        #self.parent = parent
        if pixmap is not None:
            # Resolve the icon: an absolute/relative path, a file name in
            # the package's icon directory, or a ready QPixmap/QIcon;
            # fall back to the default "arrow_down" icon.
            iconDir = os.path.join(os.path.dirname(__file__), "icons")
            name = ""
            if isinstance(pixmap, str):
                if os.path.exists(pixmap):
                    name = pixmap
                elif os.path.exists(os.path.join(iconDir, pixmap)):
                    name = os.path.join(iconDir, pixmap)
            elif isinstance(pixmap, (QtGui.QPixmap, QtGui.QIcon)):
                name = pixmap
            name = name or os.path.join(iconDir, "arrow_down.png")
            super().__init__(QtGui.QIcon(name), text, widget)
        else:
            super().__init__(text, widget)
        if widget.layout() is not None:
            widget.layout().addWidget(self)
        # create autohide widget and set a layout
        self.widget = self.autohideWidget = \
            (autoHideWidget or AutoHideWidget)(None, Qt.Popup)
        setLayout(self.widget, orientation)
        if box:
            self.widget = widgetBox(self.widget, box, orientation)
        self.autohideWidget.hide()
        miscellanea(self, self.widget, widget, **misc)

    def mousePressEvent(self, ev):
        # Toggle the popup: hide it if visible, otherwise show it aligned
        # with the button's bottom-left corner.
        super().mousePressEvent(ev)
        if self.autohideWidget.isVisible():
            self.autohideWidget.hide()
        else:
            self.autohideWidget.move(
                self.mapToGlobal(QtCore.QPoint(0, self.height())))
            self.autohideWidget.show()
class SmallWidgetLabel(QtGui.QLabel):
    """Label (text or icon) that pops up an auto-hiding widget box just
    below itself when clicked."""
    def __init__(self, widget, text="", pixmap=None, box=None,
                 orientation='vertical', **misc):
        super().__init__(widget)
        if text:
            self.setText("<font color=\"#C10004\">" + text + "</font>")
        elif pixmap is not None:
            # Resolve the icon the same way SmallWidgetButton does: a path,
            # a name in the package icon directory, or a ready
            # QPixmap/QIcon, with "arrow_down" as fallback.
            iconDir = os.path.join(os.path.dirname(__file__), "icons")
            name = ""
            if isinstance(pixmap, str):
                if os.path.exists(pixmap):
                    name = pixmap
                elif os.path.exists(os.path.join(iconDir, pixmap)):
                    name = os.path.join(iconDir, pixmap)
            elif isinstance(pixmap, (QtGui.QPixmap, QtGui.QIcon)):
                name = pixmap
            name = name or os.path.join(iconDir, "arrow_down.png")
            self.setPixmap(QtGui.QPixmap(name))
        self.autohideWidget = self.widget = AutoHideWidget(None, Qt.Popup)
        setLayout(self.widget, orientation)
        if box:
            self.widget = widgetBox(self.widget, box, orientation)
        self.autohideWidget.hide()
        miscellanea(self, self.widget, widget, **misc)

    def mousePressEvent(self, ev):
        # Toggle the popup below the label, as in SmallWidgetButton.
        super().mousePressEvent(ev)
        if self.autohideWidget.isVisible():
            self.autohideWidget.hide()
        else:
            self.autohideWidget.move(
                self.mapToGlobal(QtCore.QPoint(0, self.height())))
            self.autohideWidget.show()
class AutoHideWidget(QtGui.QWidget):
    """Popup widget that hides itself as soon as the mouse leaves it."""
    def leaveEvent(self, _):
        self.hide()
# TODO Class SearchLineEdit: it doesn't seem to be used anywhere
# see widget DataDomain
class SearchLineEdit(QtGui.QLineEdit):
    """
    QLineEdit for quick searches; navigates the list box of the associated
    :obj:`Searcher` with the up/down keys and closes it with Escape.
    """
    def __init__(self, t, searcher):
        # BUGFIX: was `super().__init__(self, t)`, which passed `self`
        # twice and raised TypeError on construction.
        super().__init__(t)
        self.searcher = searcher

    def keyPressEvent(self, e):
        """
        Handles keys up and down by selecting the previous and the next item
        in the list, and the escape key, which hides the searcher.
        """
        k = e.key()
        if k == Qt.Key_Down:
            # BUGFIX: currentItem() returns a QListWidgetItem; row
            # arithmetic requires currentRow()/setCurrentRow().
            row = self.searcher.lb.currentRow()
            if row + 1 < self.searcher.lb.count():
                self.searcher.lb.setCurrentRow(row + 1)
        elif k == Qt.Key_Up:
            row = self.searcher.lb.currentRow()
            if row:
                self.searcher.lb.setCurrentRow(row - 1)
        elif k == Qt.Key_Escape:
            self.searcher.window.hide()
        else:
            return super().keyPressEvent(e)
# TODO Class Searcher: it doesn't seem to be used anywhere
# see widget DataDomain
class Searcher:
    """
    The searcher class for :obj:`SearchLineEdit`: pops up a small frame
    with a line edit and a list of the control's items filtered by the
    typed text.

    NOTE(review): this looks like unused legacy code (see the TODO above).
    The `QStyle.WStyle_*` flags below are Qt3-era and do not exist in
    PyQt4's QStyle, and QVBoxLayout has no `setAutoAdd` in Qt4 -- calling
    this would most likely fail at runtime; confirm before reviving.
    """
    def __init__(self, control, master):
        self.control = control
        self.master = master

    def __call__(self):
        _s = QtGui.QStyle
        self.window = t = QtGui.QFrame(
            self.master,
            _s.WStyle_Dialog + _s.WStyle_Tool + _s.WStyle_Customize +
            _s.WStyle_NormalBorder)
        QtGui.QVBoxLayout(t).setAutoAdd(1)
        # Position the popup over the control, in master coordinates.
        gs = self.master.mapToGlobal(QtCore.QPoint(0, 0))
        gl = self.control.mapToGlobal(QtCore.QPoint(0, 0))
        t.move(gl.x() - gs.x(), gl.y() - gs.y())
        self.allItems = [self.control.text(i)
                         for i in range(self.control.count())]
        le = SearchLineEdit(t, self)
        self.lb = QtGui.QListWidget(t)
        for i in self.allItems:
            self.lb.insertItem(i)
        t.setFixedSize(self.control.width(), 200)
        t.show()
        le.setFocus()
        le.textChanged.connect(self.textChanged)
        le.returnPressed.connect(self.returnPressed)
        self.lb.itemClicked.connect(self.mouseClicked)

    def textChanged(self, s):
        # Re-filter the list to items containing the typed text
        # (case-insensitive substring match).
        s = str(s)
        self.lb.clear()
        for i in self.allItems:
            if s.lower() in i.lower():
                self.lb.insertItem(i)

    def returnPressed(self):
        if self.lb.count():
            self.conclude(self.lb.text(max(0, self.lb.currentItem())))
        else:
            self.window.hide()

    def mouseClicked(self, item):
        self.conclude(item.text())

    def conclude(self, value):
        # Apply the chosen item to the control and fire its callbacks.
        index = self.allItems.index(value)
        self.control.setCurrentItem(index)
        if self.control.cback:
            if self.control.sendSelectedValue:
                self.control.cback(value)
            else:
                self.control.cback(index)
        if self.control.cfunc:
            self.control.cfunc()
        self.window.hide()
# creates a widget box with a button in the top right edge that shows/hides all
# widgets in the box and collapse the box to its minimum height
# TODO collapsableWidgetBox is used only in OWMosaicDisplay.py; (re)move
class collapsableWidgetBox(QtGui.QGroupBox):
    """Checkable group box that collapses (hides all child widgets) when
    unchecked; the checked state is mirrored into the master's attribute
    named by `value`."""
    def __init__(self, widget, box="", master=None, value="",
                 orientation="vertical", callback=None):
        super().__init__(widget)
        self.setFlat(1)
        setLayout(self, orientation)
        if widget.layout() is not None:
            widget.layout().addWidget(self)
        if isinstance(box, str):
            self.setTitle(" " + box.strip() + " ")
        self.setCheckable(True)
        self.master = master
        self.value = value
        self.callback = callback
        self.clicked.connect(self.toggled)

    def toggled(self, _=0):
        # Store the new checked state in the master and show/hide contents;
        # then notify the optional callback.
        if self.value:
            self.master.__setattr__(self.value, self.isChecked())
            self.updateControls()
        if self.callback is not None:
            self.callback()

    def updateControls(self):
        val = getdeepattr(self.master, self.value)
        width = self.width()
        self.setChecked(val)
        self.setFlat(not val)
        # Preserve the current width while collapsed so the surrounding
        # layout does not jump.
        self.setMinimumSize(QtCore.QSize(width if not val else 0, 0))
        for c in self.children():
            # Skip layout objects; only actual child widgets are toggled.
            if isinstance(c, QtGui.QLayout):
                continue
            if val:
                c.show()
            else:
                c.hide()
# creates an icon that allows you to show/hide the widgets in the widgets list
# TODO Class widgetHider doesn't seem to be used anywhere; remove?
class widgetHider(QtGui.QWidget):
    """Clickable up/down-arrow icon that shows or hides the widgets given
    in `widgets`, mirroring the state in the master's boolean attribute
    named by `value`."""
    def __init__(self, widget, master, value, _=(19, 19), widgets=None,
                 tooltip=None):
        super().__init__(widget)
        if widget.layout() is not None:
            widget.layout().addWidget(self)
        self.value = value
        self.master = master
        if tooltip:
            self.setToolTip(tooltip)
        iconDir = os.path.join(os.path.dirname(__file__), "icons")
        icon1 = os.path.join(iconDir, "arrow_down.png")
        icon2 = os.path.join(iconDir, "arrow_up.png")
        # pixmaps[0] = hidden state (down arrow), pixmaps[1] = shown state.
        self.pixmaps = [QtGui.QPixmap(icon1), QtGui.QPixmap(icon2)]
        self.setFixedSize(self.pixmaps[0].size())
        self.disables = list(widgets or [])
        # Disabler in HIDER mode toggles visibility of the dependent
        # widgets according to the master's attribute.
        self.makeConsistent = Disabler(self, master, value, type=HIDER)
        if widgets:
            self.setWidgets(widgets)

    def mousePressEvent(self, ev):
        # Flip the boolean master attribute and refresh dependent widgets.
        self.master.__setattr__(self.value,
                                not getdeepattr(self.master, self.value))
        self.makeConsistent()

    def setWidgets(self, widgets):
        self.disables = list(widgets)
        self.makeConsistent()

    def paintEvent(self, ev):
        super().paintEvent(ev)
        if self.pixmaps:
            # Draw the arrow that matches the current boolean state.
            pix = self.pixmaps[getdeepattr(self.master, self.value)]
            painter = QtGui.QPainter(self)
            painter.drawPixmap(0, 0, pix)
##############################################################################
# callback handlers
def auto_commit(widget, master, value, label, auto_label=None, box=True,
                checkbox_label=None, orientation=None, commit=None,
                callback=None, **misc):
    """
    Add a commit button with auto-commit check box.

    The widget must have a commit method and a setting that stores whether
    auto-commit is on.

    The function replaces the commit method with a new commit method that
    checks whether auto-commit is on. If it is, it passes the call to the
    original commit, otherwise it sets the dirty flag.

    The checkbox controls the auto-commit. When auto-commit is switched on, the
    checkbox callback checks whether the dirty flag is on and calls the original
    commit.

    Important! Do not connect any signals to the commit before calling
    auto_commit.

    :param widget: the widget into which the box with the button is inserted
    :type widget: PyQt4.QtGui.QWidget or None
    :param value: the master's attribute which stores whether the auto-commit
        is on
    :type value: str
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param label: The button label
    :type label: str
    :param auto_label: The label used when auto-commit is on; default is
        `"Auto " + label`
    :type auto_label: str
    :param commit: master's method to override ('commit' by default)
    :type commit: function
    :param callback: function to call whenever the checkbox's statechanged
    :type callback: function
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :return: the box
    """
    def checkbox_toggled():
        # Switch the button between manual and auto mode; flush any pending
        # (dirty) change as soon as auto-commit is turned on.
        if getattr(master, value):
            btn.setText(auto_label)
            btn.setEnabled(False)
            if dirty:
                do_commit()
        else:
            btn.setText(label)
            btn.setEnabled(True)
        if callback:
            callback()

    def unconditional_commit(*args, **kwargs):
        # Replacement for master.<commit>: commit immediately when
        # auto-commit is on, otherwise just remember that data changed.
        nonlocal dirty
        if getattr(master, value):
            do_commit(*args, **kwargs)
        else:
            dirty = True

    def do_commit(*args, **kwargs):
        # Run the original commit under a wait cursor; clear the dirty flag.
        nonlocal dirty
        QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
        commit(*args, **kwargs)
        QApplication.restoreOverrideCursor()
        dirty = False

    dirty = False
    commit = commit or getattr(master, 'commit')
    # Keep the original method reachable as master.unconditional_<name>;
    # lambdas get a generated name from LAMBDA_NAME.
    commit_name = next(LAMBDA_NAME) if isinstance(commit, LambdaType) else commit.__name__
    setattr(master, 'unconditional_' + commit_name, commit)
    if not auto_label:
        if checkbox_label:
            auto_label = label
        else:
            auto_label = "Auto " + label.lower() + " is on"
    if isinstance(box, QtGui.QWidget):
        # Caller supplied a container widget; reuse it instead of making one.
        b = box
    else:
        if orientation is None:
            orientation = bool(checkbox_label)
        b = widgetBox(widget, box=box, orientation=orientation,
                      addToLayout=False)
        b.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
    b.checkbox = cb = checkBox(b, master, value, checkbox_label,
                               callback=checkbox_toggled, tooltip=auto_label)
    if checkbox_label and orientation == 'horizontal' or not orientation:
        b.layout().insertSpacing(-1, 10)
    cb.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
    b.button = btn = button(b, master, label, callback=lambda: do_commit())
    if not checkbox_label:
        btn.setSizePolicy(QtGui.QSizePolicy.Expanding,
                          QtGui.QSizePolicy.Preferred)
    # Bring the button into the state matching the current setting.
    checkbox_toggled()
    setattr(master, commit_name, unconditional_commit)
    miscellanea(b, widget, widget,
                addToLayout=not isinstance(box, QtGui.QWidget), **misc)
    return b
class ControlledList(list):
    """
    A class derived from a list that is connected to a
    :obj:`PyQt4.QtGui.QListBox`: the list contains indices of items that are
    selected in the list box. Changing the list content changes the
    selection in the list box. (Internal code such as
    :obj:`CallBackListBox` uses the plain ``list`` methods to bypass this
    synchronization.)
    """
    def __init__(self, content, listBox=None):
        super().__init__(content)
        self.listBox = listBox

    def __reduce__(self):
        # cannot pickle self.listBox, but can't discard it
        # (ControlledList may live on)
        import copyreg
        return copyreg._reconstructor, (list, list, ()), None, self.__iter__()

    # TODO ControlledList.item2name is probably never used
    def item2name(self, item):
        item = self.listBox.labels[item]
        if type(item) is tuple:
            return item[1]
        else:
            return item

    def __setitem__(self, index, item):
        if isinstance(index, int):
            self.listBox.item(self[index]).setSelected(0)
            item.setSelected(1)
        else:
            for i in self[index]:
                self.listBox.item(i).setSelected(0)
            for i in item:
                self.listBox.item(i).setSelected(1)
        super().__setitem__(index, item)

    def __delitem__(self, index):
        if isinstance(index, int):
            self.listBox.item(self[index]).setSelected(0)
        else:
            for i in self[index]:
                self.listBox.item(i).setSelected(0)
        super().__delitem__(index)

    def append(self, item):
        super().append(item)
        item.setSelected(1)

    def extend(self, items):
        super().extend(items)
        for i in items:
            self.listBox.item(i).setSelected(1)

    def insert(self, index, item):
        item.setSelected(1)
        super().insert(index, item)

    def pop(self, index=-1):
        i = super().pop(index)
        self.listBox.item(i).setSelected(0)
        # BUGFIX: list.pop must return the removed element; the original
        # override silently returned None.
        return i

    def remove(self, item):
        item.setSelected(0)
        super().remove(item)
def connectControl(master, value, f, signal,
                   cfront, cback=None, cfunc=None, fvcb=None):
    """
    Wire a Qt control to the master attribute named by `value`.

    Connects `signal` to a value callback (control -> attribute) and a
    function callback (the user's `f`), registers the call front
    (attribute -> control) with the master, and cross-links the two
    directions through their `opposite` attributes so an update in one
    direction does not re-trigger the other.

    Returns the tuple ``(cfront, cback, cfunc)``.
    """
    if not cback:
        cback = value and ValueCallback(master, value, fvcb)
    if cback:
        if signal:
            signal.connect(cback)
        cback.opposite = cfront
        if value and cfront and hasattr(master, CONTROLLED_ATTRIBUTES):
            getattr(master, CONTROLLED_ATTRIBUTES)[value] = cfront
    if not cfunc:
        cfunc = f and FunctionCallback(master, f)
    if cfunc:
        if signal:
            signal.connect(cfunc)
        cfront.opposite = tuple(cb for cb in (cback, cfunc) if cb)
    return cfront, cback, cfunc
class ControlledCallback:
    """
    Base class for callbacks that store a control's value into an
    attribute of `widget` -- the master widget, or a plain dict.

    :param widget: object (or dict) that receives the value
    :param attribute: name (or dict key) under which the value is stored
    :param f: optional conversion applied to the value before storing
    """
    def __init__(self, widget, attribute, f=None):
        self.widget = widget
        self.attribute = attribute
        self.f = f
        self.disabled = 0
        if isinstance(widget, dict):
            return  # we can't assign attributes to dict
        if not hasattr(widget, "callbackDeposit"):
            widget.callbackDeposit = []
        widget.callbackDeposit.append(self)

    def acyclic_setattr(self, value):
        """Store `value` (converted by `self.f`, if given) while guarding
        against callback cycles: the "opposite" call front is temporarily
        disabled so updating the attribute does not re-trigger it."""
        if self.disabled:
            return
        if self.f:
            if self.f in (int, float) and (
                    not value or isinstance(value, str) and value in "+-"):
                # Treat empty/partial numeric input ("", "+", "-") as 0 so
                # typing a sign into a line edit does not raise ValueError.
                value = self.f(0)
            else:
                value = self.f(value)
        opposite = getattr(self, "opposite", None)
        if opposite:
            try:
                opposite.disabled += 1
                # CONSISTENCY FIX: use isinstance (as in __init__ and the
                # else-branch below) instead of `type(...) is dict`, so
                # dict subclasses are handled uniformly.
                if isinstance(self.widget, dict):
                    self.widget[self.attribute] = value
                else:
                    setattr(self.widget, self.attribute, value)
            finally:
                opposite.disabled -= 1
        else:
            if isinstance(self.widget, dict):
                self.widget[self.attribute] = value
            else:
                setattr(self.widget, self.attribute, value)
class ValueCallback(ControlledCallback):
    """Store a control's new value into the master attribute, reporting
    (but swallowing) any failure so a broken control cannot crash the GUI."""

    # noinspection PyBroadException
    def __call__(self, value):
        if value is None:
            return
        try:
            self.acyclic_setattr(value)
        except BaseException:
            # Deliberate catch-all: log the failure and carry on.
            print("gui.ValueCallback: %s" % value)
            import traceback
            import sys
            traceback.print_exception(*sys.exc_info())
class ValueCallbackCombo(ValueCallback):
    """Value callback for combo boxes: translates the displayed text
    through `control2attributeDict` before storing it."""

    def __init__(self, widget, attribute, f=None, control2attributeDict=None):
        super().__init__(widget, attribute, f)
        self.control2attributeDict = control2attributeDict or {}

    def __call__(self, value):
        text = str(value)
        mapped = self.control2attributeDict.get(text, text)
        return super().__call__(mapped)
class ValueCallbackLineEdit(ControlledCallback):
    """Value callback for line edits: preserves the cursor position, which
    would otherwise jump when the attribute update writes the text back."""

    def __init__(self, control, widget, attribute, f=None):
        super().__init__(widget, attribute, f)
        self.control = control

    # noinspection PyBroadException
    def __call__(self, value):
        if value is None:
            return
        try:
            pos = self.control.cursorPosition()
            self.acyclic_setattr(value)
            self.control.setCursorPosition(pos)
        except BaseException:
            # Deliberate catch-all: log the failure and carry on.
            print("invalid value ", value, type(value))
            import traceback
            import sys
            traceback.print_exception(*sys.exc_info())
class SetLabelCallback:
    """Callback that renders a value into a label with a printf-style
    format, optionally transforming it with `f` first."""

    def __init__(self, widget, label, format="%5.2f", f=None):
        self.widget = widget
        self.label = label
        self.format = format
        self.f = f
        if hasattr(widget, "callbackDeposit"):
            widget.callbackDeposit.append(self)
        self.disabled = 0

    def __call__(self, value):
        if self.disabled or value is None:
            return
        shown = self.f(value) if self.f else value
        self.label.setText(self.format % shown)
class FunctionCallback:
    """Callback that invokes the user function(s) `f`, optionally passing
    the originating widget and/or an `id` as keyword arguments."""

    def __init__(self, master, f, widget=None, id=None, getwidget=False):
        self.master = master
        self.widget = widget
        self.f = f
        self.id = id
        self.getwidget = getwidget
        if hasattr(master, "callbackDeposit"):
            master.callbackDeposit.append(self)
        self.disabled = 0

    def __call__(self, *value):
        if self.disabled or value is None:
            return
        kwds = {}
        if self.id is not None:
            kwds['id'] = self.id
        if self.getwidget:
            kwds['widget'] = self.widget
        functions = self.f if isinstance(self.f, list) else [self.f]
        for func in functions:
            func(**kwds)
class CallBackListBox:
    """Synchronize a list box's selection into the master's attribute
    (a ControlledList of the selected row indices)."""

    def __init__(self, control, widget):
        self.control = control
        self.widget = widget
        self.disabled = 0

    def __call__(self, *_):  # triggered by selectionChange()
        if self.disabled or self.control.ogValue is None:
            return
        clist = getdeepattr(self.widget, self.control.ogValue)
        # Use the plain list methods to bypass ControlledList's overrides,
        # which would otherwise feed back into the list box (a cycle).
        list.__delitem__(clist, slice(0, len(clist)))
        for row in range(self.control.count()):
            if self.control.item(row).isSelected():
                list.append(clist, row)
        self.widget.__setattr__(self.control.ogValue, clist)
class CallBackRadioButton:
    """Store the index of the checked radio button into the master."""

    def __init__(self, control, widget):
        self.control = control
        self.widget = widget
        self.disabled = False

    def __call__(self, *_):  # triggered by toggled()
        if self.disabled or self.control.ogValue is None:
            return
        checked = [button.isChecked() for button in self.control.buttons]
        self.widget.__setattr__(self.control.ogValue, checked.index(1))
class CallBackLabeledSlider:
    """Store the slider's position, translated through `lookup`, into the
    master's attribute."""

    def __init__(self, control, widget, lookup):
        self.control = control
        self.widget = widget
        self.lookup = lookup
        self.disabled = False

    def __call__(self, *_):
        if self.disabled or self.control.ogValue is None:
            return
        chosen = self.lookup[self.control.value()]
        self.widget.__setattr__(self.control.ogValue, chosen)
##############################################################################
# call fronts (change of the attribute value changes the related control)
class ControlledCallFront:
    """
    Base class for "call fronts": callables that push a changed attribute
    value back into the associated Qt control. Subclasses implement
    :obj:`action`. While the action runs, all "opposite" callbacks
    (control -> attribute) are disabled to break the update cycle.
    """

    def __init__(self, control):
        self.control = control
        self.disabled = 0

    def action(self, *_):
        pass

    def __call__(self, *args):
        if self.disabled:
            return
        opposite = getattr(self, "opposite", None)
        if not opposite:
            self.action(*args)
            return
        try:
            for op in opposite:
                op.disabled += 1
            self.action(*args)
        finally:
            for op in opposite:
                op.disabled -= 1
class CallFrontSpin(ControlledCallFront):
    """Attribute -> control: set a spin box's value."""
    def action(self, value):
        if value is not None:
            self.control.setValue(value)
class CallFrontDoubleSpin(ControlledCallFront):
    """Attribute -> control: set a double spin box's value."""
    def action(self, value):
        if value is not None:
            self.control.setValue(value)
class CallFrontCheckBox(ControlledCallFront):
    """Attribute -> control: map the value 0/1/2 to the check state
    unchecked/checked/partially-checked."""
    def action(self, value):
        if value is not None:
            values = [Qt.Unchecked, Qt.Checked, Qt.PartiallyChecked]
            self.control.setCheckState(values[value])
class CallFrontButton(ControlledCallFront):
    """Attribute -> control: set a toggle button's checked state."""
    def action(self, value):
        if value is not None:
            self.control.setChecked(bool(value))
class CallFrontComboBox(ControlledCallFront):
    """Attribute -> control: select the combo item matching the value.

    With `valType` set, items are matched by their (converted) text;
    otherwise the value is used directly as the row index."""
    def __init__(self, control, valType=None, control2attributeDict=None):
        super().__init__(control)
        self.valType = valType
        if control2attributeDict is None:
            self.attribute2controlDict = {}
        else:
            # Invert the mapping: attribute values are translated back to
            # the text shown in the control.
            self.attribute2controlDict = \
                {y: x for x, y in control2attributeDict.items()}

    def action(self, value):
        if value is not None:
            value = self.attribute2controlDict.get(value, value)
            if self.valType:
                for i in range(self.control.count()):
                    if self.valType(str(self.control.itemText(i))) == value:
                        self.control.setCurrentIndex(i)
                        return
                # No matching item: report the problem instead of raising.
                values = ""
                for i in range(self.control.count()):
                    values += str(self.control.itemText(i)) + \
                              (i < self.control.count() - 1 and ", " or ".")
                print("unable to set %s to value '%s'. Possible values are %s"
                      % (self.control, value, values))
            else:
                if value < self.control.count():
                    self.control.setCurrentIndex(value)
class CallFrontHSlider(ControlledCallFront):
    def action(self, value):
        """Move the slider to `value`; `None` is ignored."""
        if value is None:
            return
        self.control.setValue(value)
class CallFrontLabeledSlider(ControlledCallFront):
    def __init__(self, control, lookup):
        super().__init__(control)
        self.lookup = lookup  # label values, in slider-position order

    def action(self, value):
        """Move the slider to the position of `value` in the lookup table.
        Raises ValueError if the value is not among the labels."""
        if value is None:
            return
        self.control.setValue(self.lookup.index(value))
class CallFrontLogSlider(ControlledCallFront):
    def action(self, value):
        """Position the slider at log10 of the attribute value (the slider
        operates on a logarithmic scale)."""
        if value is not None:
            if value < 1e-30:
                # log10 of a non-positive/tiny value is meaningless; report
                # instead of raising.
                print("unable to set %s to %s (value too small)" %
                      (self.control, value))
            else:
                self.control.setValue(math.log10(value))
class CallFrontLineEdit(ControlledCallFront):
    def action(self, value):
        """Show `value` in the line edit. Unlike the other call fronts,
        there is no `None` guard: None is rendered as the text 'None'."""
        text = str(value)
        self.control.setText(text)
class CallFrontRadioButtons(ControlledCallFront):
    def action(self, value):
        """Check the radio button at index `value`; out-of-range indices
        fall back to the first button."""
        if not 0 <= value < len(self.control.buttons):
            value = 0
        self.control.buttons[value].setChecked(1)
class CallFrontListBox(ControlledCallFront):
    def action(self, value):
        # `value` is the collection of selected row indices.
        if value is not None:
            if not isinstance(value, ControlledList):
                # Re-wrap a plain sequence so future in-place changes to the
                # master's attribute are mirrored in the list box as well.
                setattr(self.control.ogMaster, self.control.ogValue,
                        ControlledList(value, self.control))
            # Sync each row's selection state with membership in `value`.
            for i in range(self.control.count()):
                shouldBe = i in value
                if shouldBe != self.control.item(i).isSelected():
                    self.control.item(i).setSelected(shouldBe)
class CallFrontListBoxLabels(ControlledCallFront):
    """Rebuild a list box from a sequence of labels; each label may be a
    plain string, a (text, icon) pair, or an Orange Variable."""
    unknownType = None
    def action(self, values):
        self.control.clear()
        if values:
            for value in values:
                if isinstance(value, tuple):
                    text, icon = value
                    if isinstance(icon, int):
                        # Integer icons index the shared attribute icon set.
                        item = QtGui.QListWidgetItem(attributeIconDict[icon], text)
                    else:
                        item = QtGui.QListWidgetItem(icon, text)
                elif isinstance(value, Variable):
                    item = QtGui.QListWidgetItem(*attributeItem(value))
                else:
                    item = QtGui.QListWidgetItem(value)
                # Keep the original value retrievable from the item.
                item.setData(Qt.UserRole, value)
                self.control.addItem(item)
class CallFrontLabel:
    """Call front that renders a %-style template against the master's
    attribute dictionary and shows the result in a label."""

    def __init__(self, control, label, master):
        self.control = control
        self.label = label    # %-format template, e.g. "value: %(x)s"
        self.master = master  # object whose __dict__ feeds the template

    def __call__(self, *_):
        text = self.label % self.master.__dict__
        self.control.setText(text)
##############################################################################
## Disabler is a call-back class for check box that can disable/enable other
## widgets according to state (checked/unchecked, enabled/disable) of the
## given check box
##
## Tricky: if self.propagateState is True (default), then if check box is
## disabled the related widgets will be disabled (even if the checkbox is
## checked). If self.propagateState is False, the related widgets will be
## disabled/enabled if check box is checked/clear, disregarding whether the
## check box itself is enabled or not. (If you don't understand, see the
## code :-)
DISABLER = 1  # Disabler mode: toggle setDisabled() on the dependent widgets
HIDER = 2     # Disabler mode: hide()/show() the dependent widgets
# noinspection PyShadowingBuiltins
class Disabler:
    """Callback for a check box that enables/disables (DISABLER) or
    shows/hides (HIDER) the widgets listed in ``widget.disables``.

    Entries of ``widget.disables`` may be plain widgets, or tuples whose
    first element is an int flag (-1 inverts the state) followed by the
    widget. With ``propagateState`` True (default), a disabled check box
    also disables its dependents regardless of its checked state.
    """
    def __init__(self, widget, master, valueName, propagateState=True,
                 type=DISABLER):
        self.widget = widget
        self.master = master
        self.valueName = valueName
        self.propagateState = propagateState
        self.type = type  # DISABLER or HIDER
    def __call__(self, *value):
        currState = self.widget.isEnabled()
        if currState or not self.propagateState:
            if len(value):
                # Invoked as a signal handler: use the emitted value.
                disabled = not value[0]
            else:
                # Invoked directly: read the master's attribute.
                disabled = not getdeepattr(self.master, self.valueName)
        else:
            disabled = 1
        for w in self.widget.disables:
            if type(w) is tuple:
                if isinstance(w[0], int):
                    i = 1
                    if w[0] == -1:
                        # -1 inverts: dependent is enabled when unchecked.
                        disabled = not disabled
                else:
                    i = 0
                if self.type == DISABLER:
                    w[i].setDisabled(disabled)
                elif self.type == HIDER:
                    if disabled:
                        w[i].hide()
                    else:
                        w[i].show()
                if hasattr(w[i], "makeConsistent"):
                    w[i].makeConsistent()
            else:
                if self.type == DISABLER:
                    w.setDisabled(disabled)
                elif self.type == HIDER:
                    if disabled:
                        w.hide()
                    else:
                        w.show()
##############################################################################
# some table related widgets
# noinspection PyShadowingBuiltins
class tableItem(QtGui.QTableWidgetItem):
    """QTableWidgetItem that configures itself (icon, flags, background)
    and inserts itself into `table` at row `x`, column `y` on creation."""
    def __init__(self, table, x, y, text, editType=None, backColor=None,
                 icon=None, type=QtGui.QTableWidgetItem.Type):
        super().__init__(type)
        if icon:
            self.setIcon(QtGui.QIcon(icon))
        if editType is not None:
            self.setFlags(editType)
        else:
            # Default: enabled, checkable and selectable, but not editable.
            self.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable |
                          Qt.ItemIsSelectable)
        if backColor is not None:
            self.setBackground(QtGui.QBrush(backColor))
        # we add it this way so that text can also be int and sorting will be
        # done properly (as integers and not as text)
        self.setData(Qt.DisplayRole, text)
        table.setItem(x, y, self)
# Custom item-data roles used by Orange's item models and delegates below.
TableValueRole = next(OrangeUserRole)  # Role to retrieve orange.Value
TableClassValueRole = next(OrangeUserRole)  # Retrieve class value for the row
TableDistribution = next(OrangeUserRole)  # Retrieve distribution of the column
TableVariable = next(OrangeUserRole)  # Role to retrieve the column's variable
BarRatioRole = next(OrangeUserRole)  # Ratio for drawing distribution bars
BarBrushRole = next(OrangeUserRole)  # Brush for distribution bar
SortOrderRole = next(OrangeUserRole)  # Used for sorting
class TableBarItem(QtGui.QItemDelegate):
    """Delegate that draws a horizontal bar under the cell's text, with
    the bar's relative length taken from the item's `BarRole` data."""
    BarRole = next(OrangeUserRole)
    ColorRole = next(OrangeUserRole)
    def __init__(self, parent=None, color=QtGui.QColor(255, 170, 127),
                 color_schema=None):
        """
        :param QObject parent: Parent object.
        :param QColor color: Default color of the distribution bar.
        :param color_schema:
            If not None it must be an instance of
            :class:`OWColorPalette.ColorPaletteGenerator` (note: this
            parameter, if set, overrides the ``color``)
        :type color_schema: :class:`OWColorPalette.ColorPaletteGenerator`
        """
        super().__init__(parent)
        self.color = color
        self.color_schema = color_schema
    def paint(self, painter, option, index):
        painter.save()
        self.drawBackground(painter, option, index)
        ratio = index.data(TableBarItem.BarRole)
        if isinstance(ratio, float):
            if math.isnan(ratio):
                # NaN means "no bar" for this cell.
                ratio = None
        color = self.color
        if self.color_schema is not None and ratio is not None:
            # Color the bar by the row's (discrete) class value.
            class_ = index.data(TableClassValueRole)
            if isinstance(class_, Orange.data.Value) and \
                    class_.variable.is_discrete and \
                    not math.isnan(class_):
                color = self.color_schema[int(class_)]
        if ratio is not None:
            painter.save()
            painter.setPen(QtGui.QPen(QtGui.QBrush(color), 5,
                                      Qt.SolidLine, Qt.RoundCap))
            rect = option.rect.adjusted(3, 0, -3, -5)
            x, y = rect.x(), rect.y() + rect.height()
            painter.drawLine(x, y, x + rect.width() * ratio, y)
            painter.restore()
            # Raise the text slightly so it does not overlap the bar.
            text_rect = option.rect.adjusted(0, 0, 0, -3)
        else:
            text_rect = option.rect
        text = index.data(Qt.DisplayRole)
        self.drawDisplay(painter, option, text_rect, text)
        painter.restore()
class BarItemDelegate(QtGui.QStyledItemDelegate):
    """Delegate that renders a float cell value as a filled bar, after
    rescaling it linearly from `scale` = (min, max) to [0, 1]."""
    def __init__(self, parent, brush=QtGui.QBrush(QtGui.QColor(255, 170, 127)),
                 scale=(0.0, 1.0)):
        super().__init__(parent)
        self.brush = brush
        self.scale = scale
    def paint(self, painter, option, index):
        if option.widget is not None:
            style = option.widget.style()
        else:
            style = QtGui.QApplication.style()
        # Draw the standard row/item background first.
        style.drawPrimitive(
            QtGui.QStyle.PE_PanelItemViewRow, option, painter,
            option.widget)
        style.drawPrimitive(
            QtGui.QStyle.PE_PanelItemViewItem, option, painter,
            option.widget)
        rect = option.rect
        val = index.data(Qt.DisplayRole)
        # Non-float data gets only the background, no bar.
        if isinstance(val, float):
            minv, maxv = self.scale
            val = (val - minv) / (maxv - minv)
            painter.save()
            if option.state & QtGui.QStyle.State_Selected:
                painter.setOpacity(0.75)
            painter.setBrush(self.brush)
            painter.drawRect(
                rect.adjusted(1, 1, - rect.width() * (1.0 - val) - 2, -2))
            painter.restore()
class IndicatorItemDelegate(QtGui.QStyledItemDelegate):
    """Delegate that paints a small filled circle in the middle of the
    cell whenever the item's `role` data is truthy."""
    IndicatorRole = next(OrangeUserRole)
    def __init__(self, parent, role=IndicatorRole, indicatorSize=2):
        super().__init__(parent)
        self.role = role
        self.indicatorSize = indicatorSize  # circle radius, in pixels
    def paint(self, painter, option, index):
        # Let the base class render the item normally, then overlay.
        super().paint(painter, option, index)
        rect = option.rect
        indicator = index.data(self.role)
        if indicator:
            painter.save()
            painter.setRenderHints(QtGui.QPainter.Antialiasing)
            painter.setBrush(QtGui.QBrush(Qt.black))
            painter.drawEllipse(rect.center(),
                                self.indicatorSize, self.indicatorSize)
            painter.restore()
class LinkStyledItemDelegate(QtGui.QStyledItemDelegate):
    """Delegate that renders items carrying string `LinkRole` data as
    clickable hyperlinks: blue text, pointing-hand cursor on hover, and
    opening the link in a web browser on click."""
    LinkRole = next(OrangeUserRole)
    def __init__(self, parent):
        super().__init__(parent)
        # (index, position) of the last mouse press; used to distinguish
        # a click from a drag when the button is released.
        self.mousePressState = QtCore.QModelIndex(), QtCore.QPoint()
        parent.entered.connect(self.onEntered)
    def sizeHint(self, option, index):
        size = super().sizeHint(option, index)
        return QtCore.QSize(size.width(), max(size.height(), 20))
    def linkRect(self, option, index):
        """Return the bounding rectangle of the (elided) link text."""
        if option.widget is not None:
            style = option.widget.style()
        else:
            style = QtGui.QApplication.style()
        text = self.displayText(index.data(Qt.DisplayRole),
                                QtCore.QLocale.system())
        self.initStyleOption(option, index)
        textRect = style.subElementRect(
            QtGui.QStyle.SE_ItemViewItemText, option, option.widget)
        if not textRect.isValid():
            textRect = option.rect
        margin = style.pixelMetric(
            QtGui.QStyle.PM_FocusFrameHMargin, option, option.widget) + 1
        textRect = textRect.adjusted(margin, 0, -margin, 0)
        font = index.data(Qt.FontRole)
        if not isinstance(font, QtGui.QFont):
            font = option.font
        metrics = QtGui.QFontMetrics(font)
        elideText = metrics.elidedText(text, option.textElideMode,
                                       textRect.width())
        return metrics.boundingRect(textRect, option.displayAlignment,
                                    elideText)
    def editorEvent(self, event, model, option, index):
        if event.type() == QtCore.QEvent.MouseButtonPress and \
                self.linkRect(option, index).contains(event.pos()):
            self.mousePressState = (QtCore.QPersistentModelIndex(index),
                                    QtCore.QPoint(event.pos()))
        elif event.type() == QtCore.QEvent.MouseButtonRelease:
            link = index.data(LinkRole)
            if not isinstance(link, str):
                link = None
            pressedIndex, pressPos = self.mousePressState
            # Press and release on the same item with < 5 px of motion is
            # treated as a click: open the link.
            if pressedIndex == index and \
                    (pressPos - event.pos()).manhattanLength() < 5 and \
                    link is not None:
                import webbrowser
                webbrowser.open(link)
            self.mousePressState = QtCore.QModelIndex(), event.pos()
        elif event.type() == QtCore.QEvent.MouseMove:
            link = index.data(LinkRole)
            if not isinstance(link, str):
                link = None
            # Show a hand cursor only while hovering over the link text.
            if link is not None and \
                    self.linkRect(option, index).contains(event.pos()):
                self.parent().viewport().setCursor(Qt.PointingHandCursor)
            else:
                self.parent().viewport().setCursor(Qt.ArrowCursor)
        return super().editorEvent(event, model, option, index)
    def onEntered(self, index):
        # Restore the normal cursor when the mouse enters a non-link item.
        link = index.data(LinkRole)
        if not isinstance(link, str):
            link = None
        if link is None:
            self.parent().viewport().setCursor(Qt.ArrowCursor)
    def paint(self, painter, option, index):
        link = index.data(LinkRole)
        if not isinstance(link, str):
            link = None
        if link is not None:
            if option.widget is not None:
                style = option.widget.style()
            else:
                style = QtGui.QApplication.style()
            style.drawPrimitive(
                QtGui.QStyle.PE_PanelItemViewRow, option, painter,
                option.widget)
            style.drawPrimitive(
                QtGui.QStyle.PE_PanelItemViewItem, option, painter,
                option.widget)
            text = self.displayText(index.data(Qt.DisplayRole),
                                    QtCore.QLocale.system())
            textRect = style.subElementRect(
                QtGui.QStyle.SE_ItemViewItemText, option, option.widget)
            if not textRect.isValid():
                textRect = option.rect
            margin = style.pixelMetric(
                QtGui.QStyle.PM_FocusFrameHMargin, option, option.widget) + 1
            textRect = textRect.adjusted(margin, 0, -margin, 0)
            elideText = QtGui.QFontMetrics(option.font).elidedText(
                text, option.textElideMode, textRect.width())
            painter.save()
            font = index.data(Qt.FontRole)
            if not isinstance(font, QtGui.QFont):
                font = option.font
            painter.setFont(font)
            # Links are drawn in blue over the standard item background.
            painter.setPen(QtGui.QPen(Qt.blue))
            painter.drawText(textRect, option.displayAlignment, elideText)
            painter.restore()
        else:
            super().paint(painter, option, index)
# Module-level alias so callers need not reference the delegate class.
LinkRole = LinkStyledItemDelegate.LinkRole
class ColoredBarItemDelegate(QtGui.QStyledItemDelegate):
    """Item delegate that renders the cell text and, when `BarRatioRole`
    data is present, a thin rounded distribution bar along the bottom.
    """
    def __init__(self, parent=None, decimals=3, color=Qt.red):
        super().__init__(parent)
        self.decimals = decimals
        self.float_fmt = "%%.%if" % decimals  # e.g. "%.3f" for decimals=3
        self.color = QtGui.QColor(color)
    def displayText(self, value, locale):
        # Floats use the configured precision; None is shown as "NA".
        if isinstance(value, float):
            return self.float_fmt % value
        elif isinstance(value, str):
            return value
        elif value is None:
            return "NA"
        else:
            return str(value)
    def sizeHint(self, option, index):
        font = self.get_font(option, index)
        metrics = QtGui.QFontMetrics(font)
        height = metrics.lineSpacing() + 8  # 4 pixel margin
        width = metrics.width(self.displayText(index.data(Qt.DisplayRole),
                                               QtCore.QLocale())) + 8
        return QtCore.QSize(width, height)
    def paint(self, painter, option, index):
        self.initStyleOption(option, index)
        text = self.displayText(index.data(Qt.DisplayRole), QtCore.QLocale())
        ratio, have_ratio = self.get_bar_ratio(option, index)
        rect = option.rect
        if have_ratio:
            # The text is raised 3 pixels above the bar.
            # TODO: Style dependent margins?
            text_rect = rect.adjusted(4, 1, -4, -4)
        else:
            text_rect = rect.adjusted(4, 4, -4, -4)
        painter.save()
        font = self.get_font(option, index)
        painter.setFont(font)
        if option.widget is not None:
            style = option.widget.style()
        else:
            style = QtGui.QApplication.style()
        style.drawPrimitive(
            QtGui.QStyle.PE_PanelItemViewRow, option, painter,
            option.widget)
        style.drawPrimitive(
            QtGui.QStyle.PE_PanelItemViewItem, option, painter,
            option.widget)
        # TODO: Check ForegroundRole.
        if option.state & QtGui.QStyle.State_Selected:
            color = option.palette.highlightedText().color()
        else:
            color = option.palette.text().color()
        painter.setPen(QtGui.QPen(color))
        align = self.get_text_align(option, index)
        metrics = QtGui.QFontMetrics(font)
        elide_text = metrics.elidedText(
            text, option.textElideMode, text_rect.width())
        painter.drawText(text_rect, align, elide_text)
        painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
        if have_ratio:
            # Draw a 3px-high bar hugging the bottom edge of the text rect,
            # with its width clamped to [0, text width].
            brush = self.get_bar_brush(option, index)
            painter.setBrush(brush)
            painter.setPen(QtGui.QPen(brush, 1))
            bar_rect = QtCore.QRect(text_rect)
            bar_rect.setTop(bar_rect.bottom() - 1)
            bar_rect.setBottom(bar_rect.bottom() + 1)
            w = text_rect.width()
            bar_rect.setWidth(max(0, min(w * ratio, w)))
            painter.drawRoundedRect(bar_rect, 2, 2)
        painter.restore()
    def get_font(self, option, index):
        # Per-item font (FontRole) wins over the view's default font.
        font = index.data(Qt.FontRole)
        if not isinstance(font, QtGui.QFont):
            font = option.font
        return font
    def get_text_align(self, _, index):
        align = index.data(Qt.TextAlignmentRole)
        if not isinstance(align, int):
            align = Qt.AlignLeft | Qt.AlignVCenter
        return align
    def get_bar_ratio(self, _, index):
        # Returns (ratio, have_ratio); only float data enables the bar.
        ratio = index.data(BarRatioRole)
        return ratio, isinstance(ratio, float)
    def get_bar_brush(self, _, index):
        bar_brush = index.data(BarBrushRole)
        if not isinstance(bar_brush, (QtGui.QColor, QtGui.QBrush)):
            bar_brush = self.color
        return QtGui.QBrush(bar_brush)
class VerticalLabel(QtGui.QLabel):
    """QLabel whose text is drawn rotated 90 degrees counter-clockwise
    (reading bottom-to-top), e.g. for labelling a vertical axis."""
    def __init__(self, text, parent=None):
        super().__init__(text, parent)
        self.setSizePolicy(QtGui.QSizePolicy.Preferred,
                           QtGui.QSizePolicy.MinimumExpanding)
        self.setMaximumWidth(self.sizeHint().width() + 2)
        self.setMargin(4)
    def sizeHint(self):
        # Width and height are swapped relative to a horizontal label.
        metrics = QtGui.QFontMetrics(self.font())
        rect = metrics.boundingRect(self.text())
        size = QtCore.QSize(rect.height() + self.margin(),
                            rect.width() + self.margin())
        return size
    def setGeometry(self, rect):
        # No-op override kept for API compatibility.
        super().setGeometry(rect)
    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        rect = self.geometry()
        text_rect = QtCore.QRect(0, 0, rect.width(), rect.height())
        # Rotate the painter so the text runs bottom-to-top.
        painter.translate(text_rect.bottomLeft())
        painter.rotate(-90)
        painter.drawText(
            QtCore.QRect(QtCore.QPoint(0, 0),
                         QtCore.QSize(rect.height(), rect.width())),
            Qt.AlignCenter, self.text())
        painter.end()
class VerticalItemDelegate(QtGui.QStyledItemDelegate):
    """Delegate that draws the item's text rotated 90 degrees counter-
    clockwise (e.g. for vertical header sections)."""
    # Extra text top/bottom margin.
    Margin = 6
    def sizeHint(self, option, index):
        # Swap width/height of the horizontal size hint, plus margins.
        sh = super().sizeHint(option, index)
        return QtCore.QSize(sh.height() + self.Margin * 2, sh.width())
    def paint(self, painter, option, index):
        option = QtGui.QStyleOptionViewItemV4(option)
        self.initStyleOption(option, index)
        if not option.text:
            return
        if option.widget is not None:
            style = option.widget.style()
        else:
            style = QtGui.QApplication.style()
        style.drawPrimitive(
            QtGui.QStyle.PE_PanelItemViewRow, option, painter,
            option.widget)
        # Compute the text rectangle in rotated (width/height swapped)
        # coordinates.
        cell_rect = option.rect
        itemrect = QtCore.QRect(0, 0, cell_rect.height(), cell_rect.width())
        opt = QtGui.QStyleOptionViewItemV4(option)
        opt.rect = itemrect
        textrect = style.subElementRect(
            QtGui.QStyle.SE_ItemViewItemText, opt, opt.widget)
        painter.save()
        painter.setFont(option.font)
        if option.displayAlignment & (Qt.AlignTop | Qt.AlignBottom):
            # Nudge top/bottom-aligned text inwards by up to Margin pixels.
            brect = painter.boundingRect(
                textrect, option.displayAlignment, option.text)
            diff = textrect.height() - brect.height()
            offset = max(min(diff / 2, self.Margin), 0)
            if option.displayAlignment & Qt.AlignBottom:
                offset = -offset
            textrect.translate(0, offset)
        painter.translate(option.rect.x(), option.rect.bottom())
        painter.rotate(-90)
        painter.drawText(textrect, option.displayAlignment, option.text)
        painter.restore()
##############################################################################
# progress bar management
class ProgressBar:
    """Convenience wrapper around a widget's progress-bar API that tracks
    completed iterations out of a fixed total."""

    def __init__(self, widget, iterations):
        self.iter = iterations  # total number of expected iterations
        self.widget = widget
        self.count = 0          # iterations completed so far
        self.widget.progressBarInit()

    def advance(self, count=1):
        """Record `count` finished iterations and update the bar."""
        self.count += count
        # max(1, ...) guards against division by zero for iterations == 0.
        percentage = int(self.count * 100 / max(1, self.iter))
        self.widget.progressBarSet(percentage)

    def finish(self):
        """Signal the widget that the tracked job is complete."""
        self.widget.progressBarFinished()
##############################################################################
def tabWidget(widget):
    """Create a QTabWidget inside `widget`, adding it to the widget's
    layout when one exists, and return it."""
    tabs = QtGui.QTabWidget(widget)
    layout = widget.layout()
    if layout is not None:
        layout.addWidget(tabs)
    return tabs
def createTabPage(tabWidget, name, widgetToAdd=None, canScroll=False):
    """Insert a new tab named `name` into `tabWidget` and return the
    widget holding the page's contents (created when not supplied)."""
    if widgetToAdd is None:
        widgetToAdd = widgetBox(tabWidget, addToLayout=0, margin=4)
    if canScroll:
        # Wrap the page in a scroll area: horizontal bar off, vertical on.
        scrollArea = QtGui.QScrollArea()
        tabWidget.addTab(scrollArea, name)
        scrollArea.setWidget(widgetToAdd)
        scrollArea.setWidgetResizable(1)
        scrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
    else:
        tabWidget.addTab(widgetToAdd, name)
    return widgetToAdd
def table(widget, rows=0, columns=0, selectionMode=-1, addToLayout=True):
    """Create a QTableWidget with per-pixel horizontal scrolling and
    movable columns; `selectionMode` of -1 keeps Qt's default."""
    w = QtGui.QTableWidget(rows, columns, widget)
    if widget and addToLayout and widget.layout() is not None:
        widget.layout().addWidget(w)
    if selectionMode != -1:
        w.setSelectionMode(selectionMode)
    w.setHorizontalScrollMode(QtGui.QTableWidget.ScrollPerPixel)
    # NOTE(review): QHeaderView.setMovable() exists in Qt4 only; Qt5
    # renamed it to setSectionsMovable() -- confirm the bindings in use.
    w.horizontalHeader().setMovable(True)
    return w
class VisibleHeaderSectionContextEventFilter(QtCore.QObject):
    """Event filter for a header view that pops up a context menu with a
    checkable entry per section, toggling that section's visibility."""
    def __init__(self, parent, itemView=None):
        super().__init__(parent)
        self.itemView = itemView  # optional view used to resize shown columns
    def eventFilter(self, view, event):
        if not isinstance(event, QtGui.QContextMenuEvent):
            return False
        model = view.model()
        # (hidden?, display name) for every header section.
        headers = [(view.isSectionHidden(i),
                    model.headerData(i, view.orientation(), Qt.DisplayRole)
                    ) for i in range(view.count())]
        menu = QtGui.QMenu("Visible headers", view)
        for i, (checked, name) in enumerate(headers):
            action = QtGui.QAction(name, menu)
            action.setCheckable(True)
            action.setChecked(not checked)
            menu.addAction(action)
            # `section=i` binds the loop variable per iteration (late-binding
            # closure fix).
            def toogleHidden(b, section=i):
                view.setSectionHidden(section, not b)
                if not b:
                    return
                # Section was just shown: give it a sensible width.
                if self.itemView:
                    self.itemView.resizeColumnToContents(section)
                else:
                    view.resizeSection(section,
                                       max(view.sectionSizeHint(section), 10))
            action.toggled.connect(toogleHidden)
        menu.exec_(event.globalPos())
        return True
def checkButtonOffsetHint(button, style=None):
    """Return the horizontal offset of a check box / radio button label
    (indicator width + spacing), with small per-style corrections."""
    option = QtGui.QStyleOptionButton()
    option.initFrom(button)
    if style is None:
        style = button.style()
    if isinstance(button, QtGui.QCheckBox):
        pm_spacing = QtGui.QStyle.PM_CheckBoxLabelSpacing
        pm_indicator_width = QtGui.QStyle.PM_IndicatorWidth
    else:
        # Anything that is not a check box is treated as a radio button.
        pm_spacing = QtGui.QStyle.PM_RadioButtonLabelSpacing
        pm_indicator_width = QtGui.QStyle.PM_ExclusiveIndicatorWidth
    space = style.pixelMetric(pm_spacing, option, button)
    width = style.pixelMetric(pm_indicator_width, option, button)
    # TODO: add other styles (Maybe load corrections from .cfg file?)
    style_correction = {"macintosh (aqua)": -2, "macintosh(aqua)": -2,
                        "plastique": 1, "cde": 1, "motif": 1}
    return space + width + \
        style_correction.get(QtGui.qApp.style().objectName().lower(), 0)
def toolButtonSizeHint(button=None, style=None):
    """Return the preferred (square) size for a small tool button, derived
    from the style's small-icon and button-margin metrics. Uses the
    button's style when given, falling back to the application style."""
    if style is None:
        style = QtGui.qApp.style() if button is None else button.style()
    icon_size = style.pixelMetric(QtGui.QStyle.PM_SmallIconSize)
    margin = style.pixelMetric(QtGui.QStyle.PM_ButtonMargin)
    return icon_size + margin
class FloatSlider(QtGui.QSlider):
    """QSlider that maps its integer positions onto a float scale
    [min_value, max_value] with the given step.

    Emits `valueChangedFloat(float)` with the scaled value whenever the
    underlying integer value changes.
    """
    valueChangedFloat = Signal(float)

    def __init__(self, orientation, min_value, max_value, step, parent=None):
        super().__init__(orientation, parent)
        self.setScale(min_value, max_value, step)
        self.valueChanged[int].connect(self.sendValue)

    def update(self):
        """Re-derive the integer slider range from the float scale."""
        self.setSingleStep(1)
        if self.min_value != self.max_value:
            self.setEnabled(True)
            self.setMinimum(int(self.min_value / self.step))
            self.setMaximum(int(self.max_value / self.step))
        else:
            self.setEnabled(False)

    def sendValue(self, slider_value):
        """Translate the integer position back to the float scale (clamped
        to [min_value, max_value]) and re-emit it."""
        value = min(max(slider_value * self.step, self.min_value),
                    self.max_value)
        self.valueChangedFloat.emit(value)

    def setValue(self, value):
        # Convert a float-scale value to the integer position.
        super().setValue(value // self.step)

    def setScale(self, minValue, maxValue, step=0):
        """Set the float scale; a non-positive or too-large `step` is
        replaced by a sensible default (1 for ints, 1% of the range for
        floats)."""
        if minValue >= maxValue:
            ## It would be more logical to disable the slider in this case
            ## (self.setEnabled(False))
            ## However, we do nothing to keep consistency with Qwt
            # TODO If it's related to Qwt, remove it
            return
        if step <= 0 or step > (maxValue - minValue):
            if isinstance(maxValue, int) and isinstance(minValue, int):
                step = 1
            else:
                # BUG FIX: the default step must be positive. The original
                # computed (minValue - maxValue) / 100, which is negative on
                # this path (minValue < maxValue is guaranteed above) and
                # inverted the integer range set in update().
                step = float(maxValue - minValue) / 100.0
        self.min_value = float(minValue)
        self.max_value = float(maxValue)
        self.step = step
        self.update()

    def setRange(self, minValue, maxValue, step=1.0):
        # For compatibility with qwtSlider
        # TODO If it's related to Qwt, remove it
        self.setScale(minValue, maxValue, step)
| PythonCharmers/orange3 | Orange/widgets/gui.py | Python | gpl-3.0 | 126,299 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CodeBug Tether documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 20 15:23:09 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions used by this documentation build.
# NOTE(review): sphinx.ext.pngmath was deprecated/removed in newer Sphinx
# releases in favour of sphinx.ext.imgmath -- confirm the targeted version.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CodeBug Tether'
copyright = '2015, OpenLX'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE: this is the version of the firmware download too so don't change
# this with minor changes in the software if the firmware hasn't changed!
version = '0.8.5'
# The full version, including alpha/beta/rc tags.
release = '0.8.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# html_theme = 'haiku'
# html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'codebug_tetherdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'codebug_tether.tex', 'CodeBug Tether Documentation',
'Thomas Preston', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'codebug_tether', 'CodeBug Tether Documentation',
['Thomas Preston'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'codebug_tether', 'CodeBug Tether Documentation',
'Thomas Preston', 'codebug_tether', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Render `.. todo::` directives in the built output (sphinx.ext.todo).
todo_include_todos = True
# A string of reStructuredText that will be included at the end of every
# source file that is read. This is the right place to add substitutions
# that should be available in every file.
rst_epilog = """
.. |firmwaredownload| raw:: html
<a href="https://github.com/codebugtools/codebug_tether/blob/master/firmware/codebug_tether_v{version}.cbg?raw=true">download</a>
""".format(version=version)
| codebugtools/codebug_tether | docs/conf.py | Python | gpl-3.0 | 8,707 |
'''
Nibblegen: A script to convert LaTex text to html usable in Nibbleblog Forked from the latex2wp project (the licenceing for which is below).
Copyright (C) 2014 Theodore Jones
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
"""
Copyright 2009 Luca Trevisan
Additional contributors: Radu Grigore
LaTeX2WP version 0.6.2
This file is part of LaTeX2WP, a program that converts
a LaTeX document into a format that is ready to be
copied and pasted into WordPress.
You are free to redistribute and/or modify LaTeX2WP under the
terms of the GNU General Public License (GPL), version 3
or (at your option) any later version.
I hope you will find LaTeX2WP useful, but be advised that
it comes WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GPL for more details.
You should have received a copy of the GNU General Public
License along with LaTeX2WP. If you can't find it,
see <http://www.gnu.org/licenses/>.
"""
import re
from sys import argv
from latex2wpstyle import *
# prepare variables computed from the info in latex2wpstyle
# `count` holds running counters: one per theorem-like environment
# (keyed through the T map from latex2wpstyle), plus sections,
# subsections and numbered equations.  `ref` maps \label names to
# those numbers so \ref/\eqref can be resolved in a later pass.
count = dict()
for thm in ThmEnvs:
    count[T[thm]] = 0
count["section"] = count["subsection"] = count["equation"] = 0
ref={}
# suffix appended to every formula (text-color argument of the math renderer)
endlatex = "&fg="+textcolor
# in HTML mode the proof-end marker is suppressed
if HTML : endproof = ""
# name of the theorem-like environment currently open ("" = none);
# used by convertlab() to decide how a label is numbered
inthm = ""
"""
At the beginning, the commands \$, \% and \& are temporarily
replaced by placeholders (the second entry in each 4-tuple).
At the end, The placeholders in text mode are replaced by
the third entry, and the placeholders in math mode are
replaced by the fourth entry.
"""
esc = [["\\$","_dollar_","$","\\$"],
       ["\\%","_percent_","%","\\%"],
       ["\\&","_amp_","&","\\&"],
       [">","_greater_",">",">"],
       ["<","_lesser_","<","<"]]
# extra simple macro substitutions, appended to the table M imported
# from latex2wpstyle (applied verbatim by convertmacros)
M = M + [ ["\\more","<!--more-->"],
          ["\\newblock","\\\\"],
          ["\\sloppy",""],
          ["\\S","§"]]
# text-mode character/command substitutions applied by processtext()
Mnomath =[["\\\\","<br/>\n"],
          ["\\ "," "],
          ["\\`a","à"],
          ["\\'a","á"],
          ["\\\"a","ä"],
          ["\\aa ","å"],
          ["{\\aa}","å"],
          ["\\`e","è"],
          ["\\'e","é"],
          ["\\\"e","ë"],
          ["\\`i","ì"],
          ["\\'i","í"],
          ["\\\"i","ï"],
          ["\\`o","ò"],
          ["\\'o","ó"],
          ["\\\"o","ö"],
          ["\\`o","ò"],
          ["\\'o","ó"],
          ["\\\"o","ö"],
          ["\\H o","ö"],
          ["\\`u","ù"],
          ["\\'u","ú"],
          ["\\\"u","ü"],
          ["\\`u","ù"],
          ["\\'u","ú"],
          ["\\\"u","ü"],
          ["\\v{C}","Č"]]
# splits on braces; used throughout to extract {...} arguments
cb = re.compile("\\{|}")
def extractbody(m) :
    """Return the normalized body of the LaTeX source *m*.

    Keeps only the text between \\begin{document} and \\end{document}
    (if present), escapes \\$, \\%, \\& via the `esc` placeholders,
    strips comments, collapses whitespace, honors the
    \\iffalse/\\ifblog/\\iftex ... \\fi conditionals, and rewrites
    $$...$$ into \\[...\\] and eqnarray* into array environments.
    """
    # normalize spacing after \begin / \end so later regexes match
    begin = re.compile("\\\\begin\s*")
    m= begin.sub("\\\\begin",m)
    end = re.compile("\\\\end\s*")
    m = end.sub("\\\\end",m)
    beginenddoc = re.compile("\\\\begin\\{document}"
                             "|\\\\end\\{document}")
    parse = beginenddoc.split(m)
    if len(parse)== 1 :
        m = parse[0]
    else :
        m = parse[1]
    # removes comments, replaces double returns with <p> and
    # other returns and multiple spaces by a single space.
    for e in esc :
        m = m.replace(e[0],e[1])
    comments = re.compile("%.*?\n")
    m=comments.sub(" ",m)
    multiplereturns = re.compile("\n\n+")
    m= multiplereturns.sub ("<p>",m)
    spaces=re.compile("(\n|[ ])+")
    m=spaces.sub(" ",m)
    # removes text between \iffalse ... \fi and between
    # \iftex ... \fi; keeps text between \ifblog ... \fi
    ifcommands = re.compile("\\\\iffalse|\\\\ifblog|\\\\iftex|\\\\fi")
    L=ifcommands.split(m)
    I=ifcommands.findall(m)
    m= L[0]
    # NOTE: `//` (floor division) gives the same result as `/` did under
    # Python 2 for these int operands, but also works under Python 3.
    for i in range(1,(len(L)+1)//2) :
        if (I[2*i-2]=="\\ifblog") :
            m=m+L[2*i-1]
        m=m+L[2*i]
    # changes $$ ... $$ into \[ ... \] and reformats eqnarray*
    # environments as regular array environments
    doubledollar = re.compile("\\$\\$")
    L=doubledollar.split(m)
    m=L[0]
    for i in range(1,(len(L)+1)//2) :
        m = m+ "\\[" + L[2*i-1] + "\\]" + L[2*i]
    m=m.replace("\\begin{eqnarray*}","\\[ \\begin{array}{rcl} ")
    m=m.replace("\\end{eqnarray*}","\\end{array} \\]")
    return m
def convertsqb(m) :
    """Rewrite square-bracket options into brace form.

    "\\item[foo]" becomes "\\nitem{foo}" and "\\begin{env}[foo]"
    becomes "\\nbegin{env}{foo}", so that later passes only ever
    need to parse brace-delimited arguments.
    """
    item_pat = re.compile("\\\\item\\s*\\[.*?\\]")
    pieces = item_pat.split(m)
    out = pieces[0]
    for idx, tok in enumerate(item_pat.findall(m)):
        tok = tok.replace("\\item", "\\nitem")
        tok = tok.replace("[", "{").replace("]", "}")
        out += tok + pieces[idx + 1]
    begin_pat = re.compile("\\\\begin\\s*\\{\\w+}\\s*\\[.*?\\]")
    pieces = begin_pat.split(out)
    result = pieces[0]
    for idx, tok in enumerate(begin_pat.findall(out)):
        tok = tok.replace("\\begin", "\\nbegin")
        tok = tok.replace("[", "{").replace("]", "}")
        result += tok + pieces[idx + 1]
    return result
def converttables(m) :
    """Replace every tabular/btabular environment in *m* with an HTML
    table (btabular variants are rendered with a border)."""
    table_pat = re.compile("\\\\begin\s*\\{tabular}.*?\\\\end\s*\\{tabular}"
                           "|\\\\begin\s*\\{btabular}.*?\\\\end\s*\\{btabular}")
    between = table_pat.split(m)
    out = [between[0]]
    for k, tbl in enumerate(table_pat.findall(m)):
        bordered = tbl.find("{btabular}") != -1
        out.append(convertonetable(tbl, bordered))
        out.append(between[k + 1])
    return "".join(out)
def convertmacros(m) :
    """Expand the simple text-substitution macros listed in the
    module-level table M (pairs of LaTeX command, replacement)."""
    comm = re.compile("\\\\[a-zA-Z]*")
    plain = comm.split(m)
    out = plain[0]
    for k, cmd in enumerate(comm.findall(m)):
        for src, dst in M :
            if src == cmd :
                cmd = dst
        out = out + cmd + plain[k + 1]
    return out
def convertonetable(m,border) :
    # Translate one tabular/btabular environment into an HTML table.
    # m is the full "\begin{(b)tabular}{fmt} ... \end{(b)tabular}" text;
    # border selects the bordered variant.
    tokens = re.compile("\\\\begin\\{tabular}\s*\\{.*?}"
                        "|\\\\end\\{tabular}"
                        "|\\\\begin\\{btabular}\s*\\{.*?}"
                        "|\\\\end\\{btabular}"
                        "|&|\\\\\\\\")
    align = { "c" : "center", "l" : "left" , "r" : "right" }
    T = tokens.findall(m)          # delimiter tokens: begin, &, \\, end
    C = tokens.split(m)            # cell contents between the delimiters
    L = cb.split(T[0])
    format = L[3]                  # column spec from \begin{tabular}{...}, e.g. "lcr"
    columns = len(format)
    if border :
        # NOTE(review): unlike the branch below, this does not open a
        # <tr>, so bordered tables emit malformed HTML (</tr> without a
        # matching <tr>) -- confirm against upstream latex2wp before changing.
        m = "<table border=\"1\" align=center>"
    else :
        m="<table align = center><tr>"
    p=1                            # index of the next cell in C
    i=0                            # current column within the row
    while T[p-1] != "\\end{tabular}" and T[p-1] != "\\end{btabular}":
        m = m + "<td align="+align[format[i]]+">" + C[p] + "</td>"
        p=p+1
        i=i+1
        if T[p-1]=="\\\\" :
            # row break: pad the row with empty cells, then open a new row.
            # NOTE(review): the pad range starts at p (the running cell
            # index), not at i (the column index), so rows after the first
            # may be padded short -- this matches the behavior of the
            # original latex2wp code; verify before "fixing".
            for i in range (p,columns) :
                m=m+"<td></td>"
            m=m+"</tr><tr>"
            i=0
    m = m+ "</tr></table>"
    return (m)
def separatemath(m) :
    """Split *m* into its math fragments and the text between them.

    Returns (math, text): math is the list of $...$,
    \\begin{equation}...\\end{equation} and \\[...\\] fragments; text is
    the list of interleaved non-math pieces, with
    len(text) == len(math) + 1.
    """
    pattern = re.compile("\\$.*?\\$"
                         "|\\\\begin\\{equation}.*?\\\\end\\{equation}"
                         "|\\\\\\[.*?\\\\\\]")
    return (pattern.findall(m), pattern.split(m))
def processmath( M ) :
    # Format every math fragment in M (as produced by separatemath).
    # Inline $...$ stays inline; \[...\] and equation environments become
    # centered display math.  equation environments are numbered via
    # count["equation"], and a \label inside display math is recorded in
    # the global `ref` table and turned into an HTML anchor.
    R = []
    counteq=0   # (unused)
    global ref
    mathdelim = re.compile("\\$"
                           "|\\\\begin\\{equation}"
                           "|\\\\end\\{equation}"
                           "|\\\\\\[|\\\\\\]")
    label = re.compile("\\\\label\\{.*?}")
    for m in M :
        md = mathdelim.findall(m)
        mb = mathdelim.split(m)
        """
        In what follows, md[0] contains the initial delimiter,
        which is either \begin{equation}, or $, or \[, and
        mb[1] contains the actual mathematical equation
        """
        if md[0] == "$" :
            # inline math: re-wrap in $$ ... $$ (MathJax-style delimiters)
            if HTML :
                m=m.replace("$","")
                m="$$"+m+""+endlatex+"$$"
            else :
                m="$$ {"+mb[1]+"}"+endlatex+"$$"
        else :
            # display math; \begin{equation} gets a running equation number
            if md[0].find("\\begin") != -1 :
                count["equation"] += 1
                mb[1] = mb[1] + "\\ \\ \\ \\ \\ ("+str(count["equation"])+")"
            if HTML :
                m = "<p align=center>$$" + mb[1] +endlatex+"$$" + "</p>\n"
            else :
                m = "<p align=center>$$ " + mb[1] +endlatex+"$$</p>\n"
            if m.find("\\label") != -1 :
                mnolab = label.split(m)
                mlab = label.findall(m)
                """
                Now the mathematical equation, which has already
                been formatted for WordPress, is the union of
                the strings mnolab[0] and mnolab[1]. The content
                of the \label{...} command is in mlab[0]
                """
                lab = mlab[0]
                lab=cb.split(lab)[1]
                lab=lab.replace(":","")
                # remember the equation number this label refers to
                ref[lab]=count["equation"]
                m="<a name=\""+lab+"\">"+mnolab[0]+mnolab[1]+"</a>"
        R= R + [m]
    return R
def convertcolors(m,c) :
    """Open or close a colored <span> for the color environment *c*
    (the hex value comes from the module-level `colors` table)."""
    if m.find("begin") == -1 :
        return("</span>")
    return("<span style=\"color:#"+colors[c]+";\">")
def convertitm(m) :
    """Map \\begin{itemize} / \\end{itemize} to <ul> / </ul>."""
    opening = m.find("begin") != -1
    return "\n\n<ul>" if opening else "\n</ul>\n\n"
def convertenum(m) :
    """Map \\begin{enumerate} / \\end{enumerate} to <ol> / </ol>."""
    opening = m.find("begin") != -1
    return "\n\n<ol>" if opening else "\n</ol>\n\n"
def convertbeginnamedthm(thname,thm) :
    """Open a named theorem-like environment (from \\nbegin{thm}{Name}):
    bump its counter, record that we are inside *thm*, and fill in the
    beginnamedthm template."""
    global inthm
    count[T[thm]] +=1
    inthm = thm
    out = beginnamedthm.replace("_ThmType_",thm.capitalize())
    out = out.replace("_ThmNumb_",str(count[T[thm]]))
    return out.replace("_ThmName_",thname)
def convertbeginthm(thm) :
    """Open an (unnamed) theorem-like environment: bump its counter,
    record that we are inside *thm*, and fill in the beginthm template."""
    global inthm
    count[T[thm]] +=1
    inthm = thm
    out = beginthm.replace("_ThmType_",thm.capitalize())
    return out.replace("_ThmNumb_",str(count[T[thm]]))
def convertendthm(thm) :
    """Close the current theorem-like environment (clears `inthm`)."""
    global inthm
    inthm = ""
    return endthm
def convertlab(m) :
    """Record a \\label target in `ref` and emit an HTML anchor for it.

    Inside a theorem-like environment the label resolves to that
    environment's number; otherwise to the current section number.
    """
    global inthm
    global ref
    name = cb.split(m)[1].replace(":","")
    if inthm != "" :
        ref[name]=count[T[inthm]]
    else :
        ref[name]=count["section"]
    return "<a name=\"" + name + "\"></a>"
def convertproof(m) :
    """Open or close a proof environment using the configured markers."""
    if m.find("begin") == -1 :
        return(endproof)
    return(beginproof)
def convertsection (m) :
    """Format a \\section or \\section* heading.

    Numbered sections advance the section counter and reset the
    subsection counter; starred sections reuse the current number
    without advancing it.
    """
    parts = cb.split(m)
    # parts[0] is the \section or \section* command, parts[1] the title
    if parts[0].find("*") == -1 :
        template = section
        count["section"] += 1
        count["subsection"]=0
    else :
        template = sectionstar
    template = template.replace("_SecNumb_",str(count["section"]) )
    return template.replace("_SecName_",parts[1])
def convertsubsection (m) :
    """Format a \\subsection or \\subsection* heading.

    The subsection counter is advanced for the starred form as well
    (unlike convertsection, which skips the counter for \\section*).
    """
    parts = cb.split(m)
    if parts[0].find("*") == -1 :
        template = subsection
    else :
        template = subsectionstar
    count["subsection"] += 1
    template = template.replace("_SecNumb_",str(count["section"]) )
    template = template.replace("_SubSecNumb_",str(count["subsection"]) )
    return template.replace("_SecName_",parts[1])
def converturl (m) :
    """Turn \\href{url}{text} into an HTML link."""
    parts = cb.split(m)
    return "<a href=\"" + parts[1] + "\">" + parts[3] + "</a>"
def converturlnosnap (m) :
    """Like converturl, but tags the link so Snap previews skip it."""
    parts = cb.split(m)
    return "<a class=\"snap_noshots\" href=\"" + parts[1] + "\">" + parts[3] + "</a>"
def convertimage (m) :
    """Turn \\image{attrs}{...}{src} into a centered <img> tag."""
    parts = cb.split (m)
    return "<p align=center><img " + parts[1] + " src=\"" + parts[3] + "\"></p>"
def convertstrike (m) :
    """Turn \\sout{text} into strikethrough HTML."""
    parts = cb.split(m)
    return "<s>" + parts[1] + "</s>"
def processtext ( t ) :
    # Convert the non-math part `t` of the document to HTML: applies the
    # Mnomath character substitutions, then splits the text on the
    # recognized LaTeX commands/environments and dispatches each match to
    # the matching convert* helper.  Font styles ({\bf ...} etc.) are
    # handled last by processfontstyle().
    p = re.compile("\\\\begin\\{\\w+}"
                   "|\\\\nbegin\\{\\w+}\\s*\\{.*?}"
                   "|\\\\end\\{\\w+}"
                   "|\\\\item"
                   "|\\\\nitem\\s*\\{.*?}"
                   "|\\\\label\\s*\\{.*?}"
                   "|\\\\section\\s*\\{.*?}"
                   "|\\\\section\\*\\s*\\{.*?}"
                   "|\\\\subsection\\s*\\{.*?}"
                   "|\\\\subsection\\*\\s*\\{.*?}"
                   "|\\\\href\\s*\\{.*?}\\s*\\{.*?}"
                   "|\\\\hrefnosnap\\s*\\{.*?}\\s*\\{.*?}"
                   "|\\\\image\\s*\\{.*?}\\s*\\{.*?}\\s*\\{.*?}"
                   "|\\\\sout\\s*\\{.*?}")
    for s1, s2 in Mnomath :
        t=t.replace(s1,s2)
    ttext = p.split(t)         # plain-text pieces
    tcontrol = p.findall(t)    # the commands between them
    w = ttext[0]
    i=0
    while i < len(tcontrol) :
        if tcontrol[i].find("{itemize}") != -1 :
            w=w+convertitm(tcontrol[i])
        elif tcontrol[i].find("{enumerate}") != -1 :
            w= w+convertenum(tcontrol[i])
        elif tcontrol[i][0:5]=="\\item" :
            w=w+"<li>"
        elif tcontrol[i][0:6]=="\\nitem" :
            # \nitem{label}: list item with explicit label (from \item[...])
            lb = tcontrol[i][7:].replace("{","")
            lb = lb.replace("}","")
            w=w+"<li>"+lb
        elif tcontrol[i].find("\\hrefnosnap") != -1 :
            w = w+converturlnosnap(tcontrol[i])
        elif tcontrol[i].find("\\href") != -1 :
            w = w+converturl(tcontrol[i])
        elif tcontrol[i].find("{proof}") != -1 :
            w = w+convertproof(tcontrol[i])
        elif tcontrol[i].find("\\subsection") != -1 :
            w = w+convertsubsection(tcontrol[i])
        elif tcontrol[i].find("\\section") != -1 :
            w = w+convertsection(tcontrol[i])
        elif tcontrol[i].find("\\label") != -1 :
            w=w+convertlab(tcontrol[i])
        elif tcontrol[i].find("\\image") != -1 :
            w = w+convertimage(tcontrol[i])
        elif tcontrol[i].find("\\sout") != -1 :
            w = w+convertstrike(tcontrol[i])
        elif tcontrol[i].find("\\begin") !=-1 and tcontrol[i].find("{center}")!= -1 :
            w = w+"<p align=center>"
        elif tcontrol[i].find("\\end")!= -1 and tcontrol[i].find("{center}") != -1 :
            w = w+"</p>"
        else :
            # remaining cases: color spans and theorem-like environments
            for clr in colorchoice :
                if tcontrol[i].find("{"+clr+"}") != -1:
                    w=w + convertcolors(tcontrol[i],clr)
            for thm in ThmEnvs :
                if tcontrol[i]=="\\end{"+thm+"}" :
                    w=w+convertendthm(thm)
                elif tcontrol[i]=="\\begin{"+thm+"}":
                    w=w+convertbeginthm(thm)
                elif tcontrol[i].find("\\nbegin{"+thm+"}") != -1:
                    # named theorem: \nbegin{thm}{Name}
                    L=cb.split(tcontrol[i])
                    thname=L[3]
                    w=w+convertbeginnamedthm(thname,thm)
        w += ttext[i+1]
        i += 1
    return processfontstyle(w)
def processfontstyle(w) :
    # Translate brace-delimited font commands ({\bf ...}, {\em ...}, ...
    # per the `fontstyle` table) into HTML tag pairs, tracking brace
    # nesting so that each closing "}" emits the matching close token.
    close = dict()   # nesting level -> closing token ("}" or "</tag>")
    ww = ""
    level = i = 0
    while i < len(w):
        special = False
        for k, v in fontstyle.items():
            l = len(k)
            if w[i:i+l] == k:
                # font-style opener: emit the open tag, remember its close tag
                level += 1
                ww += '<' + v + '>'
                close[level] = '</' + v + '>'
                i += l
                special = True
        if not special:
            if w[i] == '{':
                # plain brace: opens a level that closes with "}"
                ww += '{'
                level += 1
                close[level] = '}'
            elif w[i] == '}' and level > 0:
                ww += close[level]
                level -= 1
            else:
                ww += w[i]
            i += 1
    return ww
def convertref(m) :
    """Replace every \\ref{x} / \\eqref{x} in *m* with a link to the
    anchor recorded in ref[x]; \\eqref targets are parenthesized."""
    global ref
    pattern = re.compile("\\\\ref\s*\\{.*?}|\\\\eqref\s*\\{.*?}")
    texts = pattern.split(m)
    out = texts[0]
    for k, cmd in enumerate(pattern.findall(m)):
        lab = cb.split(cmd)[1].replace(":","")
        if cmd.find("\\eqref") != -1 :
            out += "<a href=\"#"+lab+"\">("+str(ref[lab])+")</a>"
        else :
            out += "<a href=\"#"+lab+"\">"+str(ref[lab])+"</a>"
        out += texts[k+1]
    return out
"""
The program makes several passes through the input.
In a first clean-up, all text before \begin{document}
and after \end{document}, if present, is removed,
all double-returns are converted
to <p>, and all remaining returns are converted to
spaces.
The second step implements a few simple macros. The user can
add support for more macros if desired by editing the
convertmacros() procedure.
Then the program separates the mathematical
from the text parts. (It assumes that the document does
not start with a mathematical expression.)
It makes one pass through the text part, translating
environments such as theorem, lemma, proof, enumerate, itemize,
\em, and \bf. Along the way, it keeps counters for the current
section and subsection and for the current numbered theorem-like
environment, as well as a flag that tells whether one is
inside a theorem-like environment or not. Every time a \label{xx}
command is encountered, we give ref[xx] the value of the section
in which the command appears, or the number of the theorem-like
environment in which it appears (if applicable). Each appearence
of \label is replace by an html "name" tag, so that later we can
replace \ref commands by clickable html links.
The next step is to make a pass through the mathematical environments.
Displayed equations are numbered and centered, and when a \label{xx}
command is encountered we give ref[xx] the number of the current
equation.
A final pass replaces \ref{xx} commands by the number in ref[xx],
and a clickable link to the referenced location.
"""
import sys
s = ""
while True:
char = sys.stdin.read(1)
if not char:
break
if char:
s = s + char
"""
extractbody() takes the text between a \begin{document}
and \end{document}, if present, (otherwise it keeps the
whole document), normalizes the spacing, and removes comments
"""
s=extractbody(s)
# formats tables
s=converttables(s)
# reformats optional parameters passed in square brackets
s=convertsqb(s)
#implement simple macros
s=convertmacros(s)
# extracts the math parts, and replaces the with placeholders
# processes math and text separately, then puts the processed
# math equations in place of the placeholders
(math,text) = separatemath(s)
s=text[0]
for i in range(len(math)) :
s=s+"__math"+str(i)+"__"+text[i+1]
s = processtext ( s )
math = processmath ( math )
# converts escape sequences such as \$ to HTML codes
# This must be done after formatting the tables or the '&' in
# the HTML codes will create problems
for e in esc :
s=s.replace(e[1],e[2])
for i in range ( len ( math ) ) :
math[i] = math[i].replace(e[1],e[3])
# puts the math equations back into the text
for i in range(len(math)) :
s=s.replace("__math"+str(i)+"__",math[i])
# translating the \ref{} commands
s=convertref(s)
if HTML :
s="<head><style>body{max-width:55em;}a:link{color:#4444aa;}a:visited{color:#4444aa;}a:hover{background-color:#aaaaFF;}</style></head><body>"+s+"</body></html>"
s = s.replace("<p>","\n<p>\n")
print s
| theoj2/Nibbletex | nibblegen/nibblegen.py | Python | gpl-3.0 | 19,283 |
Subsets and Splits