repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (string, 15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
kamijawa/ogc_server | test/test_import_vcf.py | 1 | 2376 | # -*- coding: utf-8 -*-
import os, sys, codecs
import json
from pydash import py_ as _
ENCODING = 'utf-8'
ENCODING1 = 'gb18030'
def dec(aStr):
gb18030_encode, gb18030_decode, gb18030_reader, gb18030_writer = codecs.lookup(ENCODING)
text, length = gb18030_decode(aStr, 'replace')
return text
def enc(aStr):
gb18030_encode, gb18030_decode, gb18030_reader, gb18030_writer = codecs.lookup(ENCODING)
text, length = gb18030_encode(aStr, 'replace')
return text
def dec1(aStr):
gb18030_encode, gb18030_decode, gb18030_reader, gb18030_writer = codecs.lookup(ENCODING1)
text, length = gb18030_decode(aStr, 'replace')
return text
def enc1(aStr):
gb18030_encode, gb18030_decode, gb18030_reader, gb18030_writer = codecs.lookup(ENCODING1)
text, length = gb18030_encode(aStr, 'replace')
return text
VCFPATH = ur'd:\联系人_002.vcf'
def test():
lines = []
contacts = []
with open(VCFPATH) as f:
lines = f.readlines()
begin = False
o = None
for line in lines:
line = line.strip()
if line == 'BEGIN:VCARD':
begin = True
o = {}
continue
if line == 'END:VCARD':
begin = False
if o and o.has_key('tel') and o.has_key('name'):
contacts.append(o)
continue
if begin:
if _.starts_with(line, 'N;'):
o['name'] = line[line.index(':')+1:]
o['name'] = o['name'].split(';')
o['name'] = filter(lambda x:len(x)>0, o['name'])
o['name'] = map(convert, o['name'])
o['name'] = ''.join(o['name'])
if _.starts_with(line, 'TEL;'):
if not o.has_key('tel'):
o['tel'] = []
o['tel'].append(line[line.index(':')+1:])
# print(contacts)
s = json.dumps(contacts, ensure_ascii=False, indent=4)
with codecs.open(ur'd:\contacts.json', 'w', 'utf-8-sig') as f:
f.write(s)
def convert(s):
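    # The N; field values carry quoted-printable style hex escapes (e.g. =E5=BC=A0);
    # replacing '=' with '\x' lets codecs.escape_decode() recover the raw bytes,
    # which dec() then decodes as UTF-8.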
s = s.replace('=', '\\x')
return dec(codecs.escape_decode(s)[0])
def printf():
contacts = []
with codecs.open(ur'd:\contacts.json', 'r', 'utf-8-sig') as f:
contacts = json.loads(f.read())
for contact in contacts:
print('%s:%s' % (contact['name'], ','.join(contact['tel'])))
if __name__ == "__main__":
printf() | mit | 3,756,588,771,481,258,000 | 29.792208 | 94 | 0.547679 | false | 3.207037 | false | false | false |
armills/agocontrol | devices/agosimulator.py | 4 | 1773 | #! /usr/bin/env python
import random
import sys
import syslog
import socket
import threading
import time
import agoclient
client = agoclient.AgoConnection("simulator")
def messageHandler(internalid, content):
if "command" in content:
if content["command"] == "on":
print "switching on: " + internalid
client.emitEvent(internalid, "event.device.statechanged", "255", "")
if content["command"] == "off":
print "switching off: " + internalid
client.emitEvent(internalid, "event.device.statechanged", "0", "")
if content["command"] == "push":
print "push button: " + internalid
if content['command'] == 'setlevel':
if 'level' in content:
print "device level changed", content["level"]
client.emitEvent(internalid, "event.device.statechanged", content["level"], "")
client.addHandler(messageHandler)
client.addDevice("123", "dimmer")
client.addDevice("124", "switch")
client.addDevice("125", "binarysensor")
client.addDevice("126", "multilevelsensor")
client.addDevice("127", "pushbutton")
class testEvent(threading.Thread):
def __init__(self,):
threading.Thread.__init__(self)
def run(self):
level = 0
counter = 0
while (True):
counter = counter + 1
if counter > 3:
counter = 0
temp = random.randint(50,300) / 10
client.emitEvent("126", "event.environment.temperaturechanged", temp, "degC");
client.emitEvent("126", "event.environment.humiditychanged", random.randint(20, 75), "percent");
client.emitEvent("125", "event.security.sensortriggered", level, "")
if (level == 0):
level = 255
else:
level = 0
time.sleep (5)
background = testEvent()
background.setDaemon(True)
background.start()
syslog.syslog(syslog.LOG_NOTICE, "agosimulator.py startup")
client.run()
| gpl-3.0 | 1,214,087,210,189,917,000 | 27.142857 | 99 | 0.685843 | false | 3.320225 | false | false | false |
evgenybf/pyXLWriter | examples/simple.py | 1 | 1245 | #!/usr/bin/env python
# This example script was ported from Perl Spreadsheet::WriteExcel module.
# The author of the Spreadsheet::WriteExcel module is John McNamara
# <[email protected]>
__revision__ = """$Id: simple.py,v 1.9 2004/01/31 18:56:07 fufff Exp $"""
#######################################################################
#
# Example of how to use the WriteExcel module to write text and numbers
# to an Excel binary file.
#
# reverse('(c)'), March 2001, John McNamara, [email protected]
#
import pyXLWriter as xl
# Create a new workbook called simple.xls and add a worksheet
workbook = xl.Writer("simple.xls")
worksheet = workbook.add_worksheet()
# The general syntax is write(row, column, token). Note that row and
# column are zero indexed
# Write some text
worksheet.write([0, 0], "Hi Excel!")
# Write some numbers
worksheet.write([2, 0], 3) # Writes 3
worksheet.write([3, 0], 3.00000) # Writes 3
worksheet.write([4, 0], 3.00001) # Writes 3.00001
worksheet.write([5, 0], 3.14159) # TeX revision no.?
# Write some formulas
worksheet.write([7, 0], '=A3 + A6')
worksheet.write([8, 0], '=IF(A5>3,"Yes", "No")')
# Write a hyperlink
worksheet.write([10, 0], 'http://www.perl.com/')
workbook.close() | lgpl-2.1 | -5,640,929,469,606,742,000 | 29.390244 | 74 | 0.643373 | false | 3.074074 | false | true | false |
levilucio/SyVOLT | ECore_Copier_MM/transformation-Large/HeclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature.py | 1 | 5144 |
from core.himesis import Himesis
class HeclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HeclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HeclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature, self).__init__(name='HeclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """eclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature"""
self["GUID__"] = 2496964863449983084
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 7878357618895443413
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 7103443176050273994
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 4075457791616116234
self.vs[3]["associationType"] = """eStructuralFeatures"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 82093709888820896
self.vs[4]["associationType"] = """eStructuralFeatures"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 7701273925430543805
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EClass"""
self.vs[5]["mm__"] = """EClass"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 7983479623067543816
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 2490505581444583091
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EStructuralFeature"""
self.vs[7]["mm__"] = """EStructuralFeature"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 3944901957551387274
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 6559812938848820978
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EClass"""
self.vs[9]["mm__"] = """EClass"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 1448143835880876879
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 122443353551202861
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EStructuralFeature"""
self.vs[11]["mm__"] = """EStructuralFeature"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 4455295229616770163
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 866889581155583712
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 1191675117595068943
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 2798140781034193118
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 7657253808431851836
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 8743892830884456720
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 3267268965152823955
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 445142670763407592
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 3054582158653006612
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 4367346554362163209
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 5673578323192681610
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 4531990103416906788
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 870047253623542103
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 3086761965923965550
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 2044225800229322622
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 7763793050620366314
| mit | -6,759,567,593,965,910,000 | 48.941748 | 298 | 0.528966 | false | 2.976852 | false | false | false |
davmre/treegp | experiments/code/aggregate_results.py | 1 | 2391 | import numpy as np
import re
from treegp.experiments.code.datasets import predict_results_fname, timing_results_fname, gp_fname
from treegp.gp import GP
import scipy.sparse
model_list_fname = "models"
basedir = "experiments/models/"
class NoResultsError(Exception):
pass
def extract_results(txt, prefix, require_times=True):
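    # Find the first line that starts with `prefix` (and mentions "times" when
    # require_times is set), then pull out the float that follows each
    # summary-statistic keyword, e.g. "... mean 0.12 std 0.03 ...".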
for line in txt:
if line.startswith(prefix):
if (not require_times) or "times" in line:
d = dict()
for w in ("mean", "std", "min", "10th", "50th", "90th", "max"):
d[w] = float(re.search(r"%s ([\.\d]+)" % w, line).group(1))
return d
raise NoResultsError("could not find line with prefix %s" % prefix)
def parse_timings(timings_lines):
sparse = extract_results(timings_lines, "sparse covar")
hybrid = extract_results(timings_lines, "sparse covar spkernel")
tree = extract_results(timings_lines, "tree: eps_abs")
return sparse, hybrid, tree
with open(model_list_fname, 'r') as f:
model_lines = f.readlines()
print_timings = False
print_fullness = True
for line in model_lines:
dataset, model, tag = line.strip().split()
accuracy_fname = predict_results_fname(dataset, model, tag)
timing_fname = timing_results_fname(dataset, model, tag)
trained_fname = gp_fname(dataset, model, tag=tag)
if print_timings:
try:
with open(timing_fname, 'r') as f:
timings_lines = f.readlines()
except IOError:
continue
sparse, hybrid, tree = parse_timings(timings_lines)
print dataset, model, sparse['mean']*1000, sparse['std']*1000, hybrid['mean']*1000, hybrid['std']* 1000, tree['mean']*1000, tree['std']*1000
else:
with open(accuracy_fname, 'r') as f:
acc_lines = f.readlines()
msll = float(acc_lines[0].split()[1])
smse = float(acc_lines[1].split()[1])
if print_fullness:
sgp = GP(fname=trained_fname, build_tree=False)
if scipy.sparse.issparse(sgp.Kinv):
fullness = float(len(sgp.Kinv.nonzero()[0])) / sgp.Kinv.shape[0]**2
else:
fullness = float(np.sum(np.abs(sgp.Kinv) > sgp.sparse_threshold)) / sgp.Kinv.shape[0]**2
fullness *= 100.0
else:
fullness = -1
print dataset, model, fullness, msll, smse
| gpl-3.0 | 2,367,399,508,398,024,000 | 33.157143 | 148 | 0.605604 | false | 3.445245 | false | false | false |
cwoebker/paxo | test_paxo.py | 1 | 3806 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for Paxo."""
import unittest
from clint import arguments
from paxo.core import Paxo
from paxo.command import Command, cmd, define_command, Collection
from paxo.util import ExitStatus, is_win, is_lin, is_osx, args
class AbstractTestCase(unittest.TestCase):
pass
class GeneralTestCase(AbstractTestCase):
"""Important things that shouldn't change across versions"""
def test_exit_status(self):
self.assertEqual(ExitStatus.OK, 0)
self.assertEqual(ExitStatus.ERROR, 1)
self.assertEqual(ExitStatus.ABORT, 2)
self.assertEqual(ExitStatus.HELP, 3)
self.assertEqual(ExitStatus.VERSION, 4)
self.assertEqual(ExitStatus.UNSUPPORTED, 5)
def test_operating_system(self):
def fn(c):
if c:
return 1
return 0
self.assertTrue(sum(map(fn, [is_win, is_lin, is_osx])) <= 1)
def test_arguments(self):
self.assertTrue(isinstance(args, arguments.Args))
class PaxoTestCase(AbstractTestCase):
"""Paxo test cases."""
def setUp(self):
self.paxo = Paxo('paxo', 'a test paxo', '<do this>', '0.1')
def tearDown(self):
"""Teardown."""
del self.paxo
def test_init(self):
self.assertEqual(self.paxo.name, 'paxo')
self.assertEqual(self.paxo.description, 'a test paxo')
self.assertEqual(self.paxo.command_info, '<do this>')
self.assertEqual(self.paxo.version, '0.1')
# self.assertEqual(self.paxo.__class__, '') # verify this later with Juan
def test_info(self):
pass
def test_help(self):
pass
class CommandTestCase(AbstractTestCase):
def tearDown(self):
Collection.clear_commands()
def test_define_command(self):
ret = define_command(name='test', fn=len, usage='test (<test_arg>)',
help='testing stuff')
self.assertTrue(isinstance(ret, Command))
self.assertEqual(len(Collection.list_commands()), 1)
class CommandManagerTestCase(AbstractTestCase):
def setUp(self):
self.testCommand = define_command(name='test', fn=len, usage='test (<test_arg>)',
help='testing stuff')
def tearDown(self):
Collection.clear_commands()
def test_cmd_decorator_command(self):
@cmd()
def hello(args):
print('Hello World!')
self.assertEqual(hello, Collection.lookup_command('hello').fn)
def test_list_commands(self):
self.assertEqual(len(Collection.list_commands()), len(Collection.COMMANDS))
self.assertEqual(len(Collection.list_commands()), 1)
def test_lookup_command(self):
self.assertTrue(isinstance(Collection.lookup_command('test'), Command))
self.assertEqual(Collection.lookup_command('test'), self.testCommand)
def test_register_command(self):
test = Command(name='test1', short=None, fn=len,
usage='test1 hi', help="testing stuff 1")
Collection.register_command(test)
self.assertEqual(test, Collection.lookup_command('test1'))
def test_double_command(self):
test = Command(name='test', short=None, fn=len,
usage='test1 hi', help="testing stuff 1")
self.assertFalse(Collection.register_command(test))
class CrossPlatformTestCase(AbstractTestCase):
pass
class ExecuteTestCase(AbstractTestCase):
def setUp(self):
self.paxo = Paxo('paxo', 'a test paxo', '<do this>', '0.1')
def tearDown(self):
del self.paxo
class TextTestCase(AbstractTestCase):
pass
class StorageTestCase(AbstractTestCase):
pass
class AutoStartTestCase(AbstractTestCase):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 4,723,879,664,171,489,000 | 26.985294 | 89 | 0.635575 | false | 3.863959 | true | false | false |
jiangtyd/crewviewer | migrations/versions/3f82af559c8a_.py | 1 | 1772 | """empty message
Revision ID: 3f82af559c8a
Revises: None
Create Date: 2015-10-16 03:37:42.100489
"""
# revision identifiers, used by Alembic.
revision = '3f82af559c8a'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('crew_battle',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('team1_name', sa.String(length=64), nullable=False),
sa.Column('team2_name', sa.String(length=64), nullable=False),
sa.Column('total_stocks', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('crew_match',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('battle_id', sa.Integer(), nullable=False),
sa.Column('battle_match_index', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['battle_id'], ['crew_battle.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('match_result',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('match_id', sa.Integer(), nullable=False),
sa.Column('match_player_index', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=False),
sa.Column('character', sa.String(length=32), nullable=False),
sa.Column('initial_stocks', sa.Integer(), nullable=False),
sa.Column('final_stocks', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['match_id'], ['crew_match.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('match_result')
op.drop_table('crew_match')
op.drop_table('crew_battle')
### end Alembic commands ###
| mit | 5,534,465,861,928,236,000 | 33.076923 | 66 | 0.664221 | false | 3.368821 | false | false | false |
StochasticNumerics/mimclib | tests/sc/echo_test_cmd.py | 1 | 1089 | #!/usr/bin/python
import numpy as np
import argparse
parser = argparse.ArgumentParser(add_help=True)
parser.register('type', 'bool',
lambda v: v.lower() in ("yes", "true", "t", "1"))
parser.add_argument("-db", type="bool", action="store", default=False)
parser.add_argument("-qoi_dim", type=int, action="store", default=10)
parser.add_argument("-qoi_func", type=int, action="store", default=1)
args, unknowns = parser.parse_known_args()
if args.qoi_dim:
base = "\
mimc_run.py -mimc_TOL {TOL} -qoi_seed 0 -mimc_min_dim {qoi_dim} -qoi_dim {qoi_dim} \
-mimc_M0 1 -mimc_moments 1 -mimc_bayesian False -qoi_func {qoi_func} \
".format(TOL="{TOL}",
qoi_dim=args.qoi_dim,
qoi_func=args.qoi_func)
else:
assert False
base += " ".join(unknowns)
if not args.db:
cmd_single = "python " + base + " -mimc_verbose 10 -db False "
print(cmd_single.format(TOL=0.001))
else:
cmd_multi = "python " + base + " -mimc_verbose 0 -db True -db_tag {tag} "
print cmd_multi.format(tag="sc_d{:d}_fn{:.2g}".format(args.qoi_dim, args.qoi_func), TOL=1e-10)
| gpl-2.0 | -4,881,164,111,776,599,000 | 34.129032 | 98 | 0.6382 | false | 2.544393 | false | true | false |
HPENetworking/HPEIMCUtils | PythonUtilities/Gen_ICMP_Device_CSV/gen_icmp_dev_csv.py | 1 | 3635 |
#!/usr/bin/env python3
# author: @netmanchris
"""
Copyright 2016 Hewlett Packard Enterprise Development LP.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" This file will take the GET the contents of the HPE IMC Network Assets module and dump them into a CSV file called
all_assets.csv where each line of the CSV file represents one physical or logical asset as discovered by the HPE IMC
platform.
This library uses the pyhpeimc python wrapper around the IMC RESTful API to automatically push the new performance tasks
with minimal effort on the part of the user."""
import csv
from pyhpeimc.auth import *
from pyhpeimc.plat.device import *
from pyhpeimc.plat.termaccess import *
auth = IMCAuth("http://", "10.196.252.1", "8080", "admin", "admin")
all_devs = get_all_devs(auth.creds, auth.url)
def filter_icmp(all_devs):
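    # Keep only devices whose IMC categoryId is '9', the category this script
    # treats as ICMP-only (ping-monitored) hosts.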
icmp_devs = []
for dev in all_devs:
if dev['categoryId'] == '9':
icmp_devs.append(dev)
return icmp_devs
icmp_devs = filter_icmp(all_devs)
for host in icmp_devs:
locate = get_real_time_locate(host['ip'], auth.creds, auth.url)
if type(locate) is list:
if 'deviceIp' in locate[0]:
int_details = get_interface_details( locate[0]['deviceId'], locate[0]['ifIndex'], auth.creds, auth.url)
dev_details = get_dev_details(locate[0]['deviceIp'], auth.creds, auth.url)
host['SwitchIp'] = locate[0]['deviceIp']
host['SwitchInt'] = locate[0]['ifDesc']
host['intDescription'] = int_details['ifAlias']
host['SwitchName'] = dev_details['label']
host['SwitchType'] = dev_details['typeName']
host['SwitchLocation'] = dev_details['location']
host['SwitchContact'] = dev_details['contact']
else:
host['SwitchIp'] = 'Unknown'
host['SwitchInt'] = 'Unknown'
if 'mac' not in host:
host['mac'] = "Unknown"
if 'intDescription' not in host:
host['intDescription'] = "Unknown"
if 'SwitchName' not in host:
host['SwitchName'] = "Unknown"
if 'SwitchType' not in host:
host['SwitchType'] = "Unknown"
if 'SwitchLocation' not in host:
host['SwitchLocation'] = "Unknown"
if 'SwitchContact' not in host:
host['SwitchContact'] = "Unknown"
final_list = [ {'hostLabel': i['label'],
'hostIp': i['ip'],
'hostMask' : i['mask'],
'SwitchIntDesc' : i['intDescription'],
'SwitchName' : i['SwitchName'],
'SwitchType' : i['SwitchType'],
'SwitchLocation' : i['SwitchLocation'],
'SwitchContact' : i['SwitchContact'],
'hostMac' : i['mac'],
'SwitchIp' : i['SwitchIp'],
'SwitchInt' : i['SwitchInt']}for i in icmp_devs ]
keys = final_list[0].keys()
for i in final_list:
if len(i) >= len(final_list[0].keys()):
keys = final_list[final_list.index(i)].keys()
with open ('icmp_devs.csv', 'w') as file:
dict_writer = csv.DictWriter(file, keys)
dict_writer.writeheader()
dict_writer.writerows(final_list)
| apache-2.0 | -1,490,100,215,400,305,000 | 33.951923 | 121 | 0.628336 | false | 3.690355 | false | false | false |
commaai/panda | tests/gps_stability_test.py | 1 | 5589 | #!/usr/bin/env python3
# flake8: noqa
import os
import sys
import time
import random
import threading
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from panda import Panda, PandaSerial # noqa: E402
INIT_GPS_BAUD = 9600
GPS_BAUD = 460800
def connect():
pandas = Panda.list()
print(pandas)
# make sure two pandas are connected
if len(pandas) != 2:
print("Connect white and grey/black panda to run this test!")
assert False
# connect
pandas[0] = Panda(pandas[0])
pandas[1] = Panda(pandas[1])
white_panda = None
gps_panda = None
# find out which one is white (for spamming the CAN buses)
if pandas[0].is_white() and not pandas[1].is_white():
white_panda = pandas[0]
gps_panda = pandas[1]
elif not pandas[0].is_white() and pandas[1].is_white():
white_panda = pandas[1]
gps_panda = pandas[0]
else:
print("Connect white and grey/black panda to run this test!")
assert False
return white_panda, gps_panda
def spam_buses_thread(panda):
try:
panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
while True:
at = random.randint(1, 2000)
st = (b"test" + os.urandom(10))[0:8]
bus = random.randint(0, 2)
panda.can_send(at, st, bus)
except Exception as e:
print(e)
def read_can_thread(panda):
try:
while True:
panda.can_recv()
except Exception as e:
print(e)
def init_gps(panda):
def add_nmea_checksum(msg):
d = msg[1:]
cs = 0
for i in d:
cs ^= ord(i)
return msg + "*%02X" % cs
ser = PandaSerial(panda, 1, INIT_GPS_BAUD)
# Power cycle the gps by toggling reset
print("Resetting GPS")
panda.set_esp_power(0)
time.sleep(0.5)
panda.set_esp_power(1)
time.sleep(0.5)
# Upping baud rate
print("Upping GPS baud rate")
msg = str.encode(add_nmea_checksum("$PUBX,41,1,0007,0003,%d,0" % GPS_BAUD) + "\r\n")
ser.write(msg)
time.sleep(1) # needs a wait for it to actually send
# Reconnecting with the correct baud
ser = PandaSerial(panda, 1, GPS_BAUD)
# Sending all config messages boardd sends
print("Sending config")
ser.write(b"\xB5\x62\x06\x00\x14\x00\x03\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00\x00\x1E\x7F")
ser.write(b"\xB5\x62\x06\x3E\x00\x00\x44\xD2")
ser.write(b"\xB5\x62\x06\x00\x14\x00\x00\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x19\x35")
ser.write(b"\xB5\x62\x06\x00\x14\x00\x01\x00\x00\x00\xC0\x08\x00\x00\x00\x08\x07\x00\x01\x00\x01\x00\x00\x00\x00\x00\xF4\x80")
ser.write(b"\xB5\x62\x06\x00\x14\x00\x04\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1D\x85")
ser.write(b"\xB5\x62\x06\x00\x00\x00\x06\x18")
ser.write(b"\xB5\x62\x06\x00\x01\x00\x01\x08\x22")
ser.write(b"\xB5\x62\x06\x00\x01\x00\x02\x09\x23")
ser.write(b"\xB5\x62\x06\x00\x01\x00\x03\x0A\x24")
ser.write(b"\xB5\x62\x06\x08\x06\x00\x64\x00\x01\x00\x00\x00\x79\x10")
ser.write(b"\xB5\x62\x06\x24\x24\x00\x05\x00\x04\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x5A\x63")
ser.write(b"\xB5\x62\x06\x1E\x14\x00\x00\x00\x00\x00\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3C\x37")
ser.write(b"\xB5\x62\x06\x24\x00\x00\x2A\x84")
ser.write(b"\xB5\x62\x06\x23\x00\x00\x29\x81")
ser.write(b"\xB5\x62\x06\x1E\x00\x00\x24\x72")
ser.write(b"\xB5\x62\x06\x01\x03\x00\x01\x07\x01\x13\x51")
ser.write(b"\xB5\x62\x06\x01\x03\x00\x02\x15\x01\x22\x70")
ser.write(b"\xB5\x62\x06\x01\x03\x00\x02\x13\x01\x20\x6C")
print("Initialized GPS")
received_messages = 0
received_bytes = 0
send_something = False
def gps_read_thread(panda):
global received_messages, received_bytes, send_something
ser = PandaSerial(panda, 1, GPS_BAUD)
while True:
ret = ser.read(1024)
time.sleep(0.001)
if len(ret):
received_messages += 1
received_bytes += len(ret)
if send_something:
ser.write("test")
send_something = False
CHECK_PERIOD = 5
MIN_BYTES = 10000
MAX_BYTES = 50000
min_failures = 0
max_failures = 0
if __name__ == "__main__":
white_panda, gps_panda = connect()
# Start spamming the CAN buses with the white panda. Also read the messages to add load on the GPS panda
threading.Thread(target=spam_buses_thread, args=(white_panda,)).start()
threading.Thread(target=read_can_thread, args=(gps_panda,)).start()
# Start GPS checking
init_gps(gps_panda)
read_thread = threading.Thread(target=gps_read_thread, args=(gps_panda,))
read_thread.start()
while True:
time.sleep(CHECK_PERIOD)
if(received_bytes < MIN_BYTES):
print("Panda is not sending out enough data! Got " + str(received_messages) + " (" + str(received_bytes) + "B) in the last " + str(CHECK_PERIOD) + " seconds")
send_something = True
min_failures += 1
elif(received_bytes > MAX_BYTES):
print("Panda is not sending out too much data! Got " + str(received_messages) + " (" + str(received_bytes) + "B) in the last " + str(CHECK_PERIOD) + " seconds")
print("Probably not on the right baud rate, got reset somehow? Resetting...")
max_failures += 1
init_gps(gps_panda)
else:
print("Got " + str(received_messages) + " (" + str(received_bytes) + "B) messages in the last " + str(CHECK_PERIOD) + " seconds.")
if(min_failures > 0):
print("Total min failures: ", min_failures)
if(max_failures > 0):
print("Total max failures: ", max_failures)
received_messages = 0
received_bytes = 0
| mit | 4,856,447,851,349,771,000 | 32.872727 | 192 | 0.66452 | false | 2.359223 | false | false | false |
stephanie-wang/ray | rllib/tests/test_multi_agent_env.py | 1 | 25398 | import gym
import random
import unittest
import ray
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.agents.dqn.dqn_policy import DQNTFPolicy
from ray.rllib.optimizers import (SyncSamplesOptimizer, SyncReplayOptimizer,
AsyncGradientsOptimizer)
from ray.rllib.tests.test_rollout_worker import (MockEnv, MockEnv2, MockPolicy)
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.policy.tests.test_policy import TestPolicy
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.env.base_env import _MultiAgentEnvToBaseEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.tune.registry import register_env
def one_hot(i, n):
out = [0.0] * n
out[i] = 1.0
return out
class BasicMultiAgent(MultiAgentEnv):
"""Env of N independent agents, each of which exits after 25 steps."""
def __init__(self, num):
self.agents = [MockEnv(25) for _ in range(num)]
self.dones = set()
self.observation_space = gym.spaces.Discrete(2)
self.action_space = gym.spaces.Discrete(2)
self.resetted = False
def reset(self):
self.resetted = True
self.dones = set()
return {i: a.reset() for i, a in enumerate(self.agents)}
def step(self, action_dict):
obs, rew, done, info = {}, {}, {}, {}
for i, action in action_dict.items():
obs[i], rew[i], done[i], info[i] = self.agents[i].step(action)
if done[i]:
self.dones.add(i)
done["__all__"] = len(self.dones) == len(self.agents)
return obs, rew, done, info
class EarlyDoneMultiAgent(MultiAgentEnv):
"""Env for testing when the env terminates (after agent 0 does)."""
def __init__(self):
self.agents = [MockEnv(3), MockEnv(5)]
self.dones = set()
self.last_obs = {}
self.last_rew = {}
self.last_done = {}
self.last_info = {}
self.i = 0
self.observation_space = gym.spaces.Discrete(10)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.dones = set()
self.last_obs = {}
self.last_rew = {}
self.last_done = {}
self.last_info = {}
self.i = 0
for i, a in enumerate(self.agents):
self.last_obs[i] = a.reset()
self.last_rew[i] = None
self.last_done[i] = False
self.last_info[i] = {}
obs_dict = {self.i: self.last_obs[self.i]}
self.i = (self.i + 1) % len(self.agents)
return obs_dict
def step(self, action_dict):
assert len(self.dones) != len(self.agents)
for i, action in action_dict.items():
(self.last_obs[i], self.last_rew[i], self.last_done[i],
self.last_info[i]) = self.agents[i].step(action)
obs = {self.i: self.last_obs[self.i]}
rew = {self.i: self.last_rew[self.i]}
done = {self.i: self.last_done[self.i]}
info = {self.i: self.last_info[self.i]}
if done[self.i]:
rew[self.i] = 0
self.dones.add(self.i)
self.i = (self.i + 1) % len(self.agents)
done["__all__"] = len(self.dones) == len(self.agents) - 1
return obs, rew, done, info
class RoundRobinMultiAgent(MultiAgentEnv):
"""Env of N independent agents, each of which exits after 5 steps.
On each step() of the env, only one agent takes an action."""
def __init__(self, num, increment_obs=False):
if increment_obs:
# Observations are 0, 1, 2, 3... etc. as time advances
self.agents = [MockEnv2(5) for _ in range(num)]
else:
# Observations are all zeros
self.agents = [MockEnv(5) for _ in range(num)]
self.dones = set()
self.last_obs = {}
self.last_rew = {}
self.last_done = {}
self.last_info = {}
self.i = 0
self.num = num
self.observation_space = gym.spaces.Discrete(10)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.dones = set()
self.last_obs = {}
self.last_rew = {}
self.last_done = {}
self.last_info = {}
self.i = 0
for i, a in enumerate(self.agents):
self.last_obs[i] = a.reset()
self.last_rew[i] = None
self.last_done[i] = False
self.last_info[i] = {}
obs_dict = {self.i: self.last_obs[self.i]}
self.i = (self.i + 1) % self.num
return obs_dict
def step(self, action_dict):
assert len(self.dones) != len(self.agents)
for i, action in action_dict.items():
(self.last_obs[i], self.last_rew[i], self.last_done[i],
self.last_info[i]) = self.agents[i].step(action)
obs = {self.i: self.last_obs[self.i]}
rew = {self.i: self.last_rew[self.i]}
done = {self.i: self.last_done[self.i]}
info = {self.i: self.last_info[self.i]}
if done[self.i]:
rew[self.i] = 0
self.dones.add(self.i)
self.i = (self.i + 1) % self.num
done["__all__"] = len(self.dones) == len(self.agents)
return obs, rew, done, info
def make_multiagent(env_name):
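    # Factory that wraps `num` independent copies of the named Gym env into one
    # MultiAgentEnv; observations, rewards, dones and infos are dicts keyed by
    # the copy index.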
class MultiEnv(MultiAgentEnv):
def __init__(self, num):
self.agents = [gym.make(env_name) for _ in range(num)]
self.dones = set()
self.observation_space = self.agents[0].observation_space
self.action_space = self.agents[0].action_space
def reset(self):
self.dones = set()
return {i: a.reset() for i, a in enumerate(self.agents)}
def step(self, action_dict):
obs, rew, done, info = {}, {}, {}, {}
for i, action in action_dict.items():
obs[i], rew[i], done[i], info[i] = self.agents[i].step(action)
if done[i]:
self.dones.add(i)
done["__all__"] = len(self.dones) == len(self.agents)
return obs, rew, done, info
return MultiEnv
MultiCartpole = make_multiagent("CartPole-v0")
MultiMountainCar = make_multiagent("MountainCarContinuous-v0")
class TestMultiAgentEnv(unittest.TestCase):
def testBasicMock(self):
env = BasicMultiAgent(4)
obs = env.reset()
self.assertEqual(obs, {0: 0, 1: 0, 2: 0, 3: 0})
for _ in range(24):
obs, rew, done, info = env.step({0: 0, 1: 0, 2: 0, 3: 0})
self.assertEqual(obs, {0: 0, 1: 0, 2: 0, 3: 0})
self.assertEqual(rew, {0: 1, 1: 1, 2: 1, 3: 1})
self.assertEqual(done, {
0: False,
1: False,
2: False,
3: False,
"__all__": False
})
obs, rew, done, info = env.step({0: 0, 1: 0, 2: 0, 3: 0})
self.assertEqual(done, {
0: True,
1: True,
2: True,
3: True,
"__all__": True
})
def testRoundRobinMock(self):
env = RoundRobinMultiAgent(2)
obs = env.reset()
self.assertEqual(obs, {0: 0})
for _ in range(5):
obs, rew, done, info = env.step({0: 0})
self.assertEqual(obs, {1: 0})
self.assertEqual(done["__all__"], False)
obs, rew, done, info = env.step({1: 0})
self.assertEqual(obs, {0: 0})
self.assertEqual(done["__all__"], False)
obs, rew, done, info = env.step({0: 0})
self.assertEqual(done["__all__"], True)
def testNoResetUntilPoll(self):
env = _MultiAgentEnvToBaseEnv(lambda v: BasicMultiAgent(2), [], 1)
self.assertFalse(env.get_unwrapped()[0].resetted)
env.poll()
self.assertTrue(env.get_unwrapped()[0].resetted)
def testVectorizeBasic(self):
env = _MultiAgentEnvToBaseEnv(lambda v: BasicMultiAgent(2), [], 2)
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
self.assertEqual(rew, {0: {0: None, 1: None}, 1: {0: None, 1: None}})
self.assertEqual(
dones, {
0: {
0: False,
1: False,
"__all__": False
},
1: {
0: False,
1: False,
"__all__": False
}
})
for _ in range(24):
env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
self.assertEqual(rew, {0: {0: 1, 1: 1}, 1: {0: 1, 1: 1}})
self.assertEqual(
dones, {
0: {
0: False,
1: False,
"__all__": False
},
1: {
0: False,
1: False,
"__all__": False
}
})
env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(
dones, {
0: {
0: True,
1: True,
"__all__": True
},
1: {
0: True,
1: True,
"__all__": True
}
})
# Reset processing
self.assertRaises(
ValueError, lambda: env.send_actions({
0: {
0: 0,
1: 0
},
1: {
0: 0,
1: 0
}
}))
self.assertEqual(env.try_reset(0), {0: 0, 1: 0})
self.assertEqual(env.try_reset(1), {0: 0, 1: 0})
env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
self.assertEqual(rew, {0: {0: 1, 1: 1}, 1: {0: 1, 1: 1}})
self.assertEqual(
dones, {
0: {
0: False,
1: False,
"__all__": False
},
1: {
0: False,
1: False,
"__all__": False
}
})
def testVectorizeRoundRobin(self):
env = _MultiAgentEnvToBaseEnv(lambda v: RoundRobinMultiAgent(2), [], 2)
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0}, 1: {0: 0}})
self.assertEqual(rew, {0: {0: None}, 1: {0: None}})
env.send_actions({0: {0: 0}, 1: {0: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {1: 0}, 1: {1: 0}})
env.send_actions({0: {1: 0}, 1: {1: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0}, 1: {0: 0}})
def testMultiAgentSample(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
batch_steps=50)
batch = ev.sample()
self.assertEqual(batch.count, 50)
self.assertEqual(batch.policy_batches["p0"].count, 150)
self.assertEqual(batch.policy_batches["p1"].count, 100)
self.assertEqual(batch.policy_batches["p0"]["t"].tolist(),
list(range(25)) * 6)
def testMultiAgentSampleSyncRemote(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
batch_steps=50,
num_envs=4,
remote_worker_envs=True,
remote_env_batch_wait_ms=99999999)
batch = ev.sample()
self.assertEqual(batch.count, 200)
def testMultiAgentSampleAsyncRemote(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
batch_steps=50,
num_envs=4,
remote_worker_envs=True)
batch = ev.sample()
self.assertEqual(batch.count, 200)
def testMultiAgentSampleWithHorizon(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
episode_horizon=10, # test with episode horizon set
batch_steps=50)
batch = ev.sample()
self.assertEqual(batch.count, 50)
def testSampleFromEarlyDoneEnv(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: EarlyDoneMultiAgent(),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
batch_mode="complete_episodes",
batch_steps=1)
self.assertRaisesRegexp(ValueError,
".*don't have a last observation.*",
lambda: ev.sample())
def testMultiAgentSampleRoundRobin(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(10)
ev = RolloutWorker(
env_creator=lambda _: RoundRobinMultiAgent(5, increment_obs=True),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p0",
batch_steps=50)
batch = ev.sample()
self.assertEqual(batch.count, 50)
# since we round robin introduce agents into the env, some of the env
# steps don't count as proper transitions
self.assertEqual(batch.policy_batches["p0"].count, 42)
self.assertEqual(batch.policy_batches["p0"]["obs"].tolist()[:10], [
one_hot(0, 10),
one_hot(1, 10),
one_hot(2, 10),
one_hot(3, 10),
one_hot(4, 10),
] * 2)
self.assertEqual(batch.policy_batches["p0"]["new_obs"].tolist()[:10], [
one_hot(1, 10),
one_hot(2, 10),
one_hot(3, 10),
one_hot(4, 10),
one_hot(5, 10),
] * 2)
self.assertEqual(batch.policy_batches["p0"]["rewards"].tolist()[:10],
[100, 100, 100, 100, 0] * 2)
self.assertEqual(batch.policy_batches["p0"]["dones"].tolist()[:10],
[False, False, False, False, True] * 2)
self.assertEqual(batch.policy_batches["p0"]["t"].tolist()[:10],
[4, 9, 14, 19, 24, 5, 10, 15, 20, 25])
def test_custom_rnn_state_values(self):
h = {"some": {"arbitrary": "structure", "here": [1, 2, 3]}}
class StatefulPolicy(TestPolicy):
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
**kwargs):
return [0] * len(obs_batch), [[h] * len(obs_batch)], {}
def get_initial_state(self):
return [{}] # empty dict
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=StatefulPolicy,
batch_steps=5)
batch = ev.sample()
self.assertEqual(batch.count, 5)
self.assertEqual(batch["state_in_0"][0], {})
self.assertEqual(batch["state_out_0"][0], h)
self.assertEqual(batch["state_in_0"][1], h)
self.assertEqual(batch["state_out_0"][1], h)
def test_returning_model_based_rollouts_data(self):
class ModelBasedPolicy(PGTFPolicy):
def compute_actions(self,
obs_batch,
state_batches,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
**kwargs):
# Pretend we did a model-based rollout and want to return
# the extra trajectory.
builder = episodes[0].new_batch_builder()
rollout_id = random.randint(0, 10000)
for t in range(5):
builder.add_values(
agent_id="extra_0",
policy_id="p1", # use p1 so we can easily check it
t=t,
eps_id=rollout_id, # new id for each rollout
obs=obs_batch[0],
actions=0,
rewards=0,
dones=t == 4,
infos={},
new_obs=obs_batch[0])
batch = builder.build_and_reset(episode=None)
episodes[0].add_extra_batch(batch)
# Just return zeros for actions
return [0] * len(obs_batch), [], {}
single_env = gym.make("CartPole-v0")
obs_space = single_env.observation_space
act_space = single_env.action_space
ev = RolloutWorker(
env_creator=lambda _: MultiCartpole(2),
policy={
"p0": (ModelBasedPolicy, obs_space, act_space, {}),
"p1": (ModelBasedPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p0",
batch_steps=5)
batch = ev.sample()
self.assertEqual(batch.count, 5)
self.assertEqual(batch.policy_batches["p0"].count, 10)
self.assertEqual(batch.policy_batches["p1"].count, 25)
def test_train_multi_cartpole_single_policy(self):
n = 10
register_env("multi_cartpole", lambda _: MultiCartpole(n))
pg = PGTrainer(env="multi_cartpole", config={"num_workers": 0})
for i in range(100):
result = pg.train()
print("Iteration {}, reward {}, timesteps {}".format(
i, result["episode_reward_mean"], result["timesteps_total"]))
if result["episode_reward_mean"] >= 50 * n:
return
raise Exception("failed to improve reward")
def test_train_multi_cartpole_multi_policy(self):
n = 10
register_env("multi_cartpole", lambda _: MultiCartpole(n))
single_env = gym.make("CartPole-v0")
def gen_policy():
config = {
"gamma": random.choice([0.5, 0.8, 0.9, 0.95, 0.99]),
"n_step": random.choice([1, 2, 3, 4, 5]),
}
obs_space = single_env.observation_space
act_space = single_env.action_space
return (None, obs_space, act_space, config)
pg = PGTrainer(
env="multi_cartpole",
config={
"num_workers": 0,
"multiagent": {
"policies": {
"policy_1": gen_policy(),
"policy_2": gen_policy(),
},
"policy_mapping_fn": lambda agent_id: "policy_1",
},
})
# Just check that it runs without crashing
for i in range(10):
result = pg.train()
print("Iteration {}, reward {}, timesteps {}".format(
i, result["episode_reward_mean"], result["timesteps_total"]))
self.assertTrue(
pg.compute_action([0, 0, 0, 0], policy_id="policy_1") in [0, 1])
self.assertTrue(
pg.compute_action([0, 0, 0, 0], policy_id="policy_2") in [0, 1])
self.assertRaises(
KeyError,
lambda: pg.compute_action([0, 0, 0, 0], policy_id="policy_3"))
def _testWithOptimizer(self, optimizer_cls):
n = 3
env = gym.make("CartPole-v0")
act_space = env.action_space
obs_space = env.observation_space
dqn_config = {"gamma": 0.95, "n_step": 3}
if optimizer_cls == SyncReplayOptimizer:
# TODO: support replay with non-DQN graphs. Currently this can't
# happen since the replay buffer doesn't encode extra fields like
# "advantages" that PG uses.
policies = {
"p1": (DQNTFPolicy, obs_space, act_space, dqn_config),
"p2": (DQNTFPolicy, obs_space, act_space, dqn_config),
}
else:
policies = {
"p1": (PGTFPolicy, obs_space, act_space, {}),
"p2": (DQNTFPolicy, obs_space, act_space, dqn_config),
}
worker = RolloutWorker(
env_creator=lambda _: MultiCartpole(n),
policy=policies,
policy_mapping_fn=lambda agent_id: ["p1", "p2"][agent_id % 2],
batch_steps=50)
if optimizer_cls == AsyncGradientsOptimizer:
def policy_mapper(agent_id):
return ["p1", "p2"][agent_id % 2]
remote_workers = [
RolloutWorker.as_remote().remote(
env_creator=lambda _: MultiCartpole(n),
policy=policies,
policy_mapping_fn=policy_mapper,
batch_steps=50)
]
else:
remote_workers = []
workers = WorkerSet._from_existing(worker, remote_workers)
optimizer = optimizer_cls(workers)
for i in range(200):
worker.foreach_policy(lambda p, _: p.set_epsilon(
max(0.02, 1 - i * .02))
if isinstance(p, DQNTFPolicy) else None)
optimizer.step()
result = collect_metrics(worker, remote_workers)
if i % 20 == 0:
def do_update(p):
if isinstance(p, DQNTFPolicy):
p.update_target()
worker.foreach_policy(lambda p, _: do_update(p))
print("Iter {}, rew {}".format(i,
result["policy_reward_mean"]))
print("Total reward", result["episode_reward_mean"])
if result["episode_reward_mean"] >= 25 * n:
return
print(result)
raise Exception("failed to improve reward")
def test_multi_agent_sync_optimizer(self):
self._testWithOptimizer(SyncSamplesOptimizer)
def test_multi_agent_async_gradients_optimizer(self):
self._testWithOptimizer(AsyncGradientsOptimizer)
def test_multi_agent_replay_optimizer(self):
self._testWithOptimizer(SyncReplayOptimizer)
def test_train_multi_cartpole_many_policies(self):
n = 20
env = gym.make("CartPole-v0")
act_space = env.action_space
obs_space = env.observation_space
policies = {}
for i in range(20):
policies["pg_{}".format(i)] = (PGTFPolicy, obs_space, act_space,
{})
policy_ids = list(policies.keys())
worker = RolloutWorker(
env_creator=lambda _: MultiCartpole(n),
policy=policies,
policy_mapping_fn=lambda agent_id: random.choice(policy_ids),
batch_steps=100)
workers = WorkerSet._from_existing(worker, [])
optimizer = SyncSamplesOptimizer(workers)
for i in range(100):
optimizer.step()
result = collect_metrics(worker)
print("Iteration {}, rew {}".format(i,
result["policy_reward_mean"]))
print("Total reward", result["episode_reward_mean"])
if result["episode_reward_mean"] >= 25 * n:
return
raise Exception("failed to improve reward")
if __name__ == "__main__":
ray.init(num_cpus=4)
unittest.main(verbosity=2)
| apache-2.0 | -3,413,724,919,242,403,000 | 37.077961 | 79 | 0.495551 | false | 3.699097 | true | false | false |
dgketchum/MT_Rsense | utils/raster_tools.py | 1 | 5477 | # ===============================================================================
# Copyright 2017 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
"""
The purpose of this module is to provide some simple tools needed for raster processing.
"""
import os
from numpy import array, asarray
from numpy.ma import masked_where, nomask
from osgeo import gdal, ogr
import spatial_reference_tools as srt
def raster_to_array(input_raster_path, raster=None, band=1):
"""
Convert .tif raster into a numpy numerical array.
:param input_raster_path: Path to raster.
:param raster: Raster name with *.tif
:param band: Band of raster sought.
:return: Numpy array.
"""
try:
raster_open = gdal.Open(os.path.join(input_raster_path, raster))
except TypeError:
raster_open = gdal.Open(input_raster_path)
except AttributeError:
raster_open = gdal.Open(input_raster_path)
ras = array(raster_open.GetRasterBand(band).ReadAsArray(), dtype=float)
return ras
def get_polygon_from_raster(raster):
tile_id = os.path.basename(raster)
# print 'tile number: {}'.format(tile_id)
# print 'get poly tile: {}'.format(tile_id)
# get raster geometry
tile = gdal.Open(raster)
# print 'tile is type: {}'.format(tile)
transform = tile.GetGeoTransform()
pixel_width = transform[1]
pixel_height = transform[5]
cols = tile.RasterXSize
rows = tile.RasterYSize
x_left = transform[0]
y_top = transform[3]
x_right = x_left + cols * pixel_width
y_bottom = y_top - rows * pixel_height
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(x_left, y_top)
ring.AddPoint(x_left, y_bottom)
ring.AddPoint(x_right, y_top)
ring.AddPoint(x_right, y_bottom)
ring.AddPoint(x_left, y_top)
raster_geo = ogr.Geometry(ogr.wkbPolygon)
raster_geo.AddGeometry(ring)
# print 'found poly tile geo: {}'.format(raster_geo)
return raster_geo
def find_poly_ras_intersect(shape, raster_dir, extension='.tif'):
""" Finds all the tiles falling within raster object
the get shape geometry should be seperated from the intesect check,
currently causes a exit code 139 on unix box
:param polygon:
:param extension:
:param raster_dir:
"""
print 'starting shape: {}'.format(shape)
# get vector geometry
if not os.path.isfile(shape):
raise NotImplementedError('Shapefile not found')
polygon = ogr.Open(shape)
layer = polygon.GetLayer()
feature = layer.GetFeature(0)
vector_geo = feature.GetGeometryRef()
# print 'vector geometry: {}'.format(vector_geo)
tiles = [os.path.join(raster_dir, x) for x in
os.listdir(os.path.join(raster_dir)) if x.endswith(extension)]
raster_list = []
for tile in tiles:
print tile, srt.tif_proj4_spatial_reference(tile)
if srt.check_same_reference_system(shape, tile):
raster_geo = get_polygon_from_raster(tile)
if raster_geo.Intersect(vector_geo):
print 'tile: {} intersects {}'.format(os.path.basename(tile), os.path.basename(shape))
raster_list.append(tile)
return raster_list
def apply_mask(mask_path, arr):
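    # Flatten `arr` down to only the cells where the first .tif found in
    # mask_path is nonzero; returns None if no mask raster is found.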
out = None
file_name = next((fn for fn in os.listdir(mask_path) if fn.endswith('.tif')), None)
if file_name is not None:
mask = raster_to_array(mask_path, file_name)
idxs = asarray(mask, dtype=bool)
out = arr[idxs].flatten()
return out
def remake_array(mask_path, arr):
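    # Inverse of apply_mask: scatter the flattened values back into the 2-D
    # footprint defined by the mask raster, filling masked-out cells with 0.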
out = None
file_name = next((filename for filename in os.listdir(mask_path) if filename.endswith('.tif')), None)
if file_name is not None:
mask_array = raster_to_array(mask_path, file_name)
masked_arr = masked_where(mask_array == 0, mask_array)
masked_arr[~masked_arr.mask] = arr.ravel()
masked_arr.mask = nomask
arr = masked_arr.filled(0)
out = arr
return out
def array_to_raster(save_array, out_path, geo):
    key = None  # no per-band key is tracked; used only in the status print below
driver = gdal.GetDriverByName('GTiff')
out_data_set = driver.Create(out_path, geo['cols'], geo['rows'],
geo['bands'], geo['data_type'])
out_data_set.SetGeoTransform(geo['geotransform'])
out_data_set.SetProjection(geo['projection'])
output_band = out_data_set.GetRasterBand(1)
output_band.WriteArray(save_array, 0, 0)
print 'written array {} mean value: {}'.format(key, save_array.mean())
return None
if __name__ == '__main__':
pass
# home = os.path.expanduser('~')
# terrain = os.path.join(home, 'images', 'terrain', 'ned_tiles', 'dem')
# shape = os.path.join(home, 'images', 'vector_data', 'wrs2_descending',
# 'wrs2_036029_Z12.shp')
# find_poly_ras_intersect(shape, terrain)
# =================================== EOF =========================
| apache-2.0 | -4,436,257,168,152,308,700 | 32.808642 | 105 | 0.627898 | false | 3.624752 | false | false | false |
szecsi/Gears | GearsPy/Project/Components/Tone/Erf.py | 1 | 1254 | import Gears as gears
from .. import *
class Erf(Component) :
def applyWithArgs(
self,
stimulus,
*,
toneRangeMean = 0.5,
toneRangeVar = 0.3,
dynamic = False
) :
self.stimulus = stimulus;
try:
trmean = toneRangeMean.value
except:
trmean = toneRangeMean
try:
trvar = toneRangeVar.value
except:
trvar = toneRangeVar
stimulus.setToneMappingErf(trmean, trvar, dynamic)
self.registerInteractiveControls(
None, stimulus,
"",
toneRangeMean = toneRangeMean ,
toneRangeVar = toneRangeVar ,
)
def update(self, **kwargs):
for key, control in self.interactiveControls.items() :
if key == 'toneRangeMean' :
try:
self.stimulus.toneRangeMean = control.value
except:
self.stimulus.toneRangeMean = control
if key == 'toneRangeVar' :
try:
self.stimulus.toneRangeVar = control.value
except:
self.stimulus.toneRangeVar = control
| gpl-2.0 | 1,520,837,245,257,952,300 | 25.125 | 63 | 0.495215 | false | 4.309278 | false | false | false |
johan92/fpga-quadtree | tb/t.py | 1 | 2226 | import random
def write_stages( stages_l, fname ):
f = open( fname, "w" )
for (i, stage) in enumerate( stages_l ):
for (addr, data) in enumerate( stage ):
wr_str = str(i) + " " + str( addr )
for d in data:
wr_str = wr_str + " " + str( d )
f.write("%s\n" % wr_str)
f.close()
def write_table( table_l, fname ):
f = open( fname, "w" )
for (addr, row) in enumerate( table_l ):
for (i, d) in enumerate( row ):
(val, en) = d
if en == True:
wr_str = str(addr) + " " + str(i) + " " + str(val)
f.write("%s\n" % wr_str)
f.close()
if __name__ == "__main__":
STAGES = 4
D_CNT = 4
MAX_NUM = 255
TOTAL_NUMS = 101
LAST_TABLE_ADDR_CNT = pow(4, STAGES)
MAX_HOLD_DATA_CNT = LAST_TABLE_ADDR_CNT * 4
print "maximum data %d" % ( MAX_HOLD_DATA_CNT )
all_nums_s = set()
all_nums_l = list()
while ( len( all_nums_s ) < TOTAL_NUMS ) and ( len( all_nums_s ) < MAX_HOLD_DATA_CNT ):
r = random.randint(0, MAX_NUM)
all_nums_s.add( r )
all_nums_l = list( sorted( all_nums_s ) )
print all_nums_l
match_table = list()
for i in xrange( LAST_TABLE_ADDR_CNT ):
match_table.append( list() )
for j in xrange( D_CNT ):
match_table[i].append( ( MAX_NUM, False ) )
for (i, n) in enumerate( all_nums_l ):
addr = i / D_CNT
pos = i % D_CNT
match_table[addr][pos] = ( n, True )
for i in match_table:
print i
stages_l = list()
for i in xrange( STAGES ):
stages_l.append( list() )
for j in xrange( pow(4, i ) ):
stages_l[i].append( [MAX_NUM, MAX_NUM, MAX_NUM] )
print stages_l
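  # Build the routing stages bottom-up: each node keeps 3 separator keys taken
  # from its first three children, so a lookup chooses one of 4 children per
  # stage (the 4-ary / quadtree search structure this testbench models).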
for stage in reversed( xrange( STAGES ) ):
if stage == ( STAGES - 1 ):
for (i, row) in enumerate( match_table ):
if i % 4 != 3:
# max value in last bucket
(m, en) = row[D_CNT-1]
#print stage, i/4, i%4
stages_l[ stage ][i/4][i%4] = m
else:
for (i, row) in enumerate( stages_l[ stage + 1 ] ):
if i % 4 != 3:
m = row[2]
stages_l[ stage ][ i / 4 ][ i % 4 ] = m
write_stages( stages_l, "tree" )
write_table( match_table, "table" )
| mit | -1,756,596,652,279,596,800 | 25.188235 | 89 | 0.504492 | false | 2.792974 | false | false | false |
Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/action/twikidraw.py | 2 | 9397 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - twikidraw
This action is used to call twikidraw
@copyright: 2001 by Ken Sugino ([email protected]),
2001-2004 by Juergen Hermann <[email protected]>,
2005 MoinMoin:AlexanderSchremmer,
2005 DiegoOngaro at ETSZONE ([email protected]),
2007-2008 MoinMoin:ThomasWaldmann,
2005-2009 MoinMoin:ReimarBauer,
@license: GNU GPL, see COPYING for details.
"""
import os, re
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin import wikiutil, config
from MoinMoin.action import AttachFile, do_show
from MoinMoin.action.AttachFile import _write_stream
from MoinMoin.security.textcha import TextCha
action_name = __name__.split('.')[-1]
def gedit_drawing(self, url, text, **kw):
# This is called for displaying a drawing image by gui editor.
_ = self.request.getText
# TODO: this 'text' argument is kind of superfluous, replace by using alt=... kw arg
# ToDo: make this clickable for the gui editor
if 'alt' not in kw or not kw['alt']:
kw['alt'] = text
# we force the title here, needed later for html>wiki converter
kw['title'] = "drawing:%s" % wikiutil.quoteWikinameURL(url)
pagename, drawing = AttachFile.absoluteName(url, self.page.page_name)
containername = wikiutil.taintfilename(drawing)
drawing_url = AttachFile.getAttachUrl(pagename, containername, self.request)
ci = AttachFile.ContainerItem(self.request, pagename, containername)
if not ci.exists():
title = _('Create new drawing "%(filename)s (opens in new window)"') % {'filename': self.text(containername)}
img = self.icon('attachimg') # TODO: we need a new "drawimg" in similar grey style and size
css = 'nonexistent'
return self.url(1, drawing_url, css=css, title=title) + img + self.url(0)
kw['src'] = ci.member_url('drawing.png')
return self.image(**kw)
def attachment_drawing(self, url, text, **kw):
# This is called for displaying a clickable drawing image by text_html formatter.
# XXX text arg is unused!
_ = self.request.getText
pagename, drawing = AttachFile.absoluteName(url, self.page.page_name)
containername = wikiutil.taintfilename(drawing)
drawing_url = AttachFile.getAttachUrl(pagename, containername, self.request, do='modify')
ci = AttachFile.ContainerItem(self.request, pagename, containername)
if not ci.exists():
title = _('Create new drawing "%(filename)s (opens in new window)"') % {'filename': self.text(containername)}
img = self.icon('attachimg') # TODO: we need a new "drawimg" in similar grey style and size
css = 'nonexistent'
return self.url(1, drawing_url, css=css, title=title) + img + self.url(0)
title = _('Edit drawing %(filename)s (opens in new window)') % {'filename': self.text(containername)}
kw['src'] = src = ci.member_url('drawing.png')
kw['css'] = 'drawing'
try:
mapfile = ci.get('drawing.map')
map = mapfile.read()
mapfile.close()
map = map.decode(config.charset)
except (KeyError, IOError, OSError):
map = u''
if map:
# we have a image map. inline it and add a map ref to the img tag
# we have also to set a unique ID
mapid = u'ImageMapOf%s%s' % (self.request.uid_generator(pagename), drawing)
map = map.replace(u'%MAPNAME%', mapid)
# add alt and title tags to areas
map = re.sub(ur'href\s*=\s*"((?!%TWIKIDRAW%).+?)"', ur'href="\1" alt="\1" title="\1"', map)
map = map.replace(u'%TWIKIDRAW%"', u'%s" alt="%s" title="%s"' % (
wikiutil.escape(drawing_url, 1), title, title))
# unxml, because 4.01 concrete will not validate />
map = map.replace(u'/>', u'>')
title = _('Clickable drawing: %(filename)s') % {'filename': self.text(containername)}
if 'title' not in kw:
kw['title'] = title
if 'alt' not in kw:
kw['alt'] = kw['title']
kw['usemap'] = '#'+mapid
return self.url(1, drawing_url) + map + self.image(**kw) + self.url(0)
else:
if 'title' not in kw:
kw['title'] = title
if 'alt' not in kw:
kw['alt'] = kw['title']
return self.url(1, drawing_url) + self.image(**kw) + self.url(0)
class TwikiDraw(object):
""" twikidraw action """
def __init__(self, request, pagename, target):
self.request = request
self.pagename = pagename
self.target = target
def save(self):
request = self.request
_ = request.getText
if not wikiutil.checkTicket(request, request.args.get('ticket', '')):
return _('Please use the interactive user interface to use action %(actionname)s!') % {'actionname': 'twikidraw.save' }
pagename = self.pagename
target = self.target
if not request.user.may.write(pagename):
return _('You are not allowed to save a drawing on this page.')
if not target:
return _("Empty target name given.")
file_upload = request.files.get('filepath')
if not file_upload:
# This might happen when trying to upload file names
# with non-ascii characters on Safari.
return _("No file content. Delete non ASCII characters from the file name and try again.")
filename = request.form['filename']
basepath, basename = os.path.split(filename)
basename, ext = os.path.splitext(basename)
ci = AttachFile.ContainerItem(request, pagename, target)
filecontent = file_upload.stream
content_length = None
if ext == '.draw': # TWikiDraw POSTs this first
AttachFile._addLogEntry(request, 'ATTDRW', pagename, target)
ci.truncate()
filecontent = filecontent.read() # read file completely into memory
filecontent = filecontent.replace("\r", "")
elif ext == '.map':
# touch attachment directory to invalidate cache if new map is saved
attach_dir = AttachFile.getAttachDir(request, pagename)
os.utime(attach_dir, None)
filecontent = filecontent.read() # read file completely into memory
filecontent = filecontent.strip()
else:
#content_length = file_upload.content_length
# XXX gives -1 for wsgiref :( If this is fixed, we could use the file obj,
# without reading it into memory completely:
filecontent = filecontent.read()
ci.put('drawing' + ext, filecontent, content_length)
def render(self):
request = self.request
_ = request.getText
pagename = self.pagename
target = self.target
if not request.user.may.read(pagename):
return _('You are not allowed to view attachments of this page.')
if not target:
return _("Empty target name given.")
ci = AttachFile.ContainerItem(request, pagename, target)
if ci.exists():
drawurl = ci.member_url('drawing.draw')
pngurl = ci.member_url('drawing.png')
else:
drawurl = 'drawing.draw'
pngurl = 'drawing.png'
pageurl = request.href(pagename)
saveurl = request.href(pagename, action=action_name, do='save', target=target,
ticket=wikiutil.createTicket(request))
helpurl = request.href("HelpOnActions/AttachFile")
html = """
<p>
<applet code="CH.ifa.draw.twiki.TWikiDraw.class"
archive="%(htdocs)s/applets/TWikiDrawPlugin/twikidraw.jar" width="640" height="480">
<param name="drawpath" value="%(drawurl)s">
<param name="pngpath" value="%(pngurl)s">
<param name="savepath" value="%(saveurl)s">
<param name="basename" value="%(basename)s">
<param name="viewpath" value="%(pageurl)s">
<param name="helppath" value="%(helpurl)s">
<strong>NOTE:</strong> You need a Java enabled browser to edit the drawing.
</applet>
</p>
""" % dict(
htdocs=request.cfg.url_prefix_static,
basename=wikiutil.escape(target, 1),
drawurl=wikiutil.escape(drawurl, 1),
pngurl=wikiutil.escape(pngurl, 1),
pageurl=wikiutil.escape(pageurl, 1),
saveurl=wikiutil.escape(saveurl, 1),
helpurl=wikiutil.escape(helpurl, 1),
)
title = "%s %s:%s" % (_("Edit drawing"), pagename, target)
request.theme.send_title(title, page=request.page, pagename=pagename)
request.write(request.formatter.startContent("content"))
request.write(request.formatter.rawHTML(html))
request.write(request.formatter.endContent())
request.theme.send_footer(pagename)
request.theme.send_closing_html()
def execute(pagename, request):
target = request.values.get('target')
target = wikiutil.taintfilename(target)
twd = TwikiDraw(request, pagename, target)
do = request.values.get('do')
if do == 'save':
msg = twd.save()
else:
msg = twd.render()
if msg:
request.theme.add_msg(msg, 'error')
do_show(pagename, request)
| mit | -8,768,357,111,359,614,000 | 39.950893 | 131 | 0.607002 | false | 3.695242 | false | false | false |
google/fuzzbench | common/test_gsutil.py | 1 | 3305 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gsutil.py."""
from unittest import mock
import pytest
from common import gsutil
from test_libs import utils as test_utils
def test_gsutil_command():
"""Tests gsutil_command works as expected."""
arguments = ['hello']
with test_utils.mock_popen_ctx_mgr() as mocked_popen:
gsutil.gsutil_command(arguments)
assert mocked_popen.commands == [['gsutil'] + arguments]
@pytest.mark.parametrize(('must_exist'), [True, False])
def test_ls_must_exist(must_exist):
"""Tests that ls makes a correct call to new_process.execute when
must_exist is specified."""
with mock.patch('common.new_process.execute') as mocked_execute:
gsutil.ls('gs://hello', must_exist=must_exist)
mocked_execute.assert_called_with(['gsutil', 'ls', 'gs://hello'],
expect_zero=must_exist)
class TestGsutilRsync:
"""Tests for gsutil_command works as expected."""
SRC = '/src'
DST = 'gs://dst'
def test_rsync(self):
"""Tests that rsync works as intended."""
with mock.patch(
'common.gsutil.gsutil_command') as mocked_gsutil_command:
gsutil.rsync(self.SRC, self.DST)
mocked_gsutil_command.assert_called_with(
['rsync', '-d', '-r', '/src', 'gs://dst'], parallel=False)
def test_gsutil_options(self):
"""Tests that rsync works as intended when supplied a gsutil_options
argument."""
flag = '-flag'
with mock.patch(
'common.gsutil.gsutil_command') as mocked_gsutil_command:
gsutil.rsync(self.SRC, self.DST, gsutil_options=[flag])
assert flag == mocked_gsutil_command.call_args_list[0][0][0][0]
def test_options(self):
"""Tests that rsync works as intended when supplied a gsutil_options
argument."""
flag = '-flag'
with mock.patch(
'common.gsutil.gsutil_command') as mocked_gsutil_command:
gsutil.rsync(self.SRC, self.DST, options=[flag])
assert flag in mocked_gsutil_command.call_args_list[0][0][0]
@pytest.mark.parametrize(('kwarg_for_rsync', 'flag'), [('delete', '-d'),
('recursive', '-r')])
def test_no_flag(self, kwarg_for_rsync, flag):
"""Tests that rsync works as intended when caller specifies not
to use specific flags."""
kwargs_for_rsync = {}
kwargs_for_rsync[kwarg_for_rsync] = False
with mock.patch(
'common.gsutil.gsutil_command') as mocked_gsutil_command:
gsutil.rsync(self.SRC, self.DST, **kwargs_for_rsync)
assert flag not in mocked_gsutil_command.call_args_list[0][0][0]
| apache-2.0 | 1,608,385,825,253,286,700 | 38.819277 | 80 | 0.63177 | false | 3.883666 | true | false | false |
Azure/azure-storage-python | azure-storage-queue/azure/storage/queue/_serialization.py | 1 | 2173 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
if sys.version_info >= (3,):
from io import BytesIO
else:
try:
from cStringIO import StringIO as BytesIO
except:
from StringIO import StringIO as BytesIO
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from azure.storage.common._common_conversion import (
_str,
)
from ._encryption import (
_encrypt_queue_message,
)
def _get_path(queue_name=None, include_messages=None, message_id=None):
'''
Creates the path to access a queue resource.
queue_name:
Name of queue.
include_messages:
Whether or not to include messages.
message_id:
Message id.
'''
if queue_name and include_messages and message_id:
return '/{0}/messages/{1}'.format(_str(queue_name), message_id)
if queue_name and include_messages:
return '/{0}/messages'.format(_str(queue_name))
elif queue_name:
return '/{0}'.format(_str(queue_name))
else:
return '/'
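# Editorial example (not part of the original SDK source; queue and message
# names are made up): following the branches above,
# _get_path('myqueue', True, 'msg1') returns '/myqueue/messages/msg1',
# _get_path('myqueue', True) returns '/myqueue/messages', and
# _get_path('myqueue') returns '/myqueue'.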
def _convert_queue_message_xml(message_text, encode_function, key_encryption_key):
'''
<?xml version="1.0" encoding="utf-8"?>
<QueueMessage>
<MessageText></MessageText>
</QueueMessage>
'''
queue_message_element = ETree.Element('QueueMessage')
# Enabled
message_text = encode_function(message_text)
if key_encryption_key is not None:
message_text = _encrypt_queue_message(message_text, key_encryption_key)
ETree.SubElement(queue_message_element, 'MessageText').text = message_text
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
output = stream.getvalue()
finally:
stream.close()
return output
| mit | 1,446,944,203,333,926,400 | 28.767123 | 116 | 0.6185 | false | 4.146947 | false | false | false |
jlopezpena/bearcart | bearcart/bearcart.py | 1 | 9096 | # -*- coding: utf-8 -*-
'''
Rickshaw
-------
Python Pandas + Rickshaw.js
'''
from __future__ import print_function
from __future__ import division
import time
import json
import os
from collections import defaultdict
from pkg_resources import resource_string
import pandas as pd
import numpy as np
from jinja2 import Environment, PackageLoader
class Chart(object):
'''Visualize Pandas Timeseries with Rickshaw.js'''
def __init__(self, data=None, width=750, height=400, plt_type='line',
colors=None, x_time=True, y_zero=False, **kwargs):
'''Generate a Rickshaw time series visualization with Pandas
Series and DataFrames.
The bearcart Chart generates the Rickshaw visualization of a Pandas
timeseries Series or DataFrame. The only required parameters are
        data, width, height, and plt_type. Colors is an optional parameter;
bearcart will default to the Rickshaw spectrum14 color palette if
none are passed. Keyword arguments can be passed to disable the
following components:
- x_axis
- y_axis
- hover
- legend
Parameters
----------
data: Pandas Series or DataFrame, default None
The Series or Dataframe must have a Datetime index.
        width: int, default 750
Width of the chart in pixels
        height: int, default 400
Height of the chart in pixels
plt_type: string, default 'line'
Must be one of 'line', 'area', 'scatterplot' or 'bar'
colors: dict, default None
Dict with keys matching DataFrame or Series column names, and hex
strings for colors
x_time: boolean, default True
If passed as False, the x-axis will have non-time values
y_zero: boolean, default False
The y-axis defaults to auto scaling. Pass True to set the min
y-axis value to 0.
kwargs:
Keyword arguments that, if passed as False, will disable the
following components: x_axis, y_axis, hover, legend
Returns
-------
Bearcart object
Examples
--------
        >>>vis = bearcart.Chart(data=df, width=800, height=300, plt_type='area')
        >>>vis = bearcart.Chart(data=series, plt_type='scatterplot',
                                colors={'Data 1': '#25aeb0',
                                        'Data 2': '#114e4f'})
#Disable x_axis and legend
>>>vis = bearcart.Chart(data=df, x_axis=False, legend=False)
'''
self.defaults = {'x_axis': True, 'y_axis': True, 'hover': True,
'legend': True}
self.env = Environment(loader=PackageLoader('bearcart', 'templates'))
#Colors need to be js strings
if colors:
self.colors = {key: "'{0}'".format(value)
for key, value in colors.iteritems()}
else:
self.colors = None
self.x_axis_time = x_time
self.renderer = plt_type
self.width = width
self.height = height
self.y_zero = y_zero
self.template_vars = {}
#Update defaults for passed kwargs
for key, value in kwargs.iteritems():
self.defaults[key] = value
#Get templates for graph elements
for att, val in self.defaults.iteritems():
render_vars = {}
if val:
if not self.x_axis_time:
if att == 'x_axis':
att = 'x_axis_num'
elif att == 'hover':
render_vars = {'x_hover': 'xFormatter: function(x)'
'{return Math.floor(x / 10) * 10}'}
temp = self.env.get_template(att + '.js')
self.template_vars.update({att: temp.render(render_vars)})
#Transform data into Rickshaw-happy JSON format
if data is not None:
self.transform_data(data)
def transform_data(self, data):
'''Transform Pandas Timeseries into JSON format
Parameters
----------
data: DataFrame or Series
Pandas DataFrame or Series must have datetime index
Returns
-------
JSON to object.json_data
Example
-------
>>>vis.transform_data(df)
>>>vis.json_data
'''
def type_check(value):
'''Type check values for JSON serialization. Native Python JSON
serialization will not recognize some Numpy data types properly,
            so they must be explicitly converted.'''
if pd.isnull(value):
return None
elif (isinstance(value, pd.tslib.Timestamp) or
isinstance(value, pd.Period)):
return time.mktime(value.timetuple())
elif isinstance(value, (int, np.integer)):
return int(value)
elif isinstance(value, (float, np.float_)):
return float(value)
elif isinstance(value, str):
return str(value)
else:
return value
objectify = lambda dat: [{"x": type_check(x), "y": type_check(y)}
for x, y in dat.iteritems()]
self.raw_data = data
if isinstance(data, pd.Series):
data.name = data.name or 'data'
self.json_data = [{'name': data.name, 'data': objectify(data)}]
elif isinstance(data, pd.DataFrame):
self.json_data = [{'name': x[0], 'data': objectify(x[1])}
for x in data.iteritems()]
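        # Illustrative sketch (not in the original module; values are made up):
        # for a two-point Series named 'power', json_data ends up shaped like
        #   [{'name': 'power',
        #     'data': [{'x': 1356998400.0, 'y': 1.5},
        #              {'x': 1357002000.0, 'y': 2.0}]}]
        # with x as epoch seconds produced by type_check() for datetime indexes.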
def _build_graph(self):
'''Build Rickshaw graph syntax with all data'''
#Set palette colors if necessary
if not self.colors:
self.palette = self.env.get_template('palette.js')
self.template_vars.update({'palette': self.palette.render()})
self.colors = {x['name']: 'palette.color()' for x in self.json_data}
template_vars = []
for index, dataset in enumerate(self.json_data):
group = 'datagroup' + str(index)
template_vars.append({'name': str(dataset['name']),
'color': self.colors[dataset['name']],
'data': 'json[{0}].data'.format(index)})
variables = {'dataset': template_vars, 'width': self.width,
'height': self.height, 'render': self.renderer}
if not self.y_zero:
variables.update({'min': "min: 'auto',"})
graph = self.env.get_template('graph.js')
self.template_vars.update({'graph': graph.render(variables)})
def create_chart(self, html_path='index.html', data_path='data.json',
js_path='rickshaw.min.js', css_path='rickshaw.min.css',
html_prefix=''):
'''Save bearcart output to HTML and JSON.
Parameters
----------
html_path: string, default 'index.html'
Path for html output
data_path: string, default 'data.json'
Path for data JSON output
js_path: string, default 'rickshaw.min.js'
If passed, the Rickshaw javascript library will be saved to the
path. The file must be named "rickshaw.min.js"
css_path: string, default 'rickshaw.min.css'
If passed, the Rickshaw css library will be saved to the
path. The file must be named "rickshaw.min.css"
html_prefix: Prefix path to be appended to all the other paths for file
creation, but not in the generated html file. This is needed if the
html file does not live in the same folder as the running python
script.
Returns
-------
HTML, JSON, JS, and CSS
Example
--------
>>>vis.create_chart(html_path='myvis.html', data_path='visdata.json'),
js_path='rickshaw.min.js',
cs_path='rickshaw.min.css')
'''
self.template_vars.update({'data_path': str(data_path),
'js_path': js_path,
'css_path': css_path})
self._build_graph()
html = self.env.get_template('bcart_template.html')
self.HTML = html.render(self.template_vars)
with open(os.path.join(html_prefix, html_path), 'w') as f:
f.write(self.HTML)
with open(os.path.join(html_prefix, data_path), 'w') as f:
json.dump(self.json_data, f, sort_keys=True, indent=4,
separators=(',', ': '))
if js_path:
js = resource_string('bearcart', 'rickshaw.min.js')
with open(os.path.join(html_prefix, js_path), 'w') as f:
f.write(js)
if css_path:
css = resource_string('bearcart', 'rickshaw.min.css')
with open(os.path.join(html_prefix, css_path), 'w') as f:
f.write(css)
| mit | -3,603,010,763,138,660,400 | 36.126531 | 80 | 0.542656 | false | 4.234637 | false | false | false |
TestInABox/stackInABox | stackinabox/util/responses/core.py | 2 | 2320 | """
Stack-In-A-Box: Python Responses Support
"""
from __future__ import absolute_import
import logging
import re
import responses
from stackinabox.stack import StackInABox
from stackinabox.util import deprecator
from stackinabox.util.tools import CaseInsensitiveDict
logger = logging.getLogger(__name__)
def responses_callback(request):
"""Responses Request Handler.
Converts a call intercepted by Responses to
the Stack-In-A-Box infrastructure
:param request: request object
:returns: tuple - (int, dict, string) containing:
int - the HTTP response status code
dict - the headers for the HTTP response
string - HTTP string response
"""
method = request.method
headers = CaseInsensitiveDict()
request_headers = CaseInsensitiveDict()
request_headers.update(request.headers)
request.headers = request_headers
uri = request.url
return StackInABox.call_into(method,
request,
uri,
headers)
def registration(uri):
"""Responses handler registration.
Registers a handler for a given URI with Responses
so that it can be intercepted and handed to
Stack-In-A-Box.
:param uri: URI used for the base of the HTTP requests
:returns: n/a
"""
# log the URI that is used to access the Stack-In-A-Box services
logger.debug('Registering Stack-In-A-Box at {0} under Python Responses'
.format(uri))
# tell Stack-In-A-Box what URI to match with
StackInABox.update_uri(uri)
# Build the regex for the URI and register all HTTP verbs
# with Responses
regex = re.compile(r'(http)?s?(://)?{0}:?(\d+)?/'.format(uri),
re.I)
METHODS = [
responses.DELETE,
responses.GET,
responses.HEAD,
responses.OPTIONS,
responses.PATCH,
responses.POST,
responses.PUT
]
for method in METHODS:
responses.add_callback(method,
regex,
callback=responses_callback)
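# Usage sketch (illustrative, not part of the original module): after calling
# registration('localhost'), a requests call such as
# requests.get('http://localhost/hello') issued inside an @responses.activate
# context matches the regex above and is routed into StackInABox.call_into().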
@deprecator.DeprecatedInterface("responses_registration", "registration")
def responses_registration(uri):
return registration(uri)
| apache-2.0 | 1,439,319,332,521,782,800 | 26.951807 | 75 | 0.620259 | false | 4.35272 | false | false | false |
Mirantis/mos-tempest-runner | helpers/subunit_shouldfail_filter.py | 3 | 3749 | #!/usr/bin/env python
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import optparse
import sys
from subunit import v2 as subunit_v2
from subunit.v2 import ByteStreamToStreamResult
from testtools import StreamResult
import yaml
def make_options():
parser = optparse.OptionParser(description=__doc__)
parser.add_option(
"--shouldfail-file",
type=str,
help="File with list of test ids that are expected to fail; "
"on failure their result will be changed to xfail; on success "
"they will be changed to error.",
dest="shouldfail_file",
action="append")
return parser
class ProcessedStreamResult(StreamResult):
def __init__(self, output, shouldfail):
self.output = output
self.shouldfail = shouldfail
def startTestRun(self):
self.output.startTestRun()
def stopTestRun(self):
self.output.stopTestRun()
def status(self, test_id=None, test_status=None, test_tags=None,
runnable=True, file_name=None, file_bytes=None, eof=False,
mime_type=None, route_code=None, timestamp=None):
if ((test_status in ['fail', 'success', 'xfail', 'uxsuccess', 'skip'])
and (test_id in self.shouldfail)):
if test_status == 'fail':
test_status = 'xfail'
elif test_status == 'success':
test_status = 'uxsuccess'
if self.shouldfail[test_id]:
self.output.status(test_id=test_id,
test_tags=test_tags,
file_name='shouldfail-info',
mime_type='text/plain; charset="utf8"',
file_bytes=self.shouldfail[test_id],
route_code=route_code,
timestamp=timestamp)
self.output.status(test_id=test_id, test_status=test_status,
test_tags=test_tags, runnable=runnable,
file_name=file_name, file_bytes=file_bytes,
mime_type=mime_type, route_code=route_code,
timestamp=timestamp)
def read_shouldfail_file(options):
shouldfail = {}
for path in options.shouldfail_file or ():
f = open(path, 'rb')
try:
content = yaml.safe_load(f)
for item in content:
if not isinstance(item, dict):
shouldfail[item] = None
else:
shouldfail.update(item)
finally:
f.close()
return shouldfail
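# Example shouldfail file (illustrative only; the test ids are hypothetical).
# yaml.safe_load() turns bare ids into keys with a None note and mappings into
# id -> note entries, matching read_shouldfail_file() above:
#
#   - tempest.api.compute.TestFlavors.test_list_flavors
#   - tempest.api.network.TestRouters.test_create_router: "expected failure, bug 123456"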
def main():
parser = make_options()
(options, args) = parser.parse_args()
output = subunit_v2.StreamResultToBytes(sys.stdout)
shouldfail = read_shouldfail_file(options)
result = ProcessedStreamResult(output, shouldfail)
converter = ByteStreamToStreamResult(source=sys.stdin,
non_subunit_name='process-stderr')
result.startTestRun()
converter.run(result)
result.stopTestRun()
if __name__ == '__main__':
main()
| apache-2.0 | 392,869,155,237,528,200 | 32.473214 | 78 | 0.583889 | false | 4.198208 | true | false | false |
gregthedoe/androtoolbox | androtoolbox/log.py | 1 | 1057 | from .adb import adb
_LOG_TAG_PROPERTY = 'log.tag.{tag}'
LOG_LEVELS = ('VERBOSE', 'DEBUG', 'INFO', 'WARN', 'ERROR', 'ASSERT')
def is_valid_log_level(level):
return level.upper() in LOG_LEVELS
def set_loggable_level_for_tag(tag, level='VERBOSE'):
"""
Set the minimum loggable level for a tag.
:param tag: TAG name
:param level: Log level.
"""
level = level.upper()
if not is_valid_log_level(level):
raise ValueError("Unknown log level %s" % level)
return adb.set_property(_LOG_TAG_PROPERTY.format(tag=tag), level)
def set_loggable_level_for_tags(tags, default_level='VERBOSE'):
"""
Set the minimum log level for a set of tags.
:param tags: A mapping of tags and their minimum loggable level.
:param default_level: If `tags` is a list use this level as the default.
"""
try:
for tag, level in tags.iteritems():
set_loggable_level_for_tag(tag, level)
except AttributeError:
for tag in tags:
set_loggable_level_for_tag(tag, default_level)
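# Usage sketch (illustrative; tag names are hypothetical):
#   set_loggable_level_for_tags({'MyActivity': 'DEBUG', 'HttpClient': 'WARN'})
# or, with a plain list sharing one level:
#   set_loggable_level_for_tags(['MyActivity', 'HttpClient'], 'VERBOSE')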
| mit | -7,698,050,352,898,194,000 | 27.567568 | 76 | 0.641438 | false | 3.409677 | false | false | false |
alejo8591/maker | projects/admin.py | 1 | 1464 | # encoding: utf-8
# Copyright 2013 maker
# License
"""
Project management: admin page
"""
from maker.projects.models import Project, Milestone, Task, TaskTimeSlot, TaskStatus
from django.contrib import admin
class ProjectAdmin(admin.ModelAdmin):
""" Project admin """
list_display = ('name', 'details', 'parent', 'manager', 'client')
search_fields = ['name']
class MilestoneAdmin(admin.ModelAdmin):
""" Milestone admin """
list_display = ('name', 'details', 'project')
search_fields = ['name']
class TaskAdmin(admin.ModelAdmin):
""" Task admin """
list_display = ('name', 'details', 'project', 'priority', 'parent', 'milestone', 'caller')
search_fields = ['name']
class TaskStatusAdmin(admin.ModelAdmin):
""" Task status admin """
list_display = ('name', 'details')
search_fields = ['name']
class TaskTimeSlotAdmin(admin.ModelAdmin):
""" Task time slot admin """
list_display = ('task', 'time_from', 'time_to', 'timezone', 'details')
date_hierarchy = 'time_from'
search_fields = ['task']
class TaskRecordAdmin(admin.ModelAdmin):
""" Task record admin """
list_display = ('task', 'record_type')
list_filter = ['record_type']
admin.site.register(Project, ProjectAdmin)
admin.site.register(Milestone, MilestoneAdmin)
admin.site.register(Task, TaskAdmin)
admin.site.register(TaskStatus, TaskStatusAdmin)
admin.site.register(TaskTimeSlot, TaskTimeSlotAdmin)
| mit | -8,405,489,967,769,106,000 | 30.826087 | 94 | 0.669399 | false | 3.678392 | false | false | false |
luci/luci-py | appengine/swarming/tools/android/setup_udev.py | 2 | 4842 | #!/usr/bin/env python
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Generates the file /etc/udev/rules.d/android_swarming_bot.rules to enable
automatic Swarming bot to be fired up when an Android device with USB debugging
is connected.
"""
__version__ = '0.1'
import getpass
import optparse
import os
import string
import subprocess
import sys
import tempfile
THIS_FILE = os.path.abspath(__file__)
ROOT_DIR = os.path.dirname(THIS_FILE)
HEADER = '# This file was AUTOMATICALLY GENERATED with %s\n' % THIS_FILE
RULE_FILE = '/etc/udev/rules.d/android_swarming_bot.rules'
LETTERS_AND_DIGITS = frozenset(string.ascii_letters + string.digits)
def gen_udev_rule(user, dev_filters):
"""Generates the content of the udev .rules file."""
# The command executed must exit immediately.
script = os.path.join(ROOT_DIR, 'udev_start_bot_deferred.sh')
items = [
'ACTION=="add"',
'SUBSYSTEM=="usb"',
]
items.extend(dev_filters)
# - sudo -u <user> is important otherwise a user writeable script would be run
# as root.
# - -H makes it easier to find the user's local files.
# - -E is important, otherwise the necessary udev environment variables won't
# be set. Also we don't want to run the script as root.
items.append('RUN+="/usr/bin/sudo -H -E -u %s %s"' % (user, script))
line = ', '.join(items)
# https://code.google.com/p/swarming/issues/detail?id=127
# TODO(maruel): Create rule for ACTION=="remove" which would send a signal to
# the currently running process.
# TODO(maruel): The add rule should try to find a currently running bot first.
return HEADER + line + '\n'
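# Illustrative output (assembled from the template above; the user name and
# script path are examples): for user 'swarming' and the default filter the
# generated rule line looks roughly like
#   ACTION=="add", SUBSYSTEM=="usb", ATTR{idVendor}=="18d1", RUN+="/usr/bin/sudo -H -E -u swarming /path/to/udev_start_bot_deferred.sh"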
def write_udev_rule(filepath):
"""Writes the udev rules file in /etc/udev/rules.d when run as root."""
with open(filepath, 'rb') as f:
content = f.read()
if os.path.isfile(RULE_FILE):
    print('Overwriting existing file')
with open(RULE_FILE, 'w+b') as f:
f.write(content)
print('Wrote %d bytes successfully to %s' % (len(content), RULE_FILE))
def work(user, dev_filters):
"""The guts of this script."""
content = gen_udev_rule(user, dev_filters)
print('WARNING: About to write in %s:' % RULE_FILE)
print('***')
sys.stdout.write(content)
print('***')
raw_input('Press enter to continue or Ctrl-C to cancel.')
handle, filepath = tempfile.mkstemp(
prefix='swarming_bot_udev', suffix='.rules')
os.close(handle)
try:
with open(filepath, 'w+') as f:
f.write(content)
command = ['sudo', sys.executable, THIS_FILE, '--file', filepath]
print('Running: %s' % ' '.join(command))
return subprocess.call(command)
finally:
os.remove(filepath)
def test_device_rule(device):
# To find your device:
# unbuffer udevadm monitor --environment --udev --subsystem-match=usb
# | grep DEVNAME
# udevadm info -a -n <value from DEVNAME>
#
# sudo udevadm control --log-priority=debug
# udevadm info --query all --export-db | less
cmd = ['sudo', 'udevadm', 'test', '--action=add', device]
print('Running: %s' % ' '.join(cmd))
return subprocess.call(cmd)
def main():
if not sys.platform.startswith('linux'):
print('Only tested on linux')
return 1
parser = optparse.OptionParser(
description=sys.modules[__name__].__doc__, version=__version__)
parser.add_option('--file', help=optparse.SUPPRESS_HELP)
parser.add_option(
'-d',
'--dev_filters',
default=[],
action='append',
help='udev filters to use; get device id with "lsusb" then udev details '
'with "udevadm info -a -n /dev/bus/usb/002/001"')
parser.add_option(
'--user',
default=getpass.getuser(),
help='User account to start the bot with')
parser.add_option('--test', help='Tests the rule for a device')
options, args = parser.parse_args()
if args:
parser.error('Unsupported arguments %s' % args)
if options.test:
return test_device_rule(options.test)
if options.file:
if options.user != 'root':
parser.error('When --file is used, expected to be run as root')
else:
if options.user == 'root':
parser.error('Run as the user that will be used to run the bot')
if not LETTERS_AND_DIGITS.issuperset(options.user):
parser.error('User must be [a-zA-Z0-9]+')
os.chdir(ROOT_DIR)
if not os.path.isfile(os.path.join(ROOT_DIR, 'swarming_bot.zip')):
print('First download swarming_bot.zip aside this script')
return 1
if options.file:
write_udev_rule(options.file)
return 0
# 18d1==Google Inc. but we'd likely want to filter more broadly.
options.dev_filters = options.dev_filters or ['ATTR{idVendor}=="18d1"']
work(options.user, options.dev_filters)
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 6,577,840,882,396,699,000 | 30.647059 | 80 | 0.668319 | false | 3.309638 | true | false | false |
marcin1j/griffith | lib/cover.py | 4 | 6903 | # -*- coding: UTF-8 -*-
__revision__ = '$Id$'
# Copyright (c) 2005-2009 Vasco Nunes, Piotr Ożarowski
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# You may use and distribute this software under the terms of the
# GNU General Public License, version 2 or later
import gtk
import os
import pango
import string
import sys
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter, A4
from reportlab.lib.units import mm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import Image
from reportlab.lib import colors
import db
import gutils
import version
exec_location = os.path.abspath(os.path.dirname(sys.argv[0]))
def cover_image(self,number):
filename = gutils.file_chooser(_("Select image"), \
action=gtk.FILE_CHOOSER_ACTION_OPEN, \
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, \
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
if filename[0]:
cover_image_process(self, filename[0], number)
def cover_image_process(self, filename, number):
size = self.widgets['print_cover']['ci_size'].get_active()
print_number = self.widgets['print_cover']['ci_number'].get_active()
if self.config.get('font', '') != '':
fontName = "custom_font"
pdfmetrics.registerFont(TTFont(fontName,self.config.get('font', '')))
else:
fontName = "Helvetica"
if size == 0:
#standard
cover_x=774
cover_y=518
elif size == 1:
#slim
cover_x=757;
cover_y=518
else:
#double slim
cover_x=757
cover_y=518
# A4 landscape definition
pageWidth = 842
pageHeight = 595
# hardcoded to A4
pos_x=(pageWidth-cover_x)/2;
pos_y=(pageHeight-cover_y)/2;
# make a pdf
# using a truetype font with unicode support
c = canvas.Canvas(os.path.join(self.griffith_dir, "cover.pdf"), \
(pageWidth, pageHeight))
c.setFont(fontName, 8)
# copyright line
c.drawString(20, 20 ,_("Cover generated by Griffith v").encode('utf-8') + \
version.pversion+" (C) 2004-2009 Vasco Nunes/Piotr Ozarowski - "+ \
_("Released Under the GNU/GPL License").encode('utf-8'))
# get movie information from db
movie = self.db.session.query(db.Movie).filter_by(number=number).first()
if movie is not None:
c.drawImage(filename, pos_x, pos_y, cover_x, cover_y)
if print_number:
c.setFillColor(colors.white)
c.rect((pageWidth/2)-13, 520, 26, 70, fill=1, stroke=0)
c.setFillColor(colors.black)
c.setFont(fontName, 10)
c.drawCentredString(pageWidth/2, 530, number)
# draw cover area
c.rect(pos_x, pos_y, cover_x, cover_y)
c.showPage()
c.save()
self.widgets['print_cover']['window_simple'].hide()
cover_file = os.path.join(self.griffith_dir, "cover.pdf")
if self.windows:
os.popen3("\"" + cover_file + "\"")
else:
os.popen3(self.pdf_reader + " " + cover_file)
def cover_simple(self, number):
size = self.widgets['print_cover']['cs_size'].get_active()
print_number = self.widgets['print_cover']['cs_include_movie_number'].get_active()
poster = self.widgets['print_cover']['cs_include_poster'].get_active()
if self.config.get('font', '')!='':
fontName = "custom_font"
pdfmetrics.registerFont(TTFont(fontName,self.config.get('font', '')))
else:
fontName = "Helvetica"
if size == 0:
#standard
cover_x=774
cover_y=518
elif size == 1:
#slim
cover_x=757;
cover_y=518
else:
#double slim
cover_x=757
cover_y=518
# A4 landscape definition
pageWidth = 842
pageHeight = 595
# hardcoded to A4
pos_x=(pageWidth-cover_x)/2;
pos_y=(pageHeight-cover_y)/2;
# make a pdf
c = canvas.Canvas(os.path.join(self.griffith_dir, "cover.pdf"), (pageWidth, pageHeight))
c.setFont(fontName,8)
# copyright line
c.drawString(20,20,_("Cover generated by Griffith v").encode('utf-8') + \
version.pversion+" (C) 2004-2009 Vasco Nunes/Piotr Ozarowski - "+ \
_("Released Under the GNU/GPL License").encode('utf-8'))
# draw cover area
c.rect(pos_x, pos_y, cover_x, cover_y)
# get movie information from db
movie = self.db.session.query(db.Movie).filter_by(number=number).first()
if movie is not None:
if print_number:
c.setFont(fontName, 10)
c.drawCentredString(pageWidth/2, 530, number)
c.setFont(fontName, 16)
c.rotate(90)
c.drawString(60, (-pageWidth/2)-8, movie.o_title.encode('utf-8'))
c.rotate(-90)
if movie.poster_md5:
filename = gutils.get_image_fname(movie.poster_md5, self.db)
if filename:
c.drawImage(filename, x=(pageWidth-30)/2, y=470, width=30, height=50)
# print movie info
c.setFont(fontName, 8)
textObject = c.beginText()
textObject.setTextOrigin(pageWidth-cover_x, 300)
textObject.setFont(fontName, 8)
textObject.textLine("%s: %s" % (_('Original Title'), movie.o_title))
textObject.textLine("%s: %s" % (_('Title'), movie.title))
textObject.textLine('')
textObject.textLine("%s: %s" % (_('Director'), movie.director))
textObject.textLine('')
textObject.textLine("%s: %s %s" % (_('Running Time'), movie.runtime, _(' min')))
textObject.textLine("%s: %s" % (_('Country'), movie.country))
textObject.textLine("%s: %s" % (_('Genre'), movie.genre))
textObject.textLine('')
c.drawText(textObject)
# draw bigger poster image
if poster and movie.poster_md5 and filename:
c.drawImage(filename, x=(pageWidth-(pageWidth-cover_x)-235), y=(pageHeight/2)-125, width=180, height=250)
c.showPage()
c.save()
self.widgets['print_cover']['window_simple'].hide()
cover_file = os.path.join(self.griffith_dir, 'cover.pdf')
if self.windows:
os.popen3("\"" + cover_file + "\"")
elif self.mac:
os.popen3("open -a Preview" + " " + cover_file)
else:
os.popen3(self.pdf_reader + " " + cover_file)
| gpl-2.0 | 2,888,838,638,765,462,500 | 33.51 | 117 | 0.627934 | false | 3.366829 | false | false | false |
gdude2002/InfoBot | bot/sections/url.py | 1 | 2959 | # coding=utf-8
from typing import List
import aiohttp
from bot.sections.base import BaseSection
from bot.utils import line_splitter
__author__ = "Gareth Coles"
class URLSection(BaseSection):
_type = "url"
cached_lines = []
def __init__(self, name, url=None, header="", footer=""):
super().__init__(name, header=header, footer=footer)
self.url = url or ""
async def process_command(self, command, data, data_string, client, message) -> str:
if command == "set":
if len(data) < 1:
return "Usage: `set \"<url>\"`"
url = data[0]
if not url:
return "Please supply a URL to retrieve text from"
while url[0] in "`<" and url[-1] in "`>" and url[0] == url[-1]:
url = url[1:-1]
session = aiohttp.ClientSession()
try:
async with session.get(url, timeout=30) as resp:
text = await resp.text()
self.cached_lines = self.split_paragraphs(text)
except Exception as e:
return "Failed to retrieve URL: `{}`".format(e)
else:
self.url = url
client.sections_updated(message.server)
return "URL set; retrieved `{}` messages' worth of text".format(len(self.cached_lines))
finally:
session.close()
if command == "get":
if not self.url:
return "No URL has been set."
return "Current URL: `{}`".format(self.url)
return "Unknown command: `{}`\n\nAvailable command: `set`, `get`".format(command)
def split_paragraphs(self, text):
parts = text.split("\n\n")
done = []
for i, part in enumerate(parts):
if i < len(parts) - 1:
done.append(part + "\n\u200b")
else:
done.append(part)
return line_splitter(done, 2000, split_only=True)
async def render(self) -> List[str]:
if not self.url:
return ["**A URL has not been set for this section**"]
session = aiohttp.ClientSession()
try:
async with session.get(self.url, timeout=30) as resp:
text = await resp.text()
self.cached_lines = self.split_paragraphs(text)
except Exception as e:
return ["**ERROR**: Failed to retrieve URL: `{}`".format(self.url, e)]
else:
return self.cached_lines
finally:
session.close()
async def show(self) -> List[str]:
return [
"section \"{}\" set \"{}\"".format(self.name, self.url)
]
def to_dict(self) -> dict:
return {
"url": self.url,
"header": self.header,
"footer": self.footer
}
@staticmethod
def from_dict(name, data) -> "URLSection":
return URLSection(name, **data)
| artistic-2.0 | -2,659,862,792,311,012,400 | 29.193878 | 103 | 0.520108 | false | 3.977151 | false | false | false |
Jeremy-WEI/python-mode | pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/misc.py | 17 | 3260 | # pylint: disable=W0511
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" Copyright (c) 2000-2010 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:[email protected]
Check source code is ascii only or has an encoding declaration (PEP 263)
"""
import re
from pylint.interfaces import IRawChecker
from pylint.checkers import BaseChecker
MSGS = {
'W0511': ('%s',
'fixme',
'Used when a warning note as FIXME or XXX is detected.'),
'W0512': ('Cannot decode using encoding "%s", unexpected byte at position %d',
'invalid-encoded-data',
'Used when a source line cannot be decoded using the specified '
'source file encoding.',
{'maxversion': (3, 0)}),
}
class EncodingChecker(BaseChecker):
"""checks for:
* warning notes in the code like FIXME, XXX
* encoding issues.
"""
__implements__ = IRawChecker
# configuration section name
name = 'miscellaneous'
msgs = MSGS
options = (('notes',
{'type' : 'csv', 'metavar' : '<comma separated values>',
'default' : ('FIXME', 'XXX', 'TODO'),
'help' : 'List of note tags to take in consideration, \
separated by a comma.'
}),
)
def _check_note(self, notes, lineno, line):
match = notes.search(line)
if match:
self.add_message('fixme', args=line[match.start():-1], line=lineno)
def _check_encoding(self, lineno, line, file_encoding):
try:
return unicode(line, file_encoding)
except UnicodeDecodeError, ex:
self.add_message('invalid-encoded-data', line=lineno,
args=(file_encoding, ex.args[2]))
def process_module(self, module):
"""inspect the source file to find encoding problem or fixmes like
notes
"""
stream = module.file_stream
stream.seek(0) # XXX may be removed with astroid > 0.23
if self.config.notes:
notes = re.compile('|'.join(self.config.notes))
else:
notes = None
if module.file_encoding:
encoding = module.file_encoding
else:
encoding = 'ascii'
for lineno, line in enumerate(stream):
line = self._check_encoding(lineno+1, line, encoding)
if line is not None and notes:
self._check_note(notes, lineno+1, line)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(EncodingChecker(linter))
| lgpl-3.0 | -2,651,672,956,548,905,500 | 35.222222 | 82 | 0.622086 | false | 4.174136 | false | false | false |
moreati/trac-gitsvn | trac/tests/perm.py | 4 | 11457 | from trac import perm
from trac.core import *
from trac.test import EnvironmentStub
import unittest
class DefaultPermissionStoreTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.DefaultPermissionStore,
perm.DefaultPermissionGroupProvider])
self.store = perm.DefaultPermissionStore(self.env)
def tearDown(self):
self.env.reset_db()
def test_simple_actions(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('john', 'WIKI_MODIFY'),
('john', 'REPORT_ADMIN'),
('kate', 'TICKET_CREATE')])
self.assertEquals(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
self.assertEquals(['TICKET_CREATE'],
self.store.get_user_permissions('kate'))
def test_simple_group(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('john', 'dev')])
self.assertEquals(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
def test_nested_groups(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('admin', 'dev'),
('john', 'admin')])
self.assertEquals(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
def test_mixed_case_group(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('Dev', 'WIKI_MODIFY'),
('Dev', 'REPORT_ADMIN'),
('Admin', 'Dev'),
('john', 'Admin')])
self.assertEquals(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
def test_builtin_groups(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('authenticated', 'WIKI_MODIFY'),
('authenticated', 'REPORT_ADMIN'),
('anonymous', 'TICKET_CREATE')])
self.assertEquals(['REPORT_ADMIN', 'TICKET_CREATE', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
self.assertEquals(['TICKET_CREATE'],
self.store.get_user_permissions('anonymous'))
def test_get_all_permissions(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('john', 'dev')])
expected = [('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('john', 'dev')]
for res in self.store.get_all_permissions():
self.failIf(res not in expected)
class TestPermissionRequestor(Component):
implements(perm.IPermissionRequestor)
def get_permission_actions(self):
return ['TEST_CREATE', 'TEST_DELETE', 'TEST_MODIFY',
('TEST_CREATE', []),
('TEST_ADMIN', ['TEST_CREATE', 'TEST_DELETE']),
('TEST_ADMIN', ['TEST_MODIFY'])]
class PermissionSystemTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.PermissionSystem,
perm.DefaultPermissionStore,
TestPermissionRequestor])
self.perm = perm.PermissionSystem(self.env)
def tearDown(self):
self.env.reset_db()
def test_all_permissions(self):
self.assertEqual({'EMAIL_VIEW': True, 'TRAC_ADMIN': True,
'TEST_CREATE': True, 'TEST_DELETE': True,
'TEST_MODIFY': True, 'TEST_ADMIN': True},
self.perm.get_user_permissions())
def test_simple_permissions(self):
self.perm.grant_permission('bob', 'TEST_CREATE')
self.perm.grant_permission('jane', 'TEST_DELETE')
self.perm.grant_permission('jane', 'TEST_MODIFY')
self.assertEqual({'TEST_CREATE': True},
self.perm.get_user_permissions('bob'))
self.assertEqual({'TEST_DELETE': True, 'TEST_MODIFY': True},
self.perm.get_user_permissions('jane'))
def test_meta_permissions(self):
self.perm.grant_permission('bob', 'TEST_CREATE')
self.perm.grant_permission('jane', 'TEST_ADMIN')
self.assertEqual({'TEST_CREATE': True},
self.perm.get_user_permissions('bob'))
self.assertEqual({'TEST_CREATE': True, 'TEST_DELETE': True,
'TEST_MODIFY': True, 'TEST_ADMIN': True},
self.perm.get_user_permissions('jane'))
def test_get_all_permissions(self):
self.perm.grant_permission('bob', 'TEST_CREATE')
self.perm.grant_permission('jane', 'TEST_ADMIN')
expected = [('bob', 'TEST_CREATE'),
('jane', 'TEST_ADMIN')]
for res in self.perm.get_all_permissions():
self.failIf(res not in expected)
def test_expand_actions_iter_7467(self):
# Check that expand_actions works with iterators (#7467)
perms = set(['EMAIL_VIEW', 'TRAC_ADMIN', 'TEST_DELETE', 'TEST_MODIFY',
'TEST_CREATE', 'TEST_ADMIN'])
self.assertEqual(perms, self.perm.expand_actions(['TRAC_ADMIN']))
self.assertEqual(perms, self.perm.expand_actions(iter(['TRAC_ADMIN'])))
class PermissionCacheTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.DefaultPermissionStore,
perm.DefaultPermissionPolicy,
TestPermissionRequestor])
self.perm_system = perm.PermissionSystem(self.env)
# by-pass DefaultPermissionPolicy cache:
perm.DefaultPermissionPolicy.CACHE_EXPIRY = -1
self.perm_system.grant_permission('testuser', 'TEST_MODIFY')
self.perm_system.grant_permission('testuser', 'TEST_ADMIN')
self.perm = perm.PermissionCache(self.env, 'testuser')
def tearDown(self):
self.env.reset_db()
def test_contains(self):
self.assertEqual(True, 'TEST_MODIFY' in self.perm)
self.assertEqual(True, 'TEST_ADMIN' in self.perm)
self.assertEqual(False, 'TRAC_ADMIN' in self.perm)
def test_has_permission(self):
self.assertEqual(True, self.perm.has_permission('TEST_MODIFY'))
self.assertEqual(True, self.perm.has_permission('TEST_ADMIN'))
self.assertEqual(False, self.perm.has_permission('TRAC_ADMIN'))
def test_require(self):
self.perm.require('TEST_MODIFY')
self.perm.require('TEST_ADMIN')
self.assertRaises(perm.PermissionError, self.perm.require, 'TRAC_ADMIN')
def test_assert_permission(self):
self.perm.assert_permission('TEST_MODIFY')
self.perm.assert_permission('TEST_ADMIN')
self.assertRaises(perm.PermissionError,
self.perm.assert_permission, 'TRAC_ADMIN')
def test_cache(self):
self.perm.assert_permission('TEST_MODIFY')
self.perm.assert_permission('TEST_ADMIN')
self.perm_system.revoke_permission('testuser', 'TEST_ADMIN')
# Using cached GRANT here
self.perm.assert_permission('TEST_ADMIN')
def test_cache_shared(self):
# we need to start with an empty cache here (#7201)
perm1 = perm.PermissionCache(self.env, 'testcache')
perm1 = perm1('ticket', 1)
perm2 = perm1('ticket', 1) # share internal cache
self.perm_system.grant_permission('testcache', 'TEST_ADMIN')
perm1.assert_permission('TEST_ADMIN')
self.perm_system.revoke_permission('testcache', 'TEST_ADMIN')
# Using cached GRANT here (from shared cache)
perm2.assert_permission('TEST_ADMIN')
class TestPermissionPolicy(Component):
implements(perm.IPermissionPolicy)
def __init__(self):
self.allowed = {}
self.results = {}
def grant(self, username, permissions):
self.allowed.setdefault(username, set()).update(permissions)
def revoke(self, username, permissions):
self.allowed.setdefault(username, set()).difference_update(permissions)
def check_permission(self, action, username, resource, perm):
result = action in self.allowed.get(username, set()) or None
self.results[(username, action)] = result
return result
class PermissionPolicyTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.DefaultPermissionStore,
perm.DefaultPermissionPolicy,
TestPermissionPolicy,
TestPermissionRequestor])
self.env.config.set('trac', 'permission_policies', 'TestPermissionPolicy')
self.policy = TestPermissionPolicy(self.env)
self.perm = perm.PermissionCache(self.env, 'testuser')
def tearDown(self):
self.env.reset_db()
def test_no_permissions(self):
self.assertRaises(perm.PermissionError,
self.perm.assert_permission, 'TEST_MODIFY')
self.assertRaises(perm.PermissionError,
self.perm.assert_permission, 'TEST_ADMIN')
self.assertEqual(self.policy.results,
{('testuser', 'TEST_MODIFY'): None,
('testuser', 'TEST_ADMIN'): None})
def test_grant_revoke_permissions(self):
self.policy.grant('testuser', ['TEST_MODIFY', 'TEST_ADMIN'])
self.assertEqual('TEST_MODIFY' in self.perm, True)
self.assertEqual('TEST_ADMIN' in self.perm, True)
self.assertEqual(self.policy.results,
{('testuser', 'TEST_MODIFY'): True,
('testuser', 'TEST_ADMIN'): True})
def test_policy_chaining(self):
self.env.config.set('trac', 'permission_policies', 'TestPermissionPolicy,DefaultPermissionPolicy')
self.policy.grant('testuser', ['TEST_MODIFY'])
system = perm.PermissionSystem(self.env)
system.grant_permission('testuser', 'TEST_ADMIN')
self.assertEqual(list(system.policies),
[self.policy,
perm.DefaultPermissionPolicy(self.env)])
self.assertEqual('TEST_MODIFY' in self.perm, True)
self.assertEqual('TEST_ADMIN' in self.perm, True)
self.assertEqual(self.policy.results,
{('testuser', 'TEST_MODIFY'): True,
('testuser', 'TEST_ADMIN'): None})
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DefaultPermissionStoreTestCase, 'test'))
suite.addTest(unittest.makeSuite(PermissionSystemTestCase, 'test'))
suite.addTest(unittest.makeSuite(PermissionCacheTestCase, 'test'))
suite.addTest(unittest.makeSuite(PermissionPolicyTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -1,151,009,814,084,210,800 | 41.276753 | 106 | 0.585581 | false | 4.237056 | true | false | false |
tjelvar-olsson/tinyfasta | tinyfasta/__init__.py | 1 | 5416 | """Package for parsing and generating FASTA files of biological sequences.
Use the :class:`tinyfasta.FastaParser` class to parse FASTA files.
To generate FASTA files use the :func:`tinyfasta.FastaRecord.create` static
method to create :class:`tinyfasta.FastaRecord` instances, which can be written
to file.
"""
__version__ = "0.1.0"
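# Quick usage sketch (illustrative; "input.fasta" is a hypothetical path):
#
#     for record in FastaParser("input.fasta"):
#         if record.description.contains("demo"):
#             print(record)
#
#     new_record = FastaRecord.create(">seq1 demo", "ATGC" * 30)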
class _FastaRecordComponent(object):
"""Component of a FastaRecort."""
def contains(self, search_term):
"""Return True if the component contains the search term.
:param search_term: string or compiled regular expression to search for
:returns: bool
"""
if hasattr(search_term, "search"):
return search_term.search(self._content) is not None
return self._content.find(search_term) != -1
class Sequence(_FastaRecordComponent):
"""Class representing a biological sequence."""
def __init__(self):
self._sequences = []
def __str__(self):
return self._content
def __len__(self):
"""Return the length of the biological sequence."""
return sum(len(s) for s in self._sequences)
@property
def _content(self):
"""Return the sequence as a string.
:returns: str
"""
return ''.join(self._sequences)
def add_sequence_line(self, sequence_line):
"""
Add a sequence line to the :class:`tinyfasta.Sequence` instance.
This function can be called more than once. Each time the function is
called the :class:`tinyfasta.Sequence` is extended by the sequence line
provided.
:param sequence_line: string representing (part of) a sequence
"""
self._sequences.append( sequence_line.strip() )
def format_line_length(self, line_length=80):
"""Format line length used to represent the sequence.
The full sequence is stored as list of shorter sequences. These shorter
sequences are used verbatim when writing out the
:class:`tinyfasta.FastaRecord` over several lines.
:param line_length: length of the sequences used to make up the full
sequence
"""
def string_to_list(seq, n):
"""Return list of strings of length n."""
return [seq[i:i+n] for i in range(0, len(seq), n)]
self._sequences = string_to_list(self._content, line_length)
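        # Illustrative effect (not original commentary): a 200-base sequence
        # reformatted with line_length=80 is stored as chunks of 80, 80 and 40
        # characters; _content (and hence __str__) re-joins them unchanged.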
class FastaRecord(object):
"""Class representing a FASTA record."""
class Description(_FastaRecordComponent):
"""Description line in a :class:`tinyfasta.FastaRecord`."""
def __init__(self, description):
self.update(description)
def __str__(self):
return self._content
def update(self, description):
"""Update the content of the description.
This function can be used to replace the existing description with
a new one.
:param description: new description string
"""
if not description.startswith(">"):
description = ">{}".format(description)
self._content = description.strip()
@staticmethod
def create(description, sequence):
"""Return a FastaRecord.
:param description: description string
:param sequence: full sequence string
:returns: :class:`tinyfasta.FastaRecord`
"""
fasta_record = FastaRecord(description)
fasta_record.add_sequence_line(sequence)
fasta_record.sequence.format_line_length()
return fasta_record
def __init__(self, description):
"""Initialise an instance of the :class:`tinyfasta.FastaRecord` class.
:param description: description string
"""
self.description = FastaRecord.Description(description)
self.sequence = Sequence()
def __str__(self):
"""String representation of the :class:`tinyfasta.FastaRecord` instance."""
lines = [str(self.description),]
lines.extend(self.sequence._sequences)
return '\n'.join(lines)
def __len__(self):
"""Return the length of the biological sequence."""
return len(self.sequence)
def add_sequence_line(self, sequence_line):
"""Add a sequence line to the :class:`tinyfasta.FastaRecord` instance.
This function can be called more than once. Each time the function is
called the :attr:`tinyfasta.sequence` is extended by the sequence line
provided.
:param sequence_line: string representing (part of) a sequence
"""
self.sequence.add_sequence_line(sequence_line)
class FastaParser(object):
"""Class for parsing FASTA files."""
def __init__(self, fpath):
"""Initialise an instance of the FastaParser.
:param fpath: path to the FASTA file to be parsed
"""
self.fpath = fpath
def __iter__(self):
"""Yield FastaRecord instances."""
fasta_record = None
with open(self.fpath, 'r') as fh:
for line in fh:
if line.startswith('>'):
if fasta_record:
yield fasta_record
fasta_record = FastaRecord(line)
else:
fasta_record.add_sequence_line(line)
yield fasta_record
| mit | 1,464,712,525,209,946,400 | 32.639752 | 83 | 0.605059 | false | 4.513333 | false | false | false |
FrauBluher/ShR2 | Web Stack/webapp/models.py | 2 | 9505 | from django.db import models
from django.contrib.auth.models import User
from microdata.models import Device, Appliance
from django.conf import settings
from recurrence.fields import RecurrenceField
from paintstore.fields import ColorPickerField
# Create your models here.
class EventNotification(models.Model):
"""
Notification sent to users via email whenever a notable event is detected.
This class is not currently in use since the system is not set up in such
a way as to detect any events. However, the notification framework is in
place such that when the functionality is added, this class should be called
in response to an event.
These notifications can be added/modified via the admin interface.
"""
description = models.CharField(
max_length=300,
help_text="Label to notification as shown to a user"
)
""" The description of the event notification as a user would see it when selecting/deselecting the notification in the settings interface"""
keyword = models.CharField(
max_length=300,
help_text="Keyword used to launch manage.py email_event",
)
""" Used to trigger the event notification in the django manager."""
watts_above_average = models.FloatField()
""" Proof of concept field to provide a threshold. If a group of appliances surpasses the threshold for a period of time, then send the email."""
period_of_time = models.FloatField(
help_text="Period of time to watch for irregularity"
)
""" Proof of concept field to provide a threshold. If a group of appliances surpasses the threshold for a period of time, then send the email."""
appliances_to_watch = models.ManyToManyField(Appliance)
""" Assemble a group of appliances to watch. Could be one or many."""
email_subject = models.CharField(max_length=300)
""" An email-friendly subject for the event notification."""
email_body = models.FileField(
help_text="Template file for email body. {{ x }} denotes x is a template variable",
upload_to="event"
)
""" A template used to generate the notification email body."""
def __unicode__(self):
return self.description
class IntervalNotification(models.Model):
"""
Notifications sent to users when a specified period has elapsed.
This class is also proof-of-concept, and it relies upon the `Amazon Simple Email Service <http://aws.amazon.com/ses/>`_.
An email will be sent to users who opt in to the notification summarizing their devices' energy usage over the specified
period.
"""
description = models.CharField(
max_length=300,
help_text="Label to notification as shown to a user",
)
""" The description of the event notification as a user would see it when selecting/deselecting the notification in the settings interface"""
recurrences = RecurrenceField(blank=True, null=True)
""" This field is treated much like a Google Calendars recurrence field. Provides an easy way for an admin to define new periods of time."""
email_subject = models.CharField(max_length=300)
""" An email-friendly subject for the event notification."""
email_body = models.FileField(
help_text="Template file for email body. {{ x }} denotes template variable",
upload_to="interval"
)
""" A template used to generate the notification email body."""
def __unicode__(self):
return self.description
class Notification(models.Model):
"""
DEPRECATED
"""
user = models.OneToOneField(User)
interval_notification = models.ManyToManyField(IntervalNotification)
def __unicode__(self):
return 'Notification '+str(self.pk)
class UserSettings(models.Model):
"""
An encapsulating module that links a user's settings together.
This model can be extended to include new settings that may come to be in the future.
"""
user = models.OneToOneField(User)
""" The related model for a settings model."""
interval_notification = models.ManyToManyField(IntervalNotification, blank=True)
""" A list of interval notifications that the user has opted in to. Default to none."""
event_notification = models.ManyToManyField(EventNotification, blank=True)
""" A list of event notifications that the user has opted in to. Default to none."""
class UtilityCompany(models.Model):
"""
A placeholder class to describe a Utility Company.
Since PG&E is the only company that was developed on during the proof-of-concept phase,
it is the company that was used to model the pricing structures. In the future, in order
to integrate new types of companies, a Utility Company model should reflect how the Utility
Company calculates cost.
"""
description = models.CharField(max_length=300)
""" A label that describes what company this is. Used for selection."""
class Meta:
verbose_name_plural = "Utility Companies"
def __unicode__(self):
return self.description
class RatePlan(models.Model):
"""
The base class that describes how a user is charged in the Utility Company.
This class is linked to :class:`webapp.models.UtilityCompany` via a ForeignKey.
In addition, the class contains a list of :class:`webapp.models.Tier` objects
that describe how the charges change based on usage.
"""
utility_company = models.ForeignKey(UtilityCompany)
""" Utility company relation. Describe who owns the :class:`webapp.models.RatePlan`"""
description = models.CharField(max_length=300)
""" A short description for the user when selecting their :class:`webapp.models.RatePlan`."""
data_source = models.URLField()
""" A simple URL field that links to the source of the data for this :class:`webapp.models.RatePlan`."""
min_charge_rate = models.FloatField(help_text="$ Per meter per day")
""" The minimum amount charged to a user's account. Not currently in use."""
california_climate_credit = models.FloatField(help_text="$ Per household, per semi-annual payment occurring in the April and October bill cycles")
""" A credit applied to a user's account twice yearly. Not currently in use."""
def __unicode__(self):
return self.utility_company.__unicode__() + ": " + self.description
class Tier(models.Model):
"""
A class that defines the cost and threshold of a :class:`webapp.models.RatePlan`.
A :class:`webapp.models.RatePlan` typically has 4-5 :class:`webapp.models.Tier` objects
as a relation. These objects keep track of the cost modifier as well as the KWh threshold
for a given device.
"""
rate_plan = models.ForeignKey(RatePlan)
""" This object is related to a :class:`webapp.models.RatePlan`."""
tier_level = models.IntegerField(blank=True, null=True)
""" An Integer, starting at 1, indicating the current level of the device."""
max_percentage_of_baseline = models.FloatField(help_text="blank for no maximum",blank=True, null=True)
""" This defines the threshold for a given :class:`webapp.models.Tier`. I.e. 100% - 130%"""
rate = models.FloatField(help_text="$",blank=True, null=True)
""" The actual cost of a KWh at this level."""
chart_color = ColorPickerField()
""" Color used by charts when graphing a :class:`webapp.models.Tier`."""
def __unicode__(self):
return 'Tier ' + str(self.tier_level)
class Territory(models.Model):
"""
A :class:`webapp.models.Territory` defines specifically key fields associated with a :class:`webapp.models.RatePlan`.
This class specifies the base rates of a given :class:`webapp.models.RatePlan` as well as defining
the winter and summer seasons for seasonal pricing.
"""
rate_plan = models.ForeignKey(RatePlan)
""" This object is related to a :class:`webapp.models.RatePlan`."""
description = models.CharField(max_length=300)
""" A short description for the user when selecting their :class:`webapp.models.RatePlan`."""
data_source = models.URLField()
""" A simple URL field that links to the source of the data for this :class:`webapp.models.RatePlan`."""
summer_start = models.IntegerField(blank=True,null=True,help_text="Specify Month of year")
""" A month of the year that specifies the start of summer. 1-12."""
winter_start = models.IntegerField(blank=True,null=True,help_text="Specify Month of year")
""" A month of the year that specifies the start of winter. 1-12."""
summer_rate = models.FloatField(help_text="Baseline quantity (kWh per day)")
""" The base rate for the summer season."""
winter_rate = models.FloatField(help_text="Baseline quantity (kWh per day)")
""" The base rate for the winter season."""
class Meta:
verbose_name_plural = "Territories"
def __unicode__(self):
return self.description
class DeviceWebSettings(models.Model):
"""
An encapsulating module that links a device's settings together.
This model can be extended to include new settings that may come to be in the future.
"""
device = models.OneToOneField(Device)
utility_companies = models.ManyToManyField(UtilityCompany, default=[1])
rate_plans = models.ManyToManyField(RatePlan, default=[1])
territories = models.ManyToManyField(Territory, default=[1])
current_tier = models.ForeignKey(Tier, default=1)
class DashboardSettings(models.Model):
user = models.OneToOneField(User)
stack = models.BooleanField(
default=True,
help_text="Specifies the default behavior for a graph: stacked or unstacked line chart"
)
| mit | 9,084,803,312,958,863,000 | 38.115226 | 149 | 0.716465 | false | 4.179859 | false | false | false |
blockchain-certificates/cert-viewer | cert_viewer/helpers.py | 2 | 1312 | import binascii
import sys
from cert_core import Chain, UnknownChainError
unhexlify = binascii.unhexlify
hexlify = binascii.hexlify
if sys.version > '3':
unhexlify = lambda h: binascii.unhexlify(h.encode('utf8'))
hexlify = lambda b: binascii.hexlify(b).decode('utf8')
def obfuscate_email_display(email):
"""Partially hides email before displaying"""
hidden_email_parts = email.split("@")
hidden_email = hidden_email_parts[0][:2] + ("*" * (len(hidden_email_parts[0]) - 2)) + "@" + hidden_email_parts[1]
return hidden_email
def get_tx_lookup_chain(chain, txid):
if chain == Chain.bitcoin_testnet:
return 'https://live.blockcypher.com/btc-testnet/tx/' + txid
elif chain == Chain.bitcoin_mainnet:
return 'https://blockchain.info/tx/' + txid
elif chain == Chain.bitcoin_regtest or chain == Chain.mockchain:
return 'This has not been issued on a blockchain and is for testing only'
elif chain == Chain.ethereum_mainnet:
return 'https://api.etherscan.io/tx/' + txid
elif chain == Chain.ethereum_ropsten:
return 'https://ropsten.etherscan.io/tx/' + txid
else:
raise UnknownChainError(
'unsupported chain (%s) requested with blockcypher collector. Currently only testnet and mainnet are supported' % chain)
| mit | -3,548,617,908,347,158,500 | 38.757576 | 132 | 0.681402 | false | 3.555556 | false | false | false |
yasir1brahim/OLiMS | lims/__init__.py | 2 | 9314 | import warnings
import pkg_resources
# __version__ = pkg_resources.get_distribution("bika.lims").version
# import this to create messages in the bika domain.
from dependencies.dependency import MessageFactory
bikaMessageFactory = MessageFactory('bika')
from dependencies.dependency import PloneMessageFactory as PMF
# import this to log messages
import logging
logger = logging.getLogger('Bika')
from lims.validators import *
from lims.config import *
from lims.permissions import *
# from dependencies.dependency import ModuleSecurityInfo, allow_module
from dependencies.dependency import process_types, listTypes
from dependencies.dependency import registerDirectory
from dependencies.dependency import ContentInit, ToolInit, getToolByName
from dependencies.dependency import PloneMessageFactory
from dependencies.dependency import IPloneSiteRoot
from dependencies.dependency import EXTENSION
# from dependencies.dependency import _profile_registry as profile_registry
# allow_module('AccessControl')
# allow_module('bika.lims')
# allow_module('bika.lims.config')
# allow_module('bika.lims.permissions')
# allow_module('bika.lims.utils')
# allow_module('json')
# allow_module('pdb')
# allow_module('zope.i18n.locales')
# allow_module('zope.component')
# allow_module('plone.registry.interfaces')
def initialize(context):
from content.analysis import Analysis
from content.analysiscategory import AnalysisCategory
from content.analysisrequest import AnalysisRequest
from content.analysisrequestsfolder import AnalysisRequestsFolder
from content.analysisservice import AnalysisService
from content.analysisspec import AnalysisSpec
from content.arimport import ARImport
from content.arimportitem import ARImportItem
from content.arpriority import ARPriority
from content.analysisprofile import AnalysisProfile
from content.arreport import ARReport
from content.artemplate import ARTemplate
from content.attachment import Attachment
from content.attachmenttype import AttachmentType
from content.batch import Batch
from content.batchfolder import BatchFolder
from content.batchlabel import BatchLabel
from content.bikaschema import BikaSchema
from content.bikasetup import BikaSetup
from content.calculation import Calculation
from content.client import Client
from content.clientfolder import ClientFolder
from content.contact import Contact
from content.container import Container
from content.containertype import ContainerType
from content.department import Department
from content.duplicateanalysis import DuplicateAnalysis
from content.instrument import Instrument
from content.instrumentcalibration import InstrumentCalibration
from content.instrumentcertification import InstrumentCertification
from content.instrumentmaintenancetask import InstrumentMaintenanceTask
from content.instrumentscheduledtask import InstrumentScheduledTask
from content.instrumentvalidation import InstrumentValidation
from content.instrumenttype import InstrumentType
from content.invoice import Invoice
from content.invoicebatch import InvoiceBatch
from content.invoicefolder import InvoiceFolder
from content.labcontact import LabContact
from content.laboratory import Laboratory
from content.labproduct import LabProduct
from content.manufacturer import Manufacturer
from content.method import Method
from content.methods import Methods
from content.multifile import Multifile
from content.organisation import Organisation
from content.person import Person
from content.preservation import Preservation
from content.pricelist import Pricelist
from content.pricelistfolder import PricelistFolder
from content.queryfolder import QueryFolder
from content.query import Query
from content.referenceanalysis import ReferenceAnalysis
from content.referencedefinition import ReferenceDefinition
from content.referencesample import ReferenceSample
from content.referencesamplesfolder import ReferenceSamplesFolder
from content.rejectanalysis import RejectAnalysis
from content.report import Report
from content.reportfolder import ReportFolder
from content.sample import Sample
from content.samplecondition import SampleCondition
from content.samplematrix import SampleMatrix
from content.samplepartition import SamplePartition
from content.samplepoint import SamplePoint
from content.storagelocation import StorageLocation
from content.samplesfolder import SamplesFolder
from content.sampletype import SampleType
from content.samplingdeviation import SamplingDeviation
from content.srtemplate import SRTemplate
from content.subgroup import SubGroup
from content.supplier import Supplier
from content.suppliercontact import SupplierContact
from content.supplyorderfolder import SupplyOrderFolder
from content.supplyorder import SupplyOrder
from content.worksheet import Worksheet
from content.worksheetfolder import WorksheetFolder
from content.worksheettemplate import WorksheetTemplate
from controlpanel.bika_analysiscategories import AnalysisCategories
from controlpanel.bika_analysisservices import AnalysisServices
from controlpanel.bika_analysisspecs import AnalysisSpecs
from controlpanel.bika_analysisprofiles import AnalysisProfiles
from controlpanel.bika_artemplates import ARTemplates
from controlpanel.bika_arpriorities import ARPriorities
from controlpanel.bika_attachmenttypes import AttachmentTypes
from controlpanel.bika_batchlabels import BatchLabels
from controlpanel.bika_calculations import Calculations
from controlpanel.bika_containers import Containers
from controlpanel.bika_containertypes import ContainerTypes
from controlpanel.bika_departments import Departments
from controlpanel.bika_instruments import Instruments
from controlpanel.bika_instrumenttypes import InstrumentTypes
from controlpanel.bika_labcontacts import LabContacts
from controlpanel.bika_labproducts import LabProducts
from controlpanel.bika_manufacturers import Manufacturers
from controlpanel.bika_preservations import Preservations
from controlpanel.bika_referencedefinitions import ReferenceDefinitions
from controlpanel.bika_sampleconditions import SampleConditions
from controlpanel.bika_samplematrices import SampleMatrices
from controlpanel.bika_samplepoints import SamplePoints
from controlpanel.bika_storagelocations import StorageLocations
from controlpanel.bika_sampletypes import SampleTypes
from controlpanel.bika_samplingdeviations import SamplingDeviations
from controlpanel.bika_srtemplates import SRTemplates
from controlpanel.bika_subgroups import SubGroups
from controlpanel.bika_suppliers import Suppliers
from controlpanel.bika_worksheettemplates import WorksheetTemplates
content_types, constructors, ftis = process_types(
listTypes(PROJECTNAME),
PROJECTNAME)
# Register each type with it's own Add permission
# use ADD_CONTENT_PERMISSION as default
allTypes = zip(content_types, constructors)
for atype, constructor in allTypes:
kind = "%s: Add %s" % (config.PROJECTNAME, atype.portal_type)
perm = ADD_CONTENT_PERMISSIONS.get(atype.portal_type,
ADD_CONTENT_PERMISSION)
ContentInit(kind,
content_types = (atype,),
permission = perm,
extra_constructors = (constructor,),
fti = ftis,
).initialize(context)
def deprecated(comment=None, replacement=None):
""" A decorator which can be used to mark functions as deprecated.
Emits a DeprecationWarning showing the module and method being flagged
as deprecated. If replacement is set, the warn will also show which is
the function or class to be used instead.
"""
def old(oldcall):
def new(*args, **kwargs):
message = "Deprecated: '%s.%s'" % \
(oldcall.__module__,
oldcall.__name__)
if replacement is not None:
message += ". Use '%s.%s' instead" % \
(replacement.__module__,
replacement.__name__)
if comment is not None:
message += ". %s" % comment
warnings.warn(message, category=DeprecationWarning, stacklevel=2)
return oldcall(*args, **kwargs)
return new
return old
class _DeprecatedClassDecorator(object):
""" A decorator which can be used to mark symbols as deprecated.
Emits a DeprecationWarning showing the symbol being flagged as
deprecated. For add comments, use deprecated() instead of it
"""
def __call__(self, symbol):
message = "Deprecated: '%s.%s'" % \
(symbol.__module__,
symbol.__name__)
warnings.warn(message, category=DeprecationWarning, stacklevel=2)
return symbol
deprecatedsymbol = _DeprecatedClassDecorator()
del _DeprecatedClassDecorator
def enum(**enums):
return type('Enum', (), enums)
| agpl-3.0 | -8,388,926,910,800,973,000 | 44.213592 | 78 | 0.762293 | false | 4.858633 | false | false | false |
mdavidsaver/jmbgsddb | tools/jb_plot.py | 2 | 6208 |
from __future__ import print_function
import sys
import numpy
from matplotlib.pylab import *
def plt_ref_orbit(s, lng):
subplot(1, 2, 1)
plot(s, lng[:, 0], '-b')
xlabel('s [m]'); ylabel('phase [rad]')
subplot(1, 2, 2)
plot(s, lng[:, 1], '-b')
xlabel('s [m]'); ylabel('E_k [MeV]')
def plt_moment0(s, moment0):
for i, L in zip(range(6),
('x [mm]', 'p_x [mrad]', 'y [mm]', 'p_y [mrad]',
'z [rad]', 'p_z')):
subplot(3, 2, i+1)
for j in range(5):
plot(s, moment0[:, j, i], '-b')
plot(s, moment0[:, 5, i], '-r')
xlabel('s [m]'); ylabel(L)
def plt_moment1(s, moment1):
for i, L in zip(range(6),
('s_x [mm]', 's_p_x [mrad]', 's_y [mm]', 's_p_y [mrad]',
's_z [rad]', 's_p_z')):
subplot(3, 2, i+1)
for j in range(5):
plot(s, moment1[:, j, i], '-b')
plot(s, moment1[:, 5, i], '-r')
xlabel('s [m]'); ylabel(L)
def plt_moment0_diff(s, moment0):
for i, L in zip(range(6),
('x [mm]', 'p_x [mrad]', 'y [mm]', 'p_y [mrad]',
'z [rad]', 'p_z')):
subplot(3, 2, i+1)
for j in range(5):
plot(s, moment0[:, j, i], '-b')
plot(s, moment0[:, 5, i], '-r')
xlabel('s [m]'); ylabel(L)
def plt_moment1_diff(s, moment1):
for i, L in zip(range(6),
('s_x [mm]', 's_p_x [mrad]', 's_y [mm]', 's_p_y [mrad]',
's_z [rad]', 's_p_z')):
subplot(3, 2, i+1)
for j in range(5):
plot(s, moment1[:, j, i], '-b')
plot(s, moment1[:, 5, i], '-r')
xlabel('s [m]'); ylabel(L)
def rd_long(file_name):
file = open(file_name, 'r')
s = []; lng = []
first = True
for line in file:
fields = line.strip().split()
s = numpy.append(s, float(fields[0]))
term = numpy.array([[fields[1], fields[2]]], dtype=float)
if first:
lng = term
first = False
else:
lng = numpy.append(lng, term, 0)
return [s, lng]
def rd_long_TLM(file_name):
file = open(file_name, 'r')
s = []; lng = []
first = True
for line in file:
fields = line.strip().split()
s = numpy.append(s, float(fields[0]))
term = numpy.array([[fields[2], fields[1]]], dtype=float)
if first:
lng = term
first = False
else:
lng = numpy.append(lng, term, 0)
return [s, lng]
def rd_data(file_name):
file = open(file_name, 'r')
s = []; moment0 = []; padding = numpy.array([NaN, NaN, NaN, NaN, NaN, NaN])
first = True
for line in file:
fields = line.strip().split()
s = numpy.append(s, float(fields[0]))
if len(fields) == 19:
term = numpy.array([[fields[1:7], fields[7:13],
padding, padding, padding,
fields[13:19]]], dtype=float)
else:
term = numpy.array([[fields[1:7], fields[7:13], fields[13:19],
fields[19:25], fields[25:31],
fields[31:37]]], dtype=float)
if first:
moment0 = term
first = False
else:
moment0 = numpy.append(moment0, term, 0)
return [s, moment0]
def plt_long(fig_no, title, s, lng):
fig = figure(fig_no)
subplots_adjust(hspace=0.6) # default is 0.2.
subplots_adjust(wspace=0.4) # default is 0.2.
suptitle(title)
plt_ref_orbit(s, lng)
return fig
def plt0(fig_no, title, s, moment0):
fig = figure(fig_no)
subplots_adjust(hspace=0.6) # default is 0.2.
subplots_adjust(wspace=0.4) # default is 0.2.
suptitle(title)
plt_moment0(s, moment0)
return fig
def plt1(fig_no, title, s, moment1):
fig = figure(fig_no)
subplots_adjust(hspace=0.6) # default is 0.2.
subplots_adjust(wspace=0.4) # default is 0.2.
suptitle(title)
plt_moment0(s, moment1)
return fig
#[file_name1, file_name2] = [sys.argv[1], sys.argv[2]]
file_name1 = '/home/johan/git_repos/flame/build/src/ref_orbit.txt'
file_name2 = '/home/johan/tlm_workspace/TLM_JB/tab_jb.txt'
file_name3 = '/home/johan/git_repos/flame/build/src/moment0.txt'
file_name4 = '/home/johan/git_repos/flame/build/src/moment1.txt'
file_name5 = '/home/johan/tlm_workspace/TLM_JB/moment0_TLM.txt'
file_name6 = '/home/johan/tlm_workspace/TLM_JB/moment1_TLM.txt'
[s, lng] = rd_long(file_name1)
lng[:, 1] /= 1e6
[s, lng_TLM] = rd_long_TLM(file_name2)
[s, moment0] = rd_data(file_name3)
[s, moment1] = rd_data(file_name4)
[s_TLM, moment0_TLM] = rd_data(file_name5)
[s_TLM, moment1_TLM] = rd_data(file_name6)
lng_diff = lng - lng_TLM
moment0_diff = moment0 - moment0_TLM
moment1_diff = moment1 - moment1_TLM
fig1 = plt_long(1, 'Ref Orbit for Corrected TLM', s, lng_TLM)
fig2 = plt_long(2, 'Ref Orbit for FLAME', s, lng)
fig3 = plt_long(3, 'Ref Orbit Difference Between FLAME and Corrected TLM',
s, lng_diff)
fig4 = plt0(4, 'Orbit for Corrected TLM', s, moment0_TLM)
fig5 = plt0(5, 'Orbit for FLAME', s, moment0)
fig6 = plt0(6, 'Orbit Difference Between FLAME and Corrected TLM',
s, moment0_diff)
fig7 = plt1(7, 'RMS Beam Size for Corrected TLM', s, moment1_TLM)
fig8 = plt1(8, 'RMS Beam Size for FLAME', s, moment1)
fig9 = plt1(9, 'RMS Beam Size Difference Between FLAME and Corrected TLM',
s, moment1_diff)
plt.rcParams['savefig.dpi'] = 600 # For png.
fig1.savefig('fig1_LS1-Target.eps', orientation='landscape')
fig2.savefig('fig2_LS1-Target.eps', orientation='landscape')
fig3.savefig('fig3_LS1-Target.eps', orientation='landscape')
fig4.savefig('fig4_LS1-Target.eps', orientation='landscape')
fig5.savefig('fig5_LS1-Target.eps', orientation='landscape')
fig6.savefig('fig6_LS1-Target.eps', orientation='landscape')
fig7.savefig('fig7_LS1-Target.eps', orientation='landscape')
fig8.savefig('fig8_LS1-Target.eps', orientation='landscape')
fig9.savefig('fig9_LS1-Target.eps', orientation='landscape')
ion(); show(); ioff()
raw_input('<ret> to continue>')
| mit | 7,870,611,750,167,640,000 | 29.885572 | 79 | 0.546875 | false | 2.785105 | false | false | false |
takeshineshiro/keystone | keystone/common/manager.py | 8 | 4942 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log
from oslo_log import versionutils
from oslo_utils import importutils
import stevedore
LOG = log.getLogger(__name__)
def response_truncated(f):
"""Truncate the list returned by the wrapped function.
This is designed to wrap Manager list_{entity} methods to ensure that
any list limits that are defined are passed to the driver layer. If a
hints list is provided, the wrapper will insert the relevant limit into
the hints so that the underlying driver call can try and honor it. If the
driver does truncate the response, it will update the 'truncated' attribute
in the 'limit' entry in the hints list, which enables the caller of this
function to know if truncation has taken place. If, however, the driver
layer is unable to perform truncation, the 'limit' entry is simply left in
the hints list for the caller to handle.
A _get_list_limit() method is required to be present in the object class
hierarchy, which returns the limit for this backend to which we will
truncate.
If a hints list is not provided in the arguments of the wrapped call then
any limits set in the config file are ignored. This allows internal use
of such wrapped methods where the entire data set is needed as input for
the calculations of some other API (e.g. get role assignments for a given
project).
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if kwargs.get('hints') is None:
return f(self, *args, **kwargs)
list_limit = self.driver._get_list_limit()
if list_limit:
kwargs['hints'].set_limit(list_limit)
return f(self, *args, **kwargs)
return wrapper
def load_driver(namespace, driver_name, *args):
try:
driver_manager = stevedore.DriverManager(namespace,
driver_name,
invoke_on_load=True,
invoke_args=args)
return driver_manager.driver
except RuntimeError as e:
LOG.debug('Failed to load %r using stevedore: %s', driver_name, e)
# Ignore failure and continue on.
@versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
in_favor_of='entrypoints',
what='direct import of driver')
def _load_using_import(driver_name, *args):
return importutils.import_object(driver_name, *args)
# For backwards-compatibility, an unregistered class reference can
# still be used.
return _load_using_import(driver_name, *args)
class Manager(object):
"""Base class for intermediary request layer.
The Manager layer exists to support additional logic that applies to all
or some of the methods exposed by a service that are not specific to the
HTTP interface.
It also provides a stable entry point to dynamic backends.
An example of a probable use case is logging all the calls.
"""
driver_namespace = None
def __init__(self, driver_name):
self.driver = load_driver(self.driver_namespace, driver_name)
def __getattr__(self, name):
"""Forward calls to the underlying driver."""
f = getattr(self.driver, name)
setattr(self, name, f)
return f
def create_legacy_driver(driver_class):
"""Helper function to deprecate the original driver classes.
The keystone.{subsystem}.Driver classes are deprecated in favor of the
new versioned classes. This function creates a new class based on a
versioned class and adds a deprecation message when it is used.
This will allow existing custom drivers to work when the Driver class is
renamed to include a version.
Example usage:
Driver = create_legacy_driver(CatalogDriverV8)
"""
module_name = driver_class.__module__
class_name = driver_class.__name__
class Driver(driver_class):
@versionutils.deprecated(
as_of=versionutils.deprecated.LIBERTY,
what='%s.Driver' % module_name,
in_favor_of='%s.%s' % (module_name, class_name),
remove_in=+2)
def __init__(self, *args, **kwargs):
super(Driver, self).__init__(*args, **kwargs)
return Driver
| apache-2.0 | -2,363,730,380,467,835,400 | 34.811594 | 79 | 0.666936 | false | 4.369584 | false | false | false |
decarlin/indra | indra/pysb_assembler.py | 1 | 4115 | from pysb import Model, Monomer, Parameter
from pysb.core import SelfExporter
from bel import bel_api
from biopax import biopax_api
from trips import trips_api
SelfExporter.do_export = False
class BaseAgentSet(object):
"""A container for a set of BaseAgents. Wraps a dict of BaseAgent instances."""
def __init__(self):
self.agents = {}
def get_create_agent(self, name):
"""Return agent with given name, creating it if needed."""
try:
agent = self.agents[name]
except KeyError:
agent = BaseAgent(name)
self.agents[name] = agent
return agent
def iteritems(self):
return self.agents.iteritems()
def __getitem__(self, name):
return self.agents[name]
class BaseAgent(object):
def __init__(self, name):
self.name = name
self.sites = []
self.site_states = {}
# The list of site/state configurations that lead to this agent
# being active (where the agent is currently assumed to have only
# one type of activity)
self.activating_mods = []
def create_site(self, site, states=None):
"""Create a new site on an agent if it doesn't already exist"""
if site not in self.sites:
self.sites.append(site)
if states is not None:
self.site_states.setdefault(site, [])
try:
states = list(states)
except TypeError:
return
self.add_site_states(site, states)
def add_site_states(self, site, states):
"""Create new states on a agent site if the site doesn't exist"""
for state in states:
if state not in self.site_states[site]:
self.site_states[site].append(state)
def add_activating_modification(self, activity_pattern):
self.activating_mods.append(activity_pattern)
def add_default_initial_conditions(model):
# Iterate over all monomers
for m in model.monomers:
set_base_initial_condition(model, m, 100.0)
def set_base_initial_condition(model, monomer, value):
# Build up monomer pattern dict
sites_dict = {}
for site in monomer.sites:
if site in monomer.site_states:
sites_dict[site] = monomer.site_states[site][0]
else:
sites_dict[site] = None
mp = monomer(**sites_dict)
pname = monomer.name + '_0'
try:
p = model.parameters[pname]
p.value = value
except KeyError:
p = Parameter(pname, value)
model.add_component(p)
model.initial(mp, p)
class PysbAssembler(object):
def __init__(self):
self.statements = []
self.agent_set = None
def add_statements(self, stmts):
self.statements.extend(stmts)
def make_model(self, initial_conditions=True, policies=None):
model = Model()
# Keep track of which policies we're using
self.policies = policies
self.agent_set = BaseAgentSet()
# Collect information about the monomers/self.agent_set from the
# statements
for stmt in self.statements:
stmt.monomers(self.agent_set, policies=policies)
# Add the monomers to the model based on our BaseAgentSet
for agent_name, agent in self.agent_set.iteritems():
m = Monomer(agent_name, agent.sites, agent.site_states)
model.add_component(m)
# Iterate over the statements to generate rules
for stmt in self.statements:
stmt.assemble(model, self.agent_set, policies=policies)
# Add initial conditions
if initial_conditions:
add_default_initial_conditions(model)
return model
if __name__ == '__main__':
pa = PysbAssembler()
bp = bel_api.process_belrdf('data/RAS_neighborhood.rdf')
pa.add_statements(bp.statements)
# bp = bel_api.process_ndex_neighborhood("ARAF")
# pa.add_statements(bp.statements)
# tp = trips_api.process_text("BRAF phosphorylates MEK1 at Ser222")
# pa.add_statements(tp.statements)
model = pa.make_model()
| bsd-2-clause | 772,667,815,329,595,900 | 32.729508 | 83 | 0.619927 | false | 3.835042 | false | false | false |
ktmud/david | david/core/attachment/mixins.py | 1 | 2817 | # -*- coding: utf-8 -*-
from david.lib.mixins.props import PropsMixin, PropsItem
from .attachment import Attachment
from .form import AttachmentFieldList
def _get_ids(items):
return [(i.id if hasattr(i, 'id') else i) for i in items]
class AttachmentMixin(PropsMixin):
""" Mixin for a db.Model """
attachments = PropsItem('attachments', [])
def attachment_items(self, media_type=None):
media_filter = None
if media_type:
if isinstance(media_type, str):
media_type = (media_type,)
media_filter = lambda x: x and any([getattr(x, 'is_' + m) for m in media_type])
ret = filter(media_filter, Attachment.gets(self.attachments))
return ret
def attachment_pics(self):
return [x for x in self.attachment_items('image') if x.is_image]
def attachments_info(self, *args, **kwargs):
return [item.serialize() for item in self.attachment_items(*args, **kwargs)]
def add_attachments(self, items):
items = _get_ids(items)
self.attachments = list(set(self.attachments + items))
def remove_attachments(self, items):
items = _get_ids(items)
self.attachments = [i for i in self.attachments if i not in items]
@property
def _attachment_field(self, name):
return AttachmentField(name)
def attachment_fields(self, label=None, name='attachments',
max_entries=None):
if label is None:
label = _('Attachments')
attached = [x for x in self.attachment_items]
return AttachmentFieldList(
self._attachment_field(name),
label=label,
min_entries=1,
max_entries=max_entries,
default=attached)
class PictureMixin(AttachmentMixin):
_DEFAULT_PIC = None
@property
def picture(self):
if hasattr(self, 'attachments'):
for key in self.attachments:
a = Attachment.get(key)
if a and a.is_image:
return a
if hasattr(self, 'picture_id'):
return Attachment.get(self.picture_id)
def picture_url(self, category='small', default=True):
pic = self.picture
if pic:
return pic.url(category)
if not default:
return None
dft = self._DEFAULT_PIC.replace('%25s', '%s', 1)
if '%s' in dft:
return dft % category
return self._DEFAULT_PIC
class MediaMixin(PictureMixin):
def attachment_medias(self):
audios, videos = [], []
items = self.attachment_items()
for x in items:
if x.is_audio: audios.append(x)
elif x.is_video: videos.append(x)
return audios, videos
| mit | 7,379,642,408,356,192,000 | 28.968085 | 91 | 0.57863 | false | 3.973202 | false | false | false |
miing/mci_migo_packages_django-adminaudit | adminaudit/models.py | 1 | 4091 | # Copyright 2010-2012 Canonical Ltd. This software is licensed under
# the GNU Lesser General Public License version 3 (see the file LICENSE).
from django.core import serializers
from django.db import models
from django.db.models.fields.files import FileField
from django.utils import simplejson
class AuditLog(models.Model):
"""
Records of all changes made via Django admin interface.
"""
username = models.TextField()
user_id = models.IntegerField()
model = models.TextField()
change = models.CharField(max_length=100)
representation = models.TextField()
values = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
@classmethod
def create(cls, user, obj, change, new_object=None):
assert change in ['create', 'update', 'delete']
values = serializers.serialize("json", [obj])
# json[0] is for removing outside list, this serialization is only for
# complete separate objects, the list is unnecessary
json = simplejson.loads(values)[0]
if new_object:
values_new = serializers.serialize("json", [new_object])
json_new = simplejson.loads(values_new)[0]
json = {'new': json_new, 'old': json}
if change == 'delete':
file_fields = [f for f in obj._meta.fields
if isinstance(f, FileField)]
if len(file_fields) > 0:
json['files'] = {}
for file_field in file_fields:
field_name = file_field.name
file = getattr(obj, field_name)
if file.name:
json['files'][file.name] = file.read().encode('base64')
values_pretty = simplejson.dumps(json, indent=2, sort_keys=True)
return cls.objects.create(
username=user.username,
user_id=user.id,
model=str(obj._meta),
values=values_pretty,
representation=unicode(obj),
change=change,
)
class AdminAuditMixin(object):
def _flatten(self, lst):
result = []
for item in lst:
if isinstance(item, list):
result.extend(self._flatten(item))
else:
result.append(item)
return result
def _collect_deleted_objects(self, obj):
result = []
try:
# This is for Django up to 1.2
from django.db.models.query_utils import CollectedObjects
seen_objs = CollectedObjects()
obj._collect_sub_objects(seen_objs)
for cls, subobjs in seen_objs.iteritems():
for subobj in subobjs.values():
result.append(subobj)
except ImportError:
# Django 1.3 solution, those imports needs to be here, because
# otherwise they will fail on Django < 1.3.
from django.contrib.admin.util import NestedObjects
from django.db import router
using = router.db_for_write(obj)
collector = NestedObjects(using=using)
collector.collect([obj])
result = self._flatten(collector.nested())
return result
def log_addition(self, request, obj, *args, **kwargs):
AuditLog.create(request.user, obj, 'create')
super(AdminAuditMixin, self).log_addition(request, obj, *args, **kwargs)
def log_deletion(self, request, obj, *args, **kwargs):
for subobj in self._collect_deleted_objects(obj):
AuditLog.create(request.user, subobj, 'delete')
super(AdminAuditMixin, self).log_deletion(request, obj, *args, **kwargs)
def save_model(self, request, new_obj, form, change):
if change:
# This is so that we'll get the values of the object before the
# change
old_obj = new_obj.__class__.objects.get(pk=new_obj.pk)
AuditLog.create(request.user, old_obj, 'update', new_object=new_obj)
super(AdminAuditMixin, self).save_model(
request, new_obj, form, change)
| lgpl-3.0 | 7,664,659,598,751,858,000 | 37.59434 | 80 | 0.594476 | false | 4.187308 | false | false | false |
rreimche/infdiffusion | fetcher.py | 1 | 1566 | # Fetches twits from Twitter using one of many possible download mechanisms and parameters given via command line arguments
# This particular implementation uses twitterscraper as an example implementation.
# Warning: scraping twitter may be not completely legal in your country.
# You could use tweepy for a legal option that uses Twitter API.
import argparse
import pymongo
from pymongo.errors import BulkWriteError
from twitterscraper import query_tweets
# parse command line arguments
parser = argparse.ArgumentParser("fetcher")
parser.add_argument("database", help="Database to save to", type=str)
parser.add_argument("collection", help="Collection to save to", type=str)
parser.add_argument("query", help="Query", type=str)
parser.add_argument("limit", help="Limit of tweets to download", type=int, default=None)
args = parser.parse_args()
# connect to database
client = pymongo.MongoClient()
db = client[args.database]
collection = db[args.collection]
# get tweets
# other download mechanisms could be used instead of query_tweets() here.
tweets = []
for tweet in query_tweets(args.query, args.limit):
tweets.append({
"_id" : tweet.id,
"timestamp" : tweet.timestamp,
"user" : tweet.user,
"fullname" : tweet.fullname,
"text" : tweet.text
})
# save tweets to mongodb
try:
collection.insert_many(tweets)
print args.collection + " done"
except BulkWriteError as bwe:
print(bwe.details)
#you can also take this component and do more analysis
#werrors = bwe.details['writeErrors']
raise | mit | 5,688,962,702,954,137,000 | 33.065217 | 123 | 0.735632 | false | 3.782609 | false | false | false |
lavalamp-/ws-backend-community | tasknode/tasks/scanning/services/fingerprinting/http.py | 1 | 3763 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from celery.utils.log import get_task_logger
from lib.fingerprinting import HttpFingerprinter, HttpsFingerprinter
from .....app import websight_app
from ....base import ServiceTask, NetworkServiceTask
logger = get_task_logger(__name__)
#USED
@websight_app.task(bind=True, base=NetworkServiceTask)
def check_service_for_http(
self,
org_uuid=None,
network_service_scan_uuid=None,
ip_address=None,
port=None,
network_service_uuid=None,
order_uuid=None,
):
"""
Check to see if the given remote service is running HTTP.
:param org_uuid: The UUID of the organization to check the service on behalf of.
:param network_service_scan_uuid: The UUID of the network service scan that this service fingerprinting is
associated with.
:param ip_address: The IP address where the service is running.
:param port: The port where the service is running.
:param network_service_uuid: The UUID of the network service to check for HTTP service.
:return: None
"""
logger.info(
"Now checking to see if remote TCP service at %s:%s is running HTTP. Organization is %s, scan is %s."
% (ip_address, port, org_uuid, network_service_scan_uuid)
)
fingerprinter = HttpFingerprinter(ip_address=ip_address, port=port)
fingerprinter.perform_fingerprinting()
logger.info(
"TCP service at %s:%s found %s running HTTP."
% (ip_address, port, "to be" if fingerprinter.fingerprint_found else "not to be")
)
result_record = fingerprinter.to_es_model(model_uuid=network_service_scan_uuid, db_session=self.db_session)
result_record.save(org_uuid)
logger.info(
"Elasticsearch updated with HTTP fingerprint result for TCP endpoint %s:%s. Organization was %s, scan was %s."
% (ip_address, port, org_uuid, network_service_scan_uuid)
)
#USED
@websight_app.task(bind=True, base=NetworkServiceTask)
def check_service_for_https(
self,
org_uuid=None,
network_service_scan_uuid=None,
ip_address=None,
port=None,
ssl_version=None,
network_service_uuid=None,
order_uuid=None,
):
"""
Check to see if the given remote service is running HTTPS.
:param org_uuid: The UUID of the organization to check the service on behalf of.
:param network_service_scan_uuid: The UUID of the network service scan that this service fingerprinting is
associated with.
:param ip_address: The IP address where the service is running.
:param port: The port where the service is running.
:param ssl_version: The version of SSL to use to connect to the remote service.
:param network_service_uuid: The UUID of the network service to check for HTTP service.
:return: None
"""
logger.info(
"Now checking to see if remote TCP service at %s:%s is running HTTPS with SSL version %s. "
"Organization is %s, scan is %s."
% (ip_address, port, ssl_version, org_uuid, network_service_scan_uuid)
)
fingerprinter = HttpsFingerprinter(ip_address=ip_address, port=port, ssl_version=ssl_version)
fingerprinter.perform_fingerprinting()
logger.info(
"TCP service at %s:%s found %s running HTTPS."
% (ip_address, port, "to be" if fingerprinter.fingerprint_found else "not to be")
)
result_record = fingerprinter.to_es_model(model_uuid=network_service_scan_uuid, db_session=self.db_session)
result_record.save(org_uuid)
logger.info(
"Elasticsearch updated with HTTPS fingerprint result for TCP endpoint %s:%s. Organization was %s, scan was %s."
% (ip_address, port, org_uuid, network_service_scan_uuid)
)
| gpl-3.0 | -3,429,817,639,384,512,000 | 40.351648 | 119 | 0.685092 | false | 3.678397 | false | false | false |
kotnik/scanner | scanner/util.py | 1 | 1066 | import time
import uuid
import subprocess
import logging
log = logging.getLogger(__name__)
def check_call(*args, **kwargs):
start = time.time()
command_uuid = uuid.uuid4()
command_line = subprocess.list2cmdline(args[0])
log.debug("%s: Executing: %s" % (command_uuid, command_line))
data = None
if "data" in kwargs:
data = kwargs["data"]
del(kwargs["data"])
kwargs.setdefault("close_fds", True)
proc = subprocess.Popen(
*args, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs
)
(stdout_data, stderr_data) = proc.communicate(input=data)
if stdout_data.strip():
log.debug("OUT: %s" % stdout_data.strip().replace('\n', ' -- '))
if stderr_data.strip():
log.debug("ERR: %s" % stderr_data.strip().replace('\n', ' -- '))
log.debug("%s: %s: [code=%s, duration=%.1fs]" % (command_uuid, command_line, proc.returncode, time.time() - start))
if proc.returncode != 0:
return stderr_data.strip()
return stdout_data.strip()
| mit | 3,820,264,963,204,032,500 | 26.333333 | 119 | 0.61257 | false | 3.449838 | false | false | false |
gylian/sickrage | lib/guessit/transfo/post_process.py | 6 | 2845 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.patterns import subtitle_exts
from guessit.textutils import reorder_title, find_words
import logging
log = logging.getLogger(__name__)
def process(mtree):
# 1- try to promote language to subtitle language where it makes sense
for node in mtree.nodes():
if 'language' not in node.guess:
continue
def promote_subtitle():
if 'language' in node.guess:
node.guess.set('subtitleLanguage', node.guess['language'],
confidence=node.guess.confidence('language'))
del node.guess['language']
# - if we matched a language in a file with a sub extension and that
# the group is the last group of the filename, it is probably the
# language of the subtitle
# (eg: 'xxx.english.srt')
if (mtree.node_at((-1,)).value.lower() in subtitle_exts and
node == mtree.leaves()[-2]):
promote_subtitle()
# - if we find the word 'sub' before the language, and in the same explicit
# group, then upgrade the language
explicit_group = mtree.node_at(node.node_idx[:2])
group_str = explicit_group.value.lower()
if ('sub' in find_words(group_str) and
0 <= group_str.find('sub') < (node.span[0] - explicit_group.span[0])):
promote_subtitle()
# - if a language is in an explicit group just preceded by "st",
# it is a subtitle language (eg: '...st[fr-eng]...')
try:
idx = node.node_idx
previous = mtree.node_at((idx[0], idx[1] - 1)).leaves()[-1]
if previous.value.lower()[-2:] == 'st':
promote_subtitle()
except IndexError:
pass
# 2- ", the" at the end of a series title should be prepended to it
for node in mtree.nodes():
if 'series' not in node.guess:
continue
node.guess['series'] = reorder_title(node.guess['series'])
| gpl-3.0 | 7,827,392,249,194,032,000 | 37.972603 | 83 | 0.629877 | false | 4.007042 | false | false | false |
eort/OpenSesame | opensesame_extensions/get_started/get_started.py | 2 | 3859 | #-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
import os
import sys
from libopensesame import misc, metadata
from libqtopensesame.extensions import base_extension
from libqtopensesame.misc.translate import translation_context
from libqtopensesame.misc import template_info
_ = translation_context(u'get_started', category=u'extension')
class get_started(base_extension):
"""
desc:
Shows the get-started tab and opens an experiment on startup, if one was
passed on the command line.
"""
def activate(self):
"""
desc:
Is called when the extension is activated through the menu/ toolbar
action.
"""
# Initialize templates
templates = []
for i, (path, desc) in enumerate(template_info.templates):
try:
path = self.experiment.resource(path)
except:
continue
if not i:
cls = u'important-button'
else:
cls = u'button'
path = os.path.abspath(path)
md = u'<a href="opensesame://%s" class="%s">%s</a><br />' \
% (path, cls, desc)
templates.append(md)
# Initialize recent experiments
if not self.main_window.recent_files:
recent = []
else:
recent = [_(u'Continue with a recent experiment:')+u'<br />']
for i, path in enumerate(self.main_window.recent_files):
cls = u'important-button' if not i else u'button'
md = u'<a href="opensesame://event.open_recent_%d" class="%s">%s</a><br />' % \
(i, cls, self._unambiguous_path(path))
recent.append(md)
# Create markdown
with safe_open(self.ext_resource(u'get_started.md')) as fd:
md = fd.read()
md = md % {
u'version' : metadata.__version__,
u'codename' : metadata.codename,
u'templates' : u' \n'.join(templates),
u'recent_experiments' : u' \n'.join(recent)
}
self.tabwidget.open_markdown(md, title=_(u'Get started!'),
icon=u'document-new')
def _unambiguous_path(self, path):
"""
desc:
If the path basename is unique among the resent experiments, this is
used. Otherwise, the full path is used.
arguments:
path: The path to shorten unambiguously.
returns:
The unambiguously shortened path.
"""
basename = os.path.basename(path)
basenames = \
[os.path.basename(_path) for _path in self.main_window.recent_files]
return path if basenames.count(basename) > 1 else basename
def event_open_recent_0(self):
self.main_window.open_file(path=self.main_window.recent_files[0])
def event_open_recent_1(self):
self.main_window.open_file(path=self.main_window.recent_files[1])
def event_open_recent_2(self):
self.main_window.open_file(path=self.main_window.recent_files[2])
def event_open_recent_3(self):
self.main_window.open_file(path=self.main_window.recent_files[3])
def event_open_recent_4(self):
self.main_window.open_file(path=self.main_window.recent_files[4])
def event_startup(self):
"""
desc:
Called on startup.
"""
# Open an experiment if it has been specified as a command line argument
# and suppress the new wizard in that case.
if len(sys.argv) >= 2 and os.path.isfile(sys.argv[1]):
path = safe_decode(sys.argv[1], enc=misc.filesystem_encoding(),
errors=u'ignore')
self.main_window.open_file(path=path)
return
self.activate()
| gpl-3.0 | 4,596,173,658,743,343,600 | 29.626984 | 83 | 0.700441 | false | 3.139951 | false | false | false |
rpwagner/tiled-display | flTile/configs/localTestLargeConfig.py | 1 | 1310 | import sys, os
sys.path = [os.path.join(os.getcwd(), "..") ] + sys.path
from tileConfig import TileConfig, TileDesc, MachineDesc, SaveConfig, LoadConfig, TileLocation, Rect, LocalWindow
def CreateLocalTestConfig():
c = TileConfig()
t0 = TileDesc( (400, 400), (0,0), ":0", localWindowId=0)
t1 = TileDesc( (400, 400), (400, 0), ":0", lrtbMullions=(0,0,0,0), location=TileLocation( (400,0), relative=t0.uid), localWindowId=0)
print "t1 relative:", t1.location.relative
t2 = TileDesc( (400, 400), (0,400), ":0", localWindowId=0, location=TileLocation( (0,400), relative=t0.uid))
t3 = TileDesc( (400, 400), (400, 400), ":0", lrtbMullions=(0,0,0,0), location=TileLocation( (400,0), relative=t2.uid), localWindowId=0)
localWindow = LocalWindow(Rect(0,0,800,800))
m3 = MachineDesc( "maze", tiles = [t0, t1, t2, t3], windows=[localWindow])
c.addMachine(m3)
return c
if __name__ == "__main__":
c = CreateLocalTestConfig()
SaveConfig(c, "/tmp/testconfig")
print c.asDict()
c2 = LoadConfig("/tmp/testconfig")
if c == c2:
print "PASS: Saved and reread config matched original."
else:
print "FAIL: Saved and reread config did not match original. Saving as testconfig2 for comparison"
SaveConfig(c2, "/tmp/testconfig2")
| apache-2.0 | 6,773,218,746,586,381,000 | 37.529412 | 139 | 0.644275 | false | 2.917595 | true | false | false |
JrGoodle/clowder | clowder/util/file_system.py | 1 | 2610 | """File system utilities
.. codeauthor:: Joe DeCapo <[email protected]>
"""
import errno
import os
import shutil
from pathlib import Path
import clowder.util.formatting as fmt
from clowder.util.error import ExistingFileError, MissingSourceError
from clowder.util.logging import LOG
def symlink_clowder_yaml(source: Path, target: Path) -> None:
"""Force symlink creation
:param Path source: File to create symlink pointing to
:param Path target: Symlink location
:raise ExistingFileError:
:raise MissingSourceError:
"""
if not target.is_symlink() and target.is_file():
raise ExistingFileError(f"Found non-symlink file {fmt.path(target)} at target path")
if not Path(target.parent / source).exists():
raise MissingSourceError(f"Symlink source {fmt.path(source)} appears to be missing")
if target.is_symlink():
remove_file(target)
try:
path = target.parent
fd = os.open(path, os.O_DIRECTORY)
os.symlink(source, target, dir_fd=fd)
os.close(fd)
except OSError:
LOG.error(f"Failed to symlink file {fmt.path(target)} -> {fmt.path(source)}")
raise
def remove_file(file: Path) -> None:
"""Remove file
:param Path file: File path to remove
"""
os.remove(str(file))
def create_backup_file(file: Path) -> None:
"""Copy file to {file}.backup
:param Path file: File path to copy
"""
shutil.copyfile(str(file), f"{str(file)}.backup")
def restore_from_backup_file(file: Path) -> None:
"""Copy {file}.backup to file
:param Path file: File path to copy
"""
shutil.copyfile(f"{file}.backup", file)
def make_dir(directory: Path, check: bool = True) -> None:
"""Make directory if it doesn't exist
:param str directory: Directory path to create
:param bool check: Whether to raise exceptions
"""
if directory.exists():
return
try:
os.makedirs(str(directory))
except OSError as err:
if err.errno == errno.EEXIST:
LOG.error(f"Directory already exists at {fmt.path(directory)}")
else:
LOG.error(f"Failed to create directory {fmt.path(directory)}")
if check:
raise
def remove_directory(dir_path: Path, check: bool = True) -> None:
"""Remove directory at path
:param str dir_path: Path to directory to remove
:param bool check: Whether to raise errors
"""
try:
shutil.rmtree(dir_path)
except shutil.Error:
LOG.error(f"Failed to remove directory {fmt.path(dir_path)}")
if check:
raise
| mit | 8,639,001,168,381,263,000 | 24.588235 | 92 | 0.644061 | false | 3.760807 | false | false | false |
alonsopg/AuthorProfiling | src/ef_list_baseline.py | 1 | 3394 | #!/usr/bin/env python
# -*- coding: utf-8
from __future__ import print_function
import argparse
import codecs
import cPickle as pickle
import numpy as np
import os
from load_tweets import load_tweets
from collections import Counter
NAME='ef_list_baseline'
prefix='list_baseline'
if __name__ == "__main__":
# Las opciones de línea de comando
p = argparse.ArgumentParser(NAME)
p.add_argument("DIR",default=None,
action="store", help="Directory with corpus")
p.add_argument("LIST1",default=None,
action="store", help="File with list of words")
p.add_argument("LIST2",default=None,
action="store", help="File with list of words")
p.add_argument("-d", "--dir",
action="store", dest="dir",default="feats",
help="Default directory for features [feats]")
p.add_argument("-p", "--pref",
action="store", dest="pref",default=prefix,
help="Prefix to save the file of features %s"%prefix)
p.add_argument("--mix",
action="store_true", dest="mix",default=True,
help="Mix tweets into pefiles")
p.add_argument("--format",
action="store_true", dest="format",default="pan15",
help="Change to pan14 to use format from 2015 [feats]")
p.add_argument("-v", "--verbose",
action="store_true", dest="verbose",
help="Verbose mode [Off]")
p.add_argument("--stopwords", default=None,
action="store", dest="stopwords",
help="List of stop words [data/stopwords.txt]")
opts = p.parse_args()
if opts.verbose:
def verbose(*args):
print(*args)
else:
verbose = lambda *a: None
# Colecta los tweets y sus identificadores (idtweet y idusuario)
tweets,ids=load_tweets(opts.DIR,opts.format,mix=opts.mix)
# Imprime alguna información sobre los tweets
if opts.verbose:
for i,tweet in enumerate(tweets[:10]):
verbose('Tweet example',i+1,tweet[:100])
verbose("Total tweets : ",len(tweets))
try:
verbose("Total usuarios : ",len(set([id for x,id in ids])))
except ValueError:
verbose("Total usuarios : ",len(ids))
# Calculamos los features
# - Cargar lista de palabras uno
list_of_words1 = [line.strip() for line in codecs.open(opts.LIST1,encoding='utf-8') if
len(line.strip())>0]
list_of_words2 = [line.strip() for line in codecs.open(opts.LIST2,encoding='utf-8') if
len(line.strip())>0]
counts = []
for i,j in enumerate(tweets):
c=Counter(j)
countador=sum([c[x] for x in list_of_words1])
countador_2=sum([c[x] for x in list_of_words2])
counts.append((countador,countador_2))
# - Contamos las palabras en los tweets
feats = np.asarray(counts)
# Guarda la matrix de features
with open(os.path.join(opts.dir,opts.pref+'.dat'),'wb') as idxf:
pickle.dump(feats, idxf, pickle.HIGHEST_PROTOCOL)
# Imprimimos información de la matrix
verbose("Total de features :",feats.shape[1])
verbose("Total de renglones:",feats.shape[0])
# Guarda los indices por renglones de la matrix (usuario o tweet, usuario)
with open(os.path.join(opts.dir,opts.pref+'.idx'),'wb') as idxf:
pickle.dump(ids, idxf, pickle.HIGHEST_PROTOCOL)
| gpl-2.0 | 5,566,480,035,817,848,000 | 30.398148 | 91 | 0.611914 | false | 3.474385 | false | false | false |
jn0/fb2utils | fb2utils/update_librusec.py | 1 | 27958 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# (c) Lankier mailto:[email protected]
import sys, os
import time
import shutil
import sqlite3
from cStringIO import StringIO
from lxml import etree
from copy import deepcopy
from optparse import OptionParser, make_option
import zipfile
import traceback
from utils import walk, read_file, prog_version, print_log, LogOptions, check_xml, count_files, print_exc
from parser import FB2Parser
# global vars
db_file = None
_connect = None
options = None
not_deleted_list = None
update_time = None
fsenc = sys.getfilesystemencoding()
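# prefix -> URI map for the FB2 schema and XLink namespaces used in FB2 files
# (both 'xlink' and 'l' point to the XLink namespace)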
namespaces = {'m': 'http://www.gribuser.ru/xml/fictionbook/2.0',
'xlink':'http://www.w3.org/1999/xlink',
'l':'http://www.w3.org/1999/xlink'}
# statistics
class stats:
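    """Simple run counters collected while processing files."""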
total_files = 0
total = 0
passed = 0
fixed = 0
errors = 0
def insert_values(curs, tbl_name, s):
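    """Replay one MySQL dump line 'INSERT INTO ... VALUES (...),(...),...'
    against the SQLite table tbl_name, inserting one row at a time and
    converting MySQL backslash escapes to SQLite's doubled single quotes."""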
i = s.index('VALUES') + len('VALUES')
values = s[i:].strip()
if values.endswith(';'):
values = values[:-1]
values = values.split('),(')
for v in values:
if not v.startswith('('):
v = '(' + v
if not v.endswith(')'):
v = v + ')'
        v = v.replace('\\\\', '\x00') # temporarily stash escaped backslashes
        v = v.replace("\\'", "''") # replace MySQL escape \' with SQLite ''
        v = v.replace('\x00', '\\') # restore the stashed backslashes
sql = 'insert into %s values %s' % (tbl_name, v)
try:
curs.execute(sql)
except:
print 'SQL:', repr(sql)
raise
def mksql(fn, tbl_name):
global _connect
curs = _connect.cursor()
curs.execute('DROP TABLE IF EXISTS `%s`' % tbl_name)
sql = []
start = False
data = open(fn).read(2)
if data == '\x1f\x8b':
import gzip
f = gzip.open(fn, 'rb')
data = f.read()
f.close()
fd = StringIO(data)
else:
fd = open(fn)
for s in fd:
if s.startswith(')'):
break
if s.startswith('CREATE TABLE'):
start = True
sql.append('CREATE TABLE `%s` (\n' % tbl_name)
elif start:
if s.strip().startswith('KEY'):
continue
elif s.strip().startswith('FULLTEXT KEY'):
continue
elif s.strip().startswith('UNIQUE KEY'):
continue
else:
#s = s.replace('auto_increment', 'AUTOINCREMENT')
s = s.replace('auto_increment', '')
s = s.replace('character set utf8', '')
s = s.replace('collate utf8_bin', '')
s = s.replace('collate utf8_unicode_ci', '')
s = s.replace('unsigned', '')
s = s.replace('COMMENT', ', --')
s = s.replace('USING BTREE', '')
#s = s.replace('UNIQUE KEY', 'UNIQUE')
sql.append(s)
sql = ''.join(sql).strip()
if sql.endswith(','):
sql = sql[:-1]
sql = sql+'\n)'
curs.execute(sql)
#
update_time = None
found = False
for s in fd:
if s.startswith('INSERT INTO'):
insert_values(curs, tbl_name, s)
found = True
elif s.startswith('-- Dump completed on'):
ut = s[len('-- Dump completed on'):].strip().replace(' ', ' ')
if update_time is None:
update_time = ut
else:
update_time = min(ut, update_time)
_connect.commit()
if not found:
raise ValueError('insert sql instruction not found')
return update_time
def update_db():
global _connect
sql_tables = (
#('lib.libactions.sql', 'libactions'),
#('lib.libavtoraliase.sql', 'libavtoraliase'),
('lib.libavtorname.sql', 'libavtorname'),
('lib.libavtor.sql', 'libavtor'),
#('lib.libblocked.sql', 'libblocked'),
('lib.libbook.old.sql', 'libbookold'),
('lib.libbook.sql', 'libbook'),
#('lib.libdonations.sql', 'libdonations'),
#('lib.libfilename.sql', 'libfilename'),
('lib.libgenrelist.sql', 'libgenrelist'),
('lib.libgenre.sql', 'libgenre'),
#('lib.libjoinedbooks.sql', 'libjoinedbooks'),
#('lib.libpolka.sql', 'libpolka'),
('lib.libseqname.sql', 'libseqname'),
('lib.libseq.sql', 'libseq'),
#('lib.libsrclang.sql', 'libsrclang'),
('lib.libtranslator.sql', 'libtranslator'),
)
update_time = None
for fn, tbl_name in sql_tables:
fn = os.path.join(options.sql_dir, fn)
if not os.path.exists(fn):
fn = fn + '.gz'
if not os.path.exists(fn):
print_log('ERROR: file not found:', fn, level=3)
return False
ut = mksql(fn, tbl_name)
if tbl_name != 'libbookold':
# skip libbookold
update_time = ut
curs = _connect.cursor()
curs.execute('DROP TABLE IF EXISTS librusec')
curs.execute('CREATE TABLE librusec ( update_time varchar(32) )')
curs.execute('INSERT INTO librusec VALUES (?)', (update_time,))
_connect.commit()
return True
# Modifiers (prefix before ':' in the filename pattern):
# T - translit
# L - lowercase
# R - remove FAT invalid chars
# B - big file names (do not strip file names to 255 chars)
# _ - replace all space to underscore
# Format placeholders:
# m - meta genre
# g - genre
# L - first letter in author last-name
# f - authors full-name
# F - first author full-name
# a - authors last-name (or nickname)
# A - first author last-name (or nickname)
# t - title
# s - (sequence #number)
# S - sequence number
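#
# Example (illustrative, not from the original docs): with the pattern
# "TR_:a - t", a book by "Pushkin" titled "Skazki" would be written as
# "Pushkin_-_Skazki.fb2" (plus ".zip" unless --do-not-zip is used).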
def get_filename(book_info):
format = options.fn_format
f = format.split(':')
mods = ''
if len(f) > 2:
return None
if len(f) == 2:
mods, format = f
if '_' in mods:
sep = '_'
else:
sep = ' '
fn_tbl = {
'm': 'metagen',
'g': 'genre',
'l': 'lang',
't': 'title',
'L': 'first_letter',
'a': 'name',
'A': 'first_name',
'f': 'full_name',
'F': 'first_full_name',
's': 'seq1',
'S': 'seq2',
'b': 'bookid',
}
#
book_info['bookid'] = str(book_info['bookid'])
# metagenre
book_info['metagen'] = list(book_info['metagen'])[0]
# genre
book_info['genre'] = book_info['genres'][0]
# authors
full_names = []
names = []
first_name = ''
first_full_name = ''
first_letter = ''
for a in book_info['authors']:
aut = []
name = a[2]
if a[2]: # last-name
aut.append(a[2])
aut.append(a[0])
aut.append(a[1])
elif a[3]: # nickname
aut.append(a[3])
name = a[3]
else:
aut.append(a[2])
aut.append(a[0])
aut.append(a[1])
aut = sep.join(aut).strip()
full_names.append(aut)
names.append(name)
if not first_name:
first_name = name
first_full_name = aut
first_letter = aut[0]
if len(names) > 3:
        # truncate overly long author lists (keep first author + '...')
names = [names[0], '...']
full_names = [full_names[0], '...']
if '_' in mods:
book_info['name'] = '_'.join(names)
book_info['full_name'] = '_'.join(full_names)
else:
book_info['name'] = ', '.join(names)
book_info['full_name'] = ', '.join(full_names)
book_info['first_name'] = first_name
book_info['first_full_name'] = first_full_name
book_info['first_letter'] = first_letter.upper()
# sequence
if book_info['sequences']:
seq = tuple(book_info['sequences'][0])
book_info['seq1'] = '(%s #%s)' % seq
book_info['seq2'] = '%s %s' % seq
else:
book_info['seq1'] = book_info['seq2'] = ''
# replace '/' and '\'
for n in ('name', 'full_name', 'first_name', 'first_full_name',
'title', 'seq1', 'seq2'):
book_info[n] = book_info[n].replace('/', '%').replace('\\', '%')
# generate filename
f = []
for c in list(format):
if c in fn_tbl:
k = book_info[fn_tbl[c]]
if k:
f.append(k)
elif c in 'sS':
if f and f[-1] == ' ':
f = f[:-1]
else:
f.append(c)
fn = ''.join(f)
#
fn = fn.strip()
if 'R' in mods:
for c in '|?*<>":+[]': # invalid chars in VFAT
fn = fn.replace(c, '')
if '_' in mods:
fn = fn.replace(' ', '_')
if 'L' in mods:
fn = fn.lower()
if 'T' in mods:
# translit
from unidecode import unidecode
fn = unidecode(fn)
elif not os.path.supports_unicode_filenames:
fn = fn.encode(fsenc, 'replace')
max_path_len = 247
if 'B' not in mods and len(fn) > max_path_len:
fn = fn[:max_path_len]
if fsenc.lower() == 'utf-8':
# utf-8 normalisation
fn = unicode(fn, 'utf-8', 'ignore').encode('utf-8')
fn = os.path.join(options.out_dir, fn)
return fn
def get_bookid(filename, fb2):
global _connect
# search bookid in fb2
if options.search_id and fb2 is not None:
find = xpath('/m:FictionBook/m:description/m:custom-info')
bookid = None
for e in find(fb2):
bid = e.get('librusec-book-id')
if bid is not None:
try:
bookid = int(bid)
except:
pass
else:
return bookid
# search bookid by filename
try:
bookid = int(filename)
except ValueError:
curs = _connect.cursor()
curs.execute("SELECT BookId FROM libbookold WHERE FileName = ?",
(filename,))
res = curs.fetchone()
if res is None:
print_log('ERROR: file not found in db:', filename, level=3)
return None
return res[0]
return bookid
def is_deleted(bookid):
global _connect
curs = _connect.cursor()
curs.execute("SELECT Deleted FROM libbook WHERE BookId = ?", (bookid,))
res = curs.fetchone()
if res is None:
print >> sys.stderr, 'updatedb.is_deleted: internal error'
return None
return bool(res[0])
def create_fb2(data):
if not check_xml(data):
return None
try:
fb2 = etree.XML(data)
except:
#print_exc()
if not options.nofix:
try:
data = str(FB2Parser(data, convertEntities='xml'))
options.file_fixed = True
fb2 = etree.XML(data)
except:
print_exc()
return None
else:
stats.fixed += 1
else:
return None
return fb2
_xpath_cash = {}
def xpath(path):
# optimisation
if path in _xpath_cash:
return _xpath_cash[path]
find = etree.XPath(path, namespaces=namespaces)
_xpath_cash[path] = find
return find
def update_fb2(fb2, bookid):
# initialisation
# 1. db
global _connect
curs = _connect.cursor()
# 2. xml
find = xpath('/m:FictionBook/m:description/m:title-info')
old_ti = find(fb2)[0] # old <title-info>
new_ti = etree.Element('title-info') # new <title-info>
# 3. routines
xp_prefix = '/m:FictionBook/m:description/m:title-info/m:'
def copy_elem(elem):
# just copy old elements
find = xpath(xp_prefix+elem)
for e in find(fb2):
new_ti.append(deepcopy(e))
def add_authors(table, column, elem_name, add_unknown=False):
authors = []
sql = '''SELECT
FirstName, MiddleName, LastName, NickName, Homepage, Email
FROM libavtorname JOIN %s ON libavtorname.AvtorId = %s.%s
WHERE BookId = ?''' % (table, table, column)
curs.execute(sql, (bookid,))
res = curs.fetchall()
if res:
for a in res:
author = etree.Element(elem_name)
aut = []
i = 0
for e in ('first-name', 'middle-name', 'last-name',
'nickname', 'home-page', 'email'):
if a[i]:
elem = etree.Element(e)
elem.text = a[i]
author.append(elem)
aut.append(a[i])
else:
aut.append('')
i += 1
new_ti.append(author)
authors.append(aut)
elif add_unknown:
author = etree.Element(elem_name)
elem = etree.Element('last-name')
            elem.text = u'Автор неизвестен'
author.append(elem)
new_ti.append(author)
            authors.append(['', '', u'Автор неизвестен', ''])
return authors
#
book_info = {'bookid': bookid}
# generation <title-info>
# 1. <genre>
curs.execute('SELECT GenreId FROM libgenre WHERE BookId = ?', (bookid,))
genres = []
metagen = set()
res = curs.fetchall()
if res:
for i in res:
curs.execute('''SELECT GenreCode, GenreMeta FROM libgenrelist
WHERE GenreId = ? LIMIT 1''', i)
res = curs.fetchone()
name = res[0]
genre = etree.Element('genre')
genre.text = name
new_ti.append(genre)
genres.append(name)
metagen.add(res[1])
else:
genres = ['other']
genre = etree.Element('genre')
genre.text = 'other'
new_ti.append(genre)
        metagen = [u'Прочее']
book_info['genres'] = genres
book_info['metagen'] = metagen
# 2. <author>
authors = add_authors('libavtor', 'AvtorId', 'author', add_unknown=True)
book_info['authors'] = authors
# 3. <book-title>
curs.execute('''SELECT Title, Title1, Lang, Time FROM libbook
WHERE BookId = ? LIMIT 1''', (bookid,))
title_text, title1_text, lang_text, added_time = curs.fetchone()
lang_text = lang_text.lower()
title_text = title_text.strip()
title1_text = title1_text.strip()
title = etree.Element('book-title')
if title1_text:
title.text = '%s [%s]' % (title_text, title1_text)
else:
title.text = title_text
new_ti.append(title)
book_info['title'] = title_text
book_info['title1'] = title1_text
# 4. <annotation>
copy_elem('annotation')
# 5. <keywords>
copy_elem('keywords')
# 6. <date>
copy_elem('date')
# 7. <coverpage>
copy_elem('coverpage')
# 8. <lang>
lang = etree.Element('lang')
lang.text = lang_text
new_ti.append(lang)
book_info['lang'] = lang_text
# 9. <src-lang>
copy_elem('src-lang')
# 10. <translator>
add_authors('libtranslator', 'TranslatorId', 'translator')
# 11. <sequence>
sequences = []
if 1:
curs.execute("""SELECT SeqName, SeqNumb
FROM libseq JOIN libseqname USING (SeqId)
WHERE BookId = ? AND SeqName != '' """, (bookid,))
else:
curs.execute("""SELECT SeqName, SeqNumb
FROM libseq JOIN libseqname USING(SeqId)
WHERE BookId = ? ORDER BY level LIMIT 1""", (bookid,))
for seq in curs.fetchall():
sequence = etree.Element('sequence')
sequence.attrib['name'] = seq[0]
sequence.attrib['number'] = str(seq[1])
new_ti.append(sequence)
sequences.append([seq[0], str(seq[1])])
book_info['sequences'] = sequences
# finalisation
# 1. replace <title-info>
find = xpath('/m:FictionBook/m:description')
desc = find(fb2)[0]
desc.replace(old_ti, new_ti)
# 2. add/update <custom-info>
bookid_found = False
add_ti_found = False
added_time_found = False
update_time_found = False
updater_found = False
fixer_found = False
find = xpath('/m:FictionBook/m:description/m:custom-info')
for ci in find(fb2):
it = ci.get('info-type')
if not it:
if it is None:
print_log('WARNING: <custom-info> has no attribute "info-type"')
elif it == 'librusec-book-id':
bookid_found = True
elif it == 'librusec-add-title-info':
ci.text = title1_text
add_ti_found = True
elif it == 'librusec-added-at':
ci.text = added_time
added_time_found = True
elif it == 'librusec-updated-at':
ci.text = update_time
update_time_found = True
elif it == 'librusec-updater' and ci.text == 'fb2utils':
updater_found = True
elif it == 'fixed-by' and ci.text == 'fb2utils':
fixer_found = True
if not bookid_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-book-id'
ci.text = str(bookid)
desc.append(ci)
if not add_ti_found and title1_text:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-add-title-info'
ci.text = title1_text
desc.append(ci)
if not added_time_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-added-at'
ci.text = added_time
desc.append(ci)
if not update_time_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-updated-at'
ci.text = update_time
desc.append(ci)
if not updater_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-updater'
ci.text = 'fb2utils'
desc.append(ci)
if options.file_fixed and not fixer_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'fixed-by'
ci.text = 'fb2utils'
desc.append(ci)
# done
return etree.tostring(fb2, encoding=options.output_encoding,
xml_declaration=True), book_info
def copy_fb2(filename, data, to_dir=None, msg='save bad fb2 file:'):
if to_dir is None:
if not options.save_bad:
return
to_dir = options.save_bad
filename = str(filename)+'.fb2'
fn = os.path.join(to_dir, filename)
print_log(msg, fn)
if options.nozip:
open(fn).write(data)
else:
save_zip(fn, filename, data)
def save_zip(out_file, out_fn, data):
out_file = out_file+'.zip'
zf = zipfile.ZipFile(out_file, 'w', zipfile.ZIP_DEFLATED)
zipinfo = zipfile.ZipInfo()
zipinfo.filename = out_fn
zipinfo.external_attr = 0644 << 16L # needed since Python 2.5
zipinfo.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(zipinfo, data)
#zf.writestr(out_fn, data)
def base_name(filename, ext='.fb2'):
if not filename.endswith(ext):
return None
return os.path.basename(filename)[:-len(ext)]
def process_file(fn, ftype, z_filename, data):
# 0. logging
LogOptions.filename = os.path.abspath(fn)
stats.total += 1
options.file_fixed = False
if options.log_file and (stats.total % 10) == 0:
# progress
tm = time.time() - stats.starttime
eta = stats.total_files * tm / stats.total - tm
h = int(eta / 3600)
m = (eta - h * 3600) / 60
s = eta % 60
sys.stdout.write('\r%d out of %d (ETA: %02dh %02dm %02ds)' %
(stats.total, stats.total_files, h, m, s))
sys.stdout.flush()
if ftype == 'error':
# unzip error
print_log('ERROR:', z_filename, level=3)
stats.errors += 1
return False
filename = fn
if z_filename:
LogOptions.z_filename = z_filename
filename = z_filename
# 1. search bookid
f = base_name(filename)
if f is None:
        # filename does not end with 'fb2'
stats.errors += 1
print_log('ERROR: bad filename:', z_filename, level=3)
copy_fb2('unknown-id-'+str(stats.errors), data)
return False
if options.search_id:
fb2 = create_fb2(data)
bookid = get_bookid(f, fb2)
else:
bookid = get_bookid(f, None)
if bookid is None:
stats.errors += 1
print_log('ERROR: unknown bookid', level=3)
copy_fb2('unknown-id-'+str(stats.errors), data)
return False
print_log('bookid =', str(bookid))
# 2. check is deleted
if not options.nodel and bookid not in not_deleted_list:
print_log('deleted, skip')
if options.save_deleted:
copy_fb2(bookid, data, options.save_deleted,
'save deleted file:')
return False
# 3. update not_deleted_list
if bookid in not_deleted_list:
not_deleted_list.remove(bookid)
else:
print 'INTERNAL ERROR:', bookid, 'not in not_deleted_list'
# 4. create fb2 (dom) if not
if not options.search_id:
fb2 = create_fb2(data)
if fb2 is None:
stats.errors += 1
copy_fb2(bookid, data)
return False
# 5. update
if not options.noup:
try:
d, book_info = update_fb2(fb2, bookid)
except:
print_exc()
stats.errors += 1
copy_fb2(bookid, data)
return False
data = d
# 6. save result
out_fn = str(bookid)+'.fb2'
if options.fn_format:
out_file = get_filename(book_info)
if not out_file:
out_file = os.path.join(options.out_dir, out_fn)
else:
out_file = out_file+'.fb2'
d = os.path.dirname(out_file)
if os.path.isdir(d):
pass
elif os.path.exists(d):
print_log('ERROR: file exists:', d, level=3)
return False
else:
os.makedirs(d)
else:
out_file = os.path.join(options.out_dir, out_fn)
if options.nozip:
open(out_file, 'w').write(data)
else:
try:
save_zip(out_file, out_fn, data)
except:
print
print '>>', len(out_file), out_file
raise
stats.passed += 1
return True
def process(arg):
global not_deleted_list, update_time
curs = _connect.cursor()
res = curs.execute("SELECT BookId FROM libbook WHERE NOT (Deleted&1) and FileType = 'fb2' ")
not_deleted_list = curs.fetchall()
not_deleted_list = set([i[0] for i in not_deleted_list])
curs.execute('SELECT * FROM librusec')
update_time = curs.fetchone()[0]
for fn in walk(arg):
for ftype, z_filename, data in read_file(fn, zip_charset='utf-8'):
process_file(fn, ftype, z_filename, data)
if options.search_deleted:
deleted = set()
for fn in walk(options.search_deleted):
bookid = base_name(fn, '.fb2.zip')
try:
bookid = int(bookid)
except ValueError:
continue
if bookid in not_deleted_list:
                deleted.add(fn)
for fn in deleted:
for ftype, z_filename, data in read_file(fn, zip_charset='utf-8'):
ret = process_file(fn, ftype, z_filename, data)
if ret:
print_log('restore deleted:', bookid)
print
print 'processed:', stats.total
print 'passed:', stats.passed
print 'fixed:', stats.fixed
print 'errors:', stats.errors
if options.not_found:
fd = open(options.not_found, 'w')
for bookid in not_deleted_list:
print >> fd, bookid
def main():
# parsing command-line options
global options, db_file, _connect
sql_dir = os.path.join(os.path.dirname(sys.argv[0]), 'sql')
option_list = [
make_option("-o", "--out-dir", dest="out_dir",
metavar="DIR", help="save updated fb2 files to this dir"),
make_option("-g", "--generate-db", dest="update_db",
action="store_true", default=False,
help="generate db"),
make_option("-d", "--do-not-delete", dest="nodel",
action="store_true", default=False,
help="don't delete duplicate files"),
make_option("-f", "--do-not-fix", dest="nofix",
action="store_true", default=False,
help="don't fix an xml"),
make_option("-u", "--do-not-update", dest="noup",
action="store_true", default=False,
help="don't update fb2 meta info"),
make_option("-z", "--do-not-zip", dest="nozip",
action="store_true",
default=False, help="don't zip result files"),
make_option("-i", "--search-id", dest="search_id",
action="store_true",
default=False, help="search bookid in fb2"),
make_option("-a", "--save-deleted", dest="save_deleted",
metavar="DIR", help="save deleted fb2 files to this dir"),
make_option("-c", "--search-deleted", dest="search_deleted",
metavar="DIR", help="search deleted fb2 files in this dir"),
make_option("-b", "--save-bad-fb2", dest="save_bad",
metavar="DIR", help="save bad fb2 files to this dir"),
make_option("-s", "--sql-dir", dest="sql_dir",
default=sql_dir, metavar="DIR",
help="search sql files in this dir"),
make_option("-e", "--output-encoding", dest="output_encoding",
default = 'utf-8', metavar="ENC",
help="fb2 output encoding"),
make_option("-l", "--log-file", dest="log_file",
metavar="FILE",
help="output log to this file"),
make_option("-n", "--not-found-file", dest="not_found",
metavar="FILE",
help="save missing books to this file"),
make_option("-F", "--filename-pattern", dest="fn_format",
metavar="PATTERN",
help="output filenames pattern"),
]
parser = OptionParser(option_list=option_list,
usage=("usage: %prog [options] "
"input-files-or-dirs"),
version="%prog "+prog_version)
options, args = parser.parse_args()
LogOptions.level = 0
db_file = os.path.join(options.sql_dir, 'db.sqlite')
_connect = sqlite3.connect(db_file)
if options.update_db:
# update db
print_log('start update db')
ret = update_db()
if ret:
print_log('done')
else:
print_log('fail')
return
if len(args) == 0:
return
#
if len(args) == 0:
sys.exit('wrong num args')
in_file = args[0]
if not options.out_dir:
sys.exit('option --out-dir required')
for f in args:
if not os.path.exists(f):
sys.exit('file does not exists: '+f)
if not os.path.isdir(options.out_dir):
sys.exit('dir does not exists: '+options.out_dir)
if options.save_bad and not os.path.isdir(options.save_bad):
sys.exit('dir does not exists: '+options.save_bad)
if options.save_deleted and not os.path.isdir(options.save_deleted):
sys.exit('dir does not exists: '+options.save_deleted)
if not os.path.exists(db_file):
print_log('start update db')
ret = update_db()
if ret:
print_log('done')
else:
print_log('fail')
return
#
stats.total_files = count_files(args)
print 'total files:', stats.total_files
if options.log_file:
LogOptions.outfile = open(options.log_file, 'w')
stats.starttime = time.time()
process(args)
et = time.time() - stats.starttime
print 'elapsed time: %.2f secs' % et
if __name__ == '__main__':
#main()
print update_fb2(open('../example.fb2').read(), 55142)
| gpl-3.0 | -3,015,341,561,297,718,300 | 32.684337 | 105 | 0.526433 | false | 3.565162 | false | false | false |
TwilioDevEd/call-tracking-django | call_tracking/migrations/0001_initial.py | 1 | 1350 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='LeadSource',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(max_length=100, blank=True)),
('incoming_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, unique=True)),
('forwarding_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, blank=True)),
],
),
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=2)),
('source', models.ForeignKey(to='call_tracking.LeadSource')),
],
),
]
| mit | -8,577,940,808,367,990,000 | 38.705882 | 114 | 0.591852 | false | 4.205607 | false | false | false |
NaviRice/KinectServer | python_src/navirice_get_image.py | 1 | 3410 | # Echo client program
import socket
import time
import os
os.system("protoc -I=../proto --python_out=. ../proto/navirice_image.proto")
import navirice_image_pb2
class Image:
def __init__(self, width, height, channels, type_, data, data_size):
self.width = width
self.height = height
self.channels = channels
self.type_ = type_
self.data = data
self.data_size = data_size
class KinectClient:
def __init__(self, host, port):
self.host = host
self.port = port
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((host, port))
self.last_count = 0
def reconnect(self):
self.s.close()
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((self.host, self.port))
def navirice_capture_settings(self, rgb, ir, depth):
print("---Requesting new settings...")
settings = navirice_image_pb2.ProtoCaptureSetting()
settings.IR = ir
settings.RGB = rgb
settings.Depth = depth
settings.count = 1
request_msg = navirice_image_pb2.ProtoRequest()
request_msg.state = navirice_image_pb2.ProtoRequest.CAPTURE_SETTING
request_msg.count = 1
request_msg.capture_setting_value.CopyFrom(settings)
bytes_sent = self.s.send(request_msg.SerializeToString())
count_msg = self.s.recv(1024)
def navirice_get_image(self):
print("---Requesting new image...")
request_msg = navirice_image_pb2.ProtoRequest()
request_msg.state = navirice_image_pb2.ProtoRequest.IMAGE
request_msg.count = 1
bytes_sent = self.s.send(request_msg.SerializeToString())
count_msg = self.s.recv(1024)
count_obj = navirice_image_pb2.ProtoImageCount()
count_obj.ParseFromString(count_msg)
count = count_obj.count
print("image count: ", count)
continue_msg = navirice_image_pb2.ProtoAcknowledge()
continue_msg.count = 1
if self.last_count >= count:
print("Requesting stop because image count not new")
continue_msg.state = navirice_image_pb2.ProtoAcknowledge.STOP
bytes_sent = self.s.send(continue_msg.SerializeToString())
return None, self.last_count
else:
print("Requesting --continue")
continue_msg.state = navirice_image_pb2.ProtoAcknowledge.CONTINUE
bytes_sent = self.s.send(continue_msg.SerializeToString())
data = "".encode()
b_size = count_obj.byte_count
print("going to receive ", b_size, " bytes")
t = self.s.recv(b_size, socket.MSG_WAITALL)
data += t
print("received total of ", len(data), " bytes")
img_set = navirice_image_pb2.ProtoImageSet()
img_set.ParseFromString(data)
self.last_count = count
return img_set, count
HOST = '127.0.0.1' # The remote host
PORT = 29000 # The same port as used by the server
kc = KinectClient(HOST, PORT)
while(1):
kc.navirice_capture_settings(True, True, True)
img_set, last_count = kc.navirice_get_image()
if img_set is not None:
print("IMG#: ", img_set.count)
print("RGB width: ", img_set.RGB.width)
print("RGB height: ", img_set.RGB.height)
print("RGB channels: ", img_set.RGB.channels)
| mit | -5,851,558,242,900,377,000 | 33.444444 | 77 | 0.615543 | false | 3.544699 | false | false | false |
vermouth1992/Leetcode | python/661.image-smoother.py | 1 | 2408 | #
# @lc app=leetcode id=661 lang=python3
#
# [661] Image Smoother
#
# https://leetcode.com/problems/image-smoother/description/
#
# algorithms
# Easy (52.47%)
# Total Accepted: 55.7K
# Total Submissions: 106.1K
# Testcase Example: '[[1,1,1],[1,0,1],[1,1,1]]'
#
# An image smoother is a filter of the size 3 x 3 that can be applied to each
# cell of an image by rounding down the average of the cell and the eight
# surrounding cells (i.e., the average of the nine cells in the blue smoother).
# If one or more of the surrounding cells of a cell is not present, we do not
# consider it in the average (i.e., the average of the four cells in the red
# smoother).
#
# Given an m x n integer matrix img representing the grayscale of an image,
# return the image after applying the smoother on each cell of it.
#
#
# Example 1:
#
#
# Input: img = [[1,1,1],[1,0,1],[1,1,1]]
# Output: [[0,0,0],[0,0,0],[0,0,0]]
# Explanation:
# For the points (0,0), (0,2), (2,0), (2,2): floor(3/4) = floor(0.75) = 0
# For the points (0,1), (1,0), (1,2), (2,1): floor(5/6) = floor(0.83333333) = 0
# For the point (1,1): floor(8/9) = floor(0.88888889) = 0
#
#
# Example 2:
#
#
# Input: img = [[100,200,100],[200,50,200],[100,200,100]]
# Output: [[137,141,137],[141,138,141],[137,141,137]]
# Explanation:
# For the points (0,0), (0,2), (2,0), (2,2): floor((100+200+200+50)/4) =
# floor(137.5) = 137
# For the points (0,1), (1,0), (1,2), (2,1): floor((200+200+50+200+100+100)/6)
# = floor(141.666667) = 141
# For the point (1,1): floor((50+200+200+200+200+100+100+100+100)/9) =
# floor(138.888889) = 138
#
#
#
# Constraints:
#
#
# m == img.length
# n == img[i].length
# 1 <= m, n <= 200
# 0 <= img[i][j] <= 255
#
#
#
from typing import List
class Solution:
def imageSmoother(self, img: List[List[int]]) -> List[List[int]]:
output = []
for i in range(len(img)):
output.append([])
for j in range(len(img[0])):
sum = 0
total = 0
for k in range(-1, 2):
for m in range(-1, 2):
row = i + k
col = j + m
if row >= 0 and row < len(img) and col >= 0 and col < len(img[0]):
sum += img[row][col]
total += 1
output[i].append(sum // total)
return output
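
# Quick self-check (sketch) using the first example from the problem statement above:
# Solution().imageSmoother([[1, 1, 1], [1, 0, 1], [1, 1, 1]]) == [[0, 0, 0], [0, 0, 0], [0, 0, 0]]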
| mit | 6,298,467,093,459,828,000 | 28.728395 | 90 | 0.545681 | false | 2.723982 | false | false | false |
thunderpush/thunderpush | setup.py | 1 | 1160 | #!/usr/bin/env python
from setuptools import setup, find_packages
install_requires = [
'sockjs-tornado==1.0.0',
'tornado==3.2.2',
'argparse',
# simplejson is really required for py3 support to avoid encoding problems
'simplejson'
]
tests_require = [
'mock==2.0.0'
]
setup(
name='thunderpush',
version='1.0.1',
author='Krzysztof Jagiello',
author_email='[email protected]',
description='Tornado and SockJS based, complete Web push solution.',
packages=find_packages(),
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
license='BSD',
include_package_data=True,
url='https://github.com/thunderpush/thunderpush',
test_suite='thunderpush.tests.suite',
entry_points={
'console_scripts': [
'thunderpush = thunderpush.runner:main',
],
},
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Internet',
],
)
| bsd-3-clause | 8,832,320,492,365,194,000 | 25.363636 | 78 | 0.625862 | false | 3.68254 | false | false | false |
flother/agate | agate/table/join.py | 1 | 5769 | #!/usr/bin/env python
# pylint: disable=W0212
from agate.rows import Row
from agate import utils
@utils.allow_tableset_proxy
def join(self, right_table, left_key, right_key=None, inner=False, require_match=False, columns=None):
"""
    Create a new table by joining two tables on common values.
This method performs the equivalent of SQL's "left outer join", combining
columns from this table and from :code:`right_table` anywhere that the
:code:`left_key` and :code:`right_key` are equivalent.
Where there is no match for :code:`left_key` the left columns will
be included with the right columns set to :code:`None` unless
the :code:`inner` argument is specified.
    If :code:`left_key` and :code:`right_key` are column names, only
    the left table's key columns will be included in the output table.
Column names from the right table which also exist in this table will
be suffixed "2" in the new table.
:param right_table:
The "right" table to join to.
:param left_key:
        Either the name of a column from this table to join on, a
sequence of such column names, or a :class:`function` that takes a
row and returns a value to join on.
:param right_key:
        Either the name of a column from :code:`right_table` to join on, a
sequence of such column names, or a :class:`function` that takes a
row and returns a value to join on. If :code:`None` then
:code:`left_key` will be used for both.
:param inner:
Perform a SQL-style "inner join" instead of a left outer join. Rows
which have no match for :code:`left_key` will not be included in
the output table.
:param require_match:
If true, an exception will be raised if there is a left_key with no
matching right_key.
:param columns:
A sequence of column names from :code:`right_table` to include in
the final output table. Defaults to all columns not in
:code:`right_key`.
:returns:
A new :class:`.Table`.
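
    Example (an illustrative sketch - the table and column names here are
    hypothetical, not part of the library)::

        # Keep every row of ``employees`` and append matching columns from
        # ``departments``; rows with no match get None in those columns.
        joined = employees.join(departments, 'department_id')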
"""
if right_key is None:
right_key = left_key
# Get join columns
right_key_indices = []
left_key_is_func = hasattr(left_key, '__call__')
left_key_is_sequence = utils.issequence(left_key)
# Left key is a function
if left_key_is_func:
left_data = [left_key(row) for row in self._rows]
# Left key is a sequence
elif left_key_is_sequence:
left_columns = [self._columns[key] for key in left_key]
left_data = zip(*[column.values() for column in left_columns])
# Left key is a column name/index
else:
left_data = self._columns[left_key].values()
right_key_is_func = hasattr(right_key, '__call__')
right_key_is_sequence = utils.issequence(right_key)
# Right key is a function
if right_key_is_func:
right_data = [right_key(row) for row in right_table._rows]
# Right key is a sequence
elif right_key_is_sequence:
right_columns = [right_table._columns[key] for key in right_key]
right_data = zip(*[column.values() for column in right_columns])
right_key_indices = [right_table._columns._keys.index(key) for key in right_key]
# Right key is a column name/index
else:
right_column = right_table._columns[right_key]
right_data = right_column.values()
right_key_indices = [right_table._columns._keys.index(right_key)]
# Build names and type lists
column_names = list(self._column_names)
column_types = list(self._column_types)
for i, column in enumerate(right_table._columns):
name = column.name
if columns is None and i in right_key_indices:
continue
if columns is not None and name not in columns:
continue
if name in self.column_names:
column_names.append('%s2' % name)
else:
column_names.append(name)
column_types.append(column.data_type)
if columns is not None:
right_table = right_table.select([n for n in right_table._column_names if n in columns])
right_hash = {}
for i, value in enumerate(right_data):
if value not in right_hash:
right_hash[value] = []
right_hash[value].append(right_table._rows[i])
# Collect new rows
rows = []
if self._row_names is not None:
row_names = []
else:
row_names = None
# Iterate over left column
for left_index, left_value in enumerate(left_data):
matching_rows = right_hash.get(left_value, None)
if require_match and matching_rows is None:
raise ValueError('Left key "%s" does not have a matching right key.' % left_value)
# Rows with matches
if matching_rows:
for right_row in matching_rows:
new_row = list(self._rows[left_index])
for k, v in enumerate(right_row):
if columns is None and k in right_key_indices:
continue
new_row.append(v)
rows.append(Row(new_row, column_names))
if self._row_names is not None:
row_names.append(self._row_names[left_index])
# Rows without matches
elif not inner:
new_row = list(self._rows[left_index])
for k, v in enumerate(right_table._column_names):
if columns is None and k in right_key_indices:
continue
new_row.append(None)
rows.append(Row(new_row, column_names))
if self._row_names is not None:
row_names.append(self._row_names[left_index])
return self._fork(rows, column_names, column_types, row_names=row_names)
| mit | 3,946,371,038,658,788,400 | 33.963636 | 102 | 0.618998 | false | 3.843438 | false | false | false |
jonathanmorgan/reddit_collect | redditCollector.py | 1 | 78933 | # start to python 3 support:
from __future__ import unicode_literals
'''
Copyright 2012, 2013 Jonathan Morgan
This file is part of http://github.com/jonathanmorgan/reddit_collect.
reddit_collect is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
reddit_collect is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with http://github.com/jonathanmorgan/reddit_collect. If not, see http://www.gnu.org/licenses/.
'''
#!/usr/bin/python
#================================================================================
# imports
#================================================================================
# base python libraries
import datetime
import gc
import sys
import time
# django imports
import django.db
# site-specific imports.
#site_path = '/home/socs/socs_reddit/'
#if site_path not in sys.path:
# sys.path.append( site_path )
#import myLib
import reddit_collect.models
# python_utilities
from python_utilities.email.email_helper import EmailHelper
from python_utilities.exceptions.exception_helper import ExceptionHelper
from python_utilities.logging.summary_helper import SummaryHelper
from python_utilities.rate_limited.basic_rate_limited import BasicRateLimited
from python_utilities.strings.string_helper import StringHelper
# ReddiWrapper
from reddiwrap.ReddiWrap import ReddiWrap
#================================================================================
# class RedditCollector
#================================================================================
class RedditCollector( BasicRateLimited ):
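    '''
    Collects posts and comments from reddit using a ReddiWrap instance and
    stores them in the database via the reddit_collect django models.
    Extends BasicRateLimited so requests to reddit can be rate-limited, and
    can optionally send status/error email using an EmailHelper.  The main
    entry points are collect_posts() and collect_comments(), below.
    '''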
#============================================================================
# CONSTANTS-ish
#============================================================================
STATUS_SUCCESS = "Success!"
STATUS_PREFIX_ERROR = "ERROR: "
# DEBUG - changed to instance variable.
#DEBUG_FLAG = False
#============================================================================
# instance variables
#============================================================================
reddiwrap_instance = None
user_agent = ""
username = ""
password = ""
cookie_file_path = ""
# email helpers.
email_helper = None
email_status_address = ""
# rate limiting - in parent class BasicRateLimited.
#do_manage_time = True
#rate_limit_in_seconds = 2
#request_start_time = None
# response item limit - 200 when not logged in, 500 when logged in, 1500 when gold.
response_item_limit = 1500
# performance
do_bulk_create = True
# check for existing?
do_check_for_existing = True
# encoding, to deal with utf8 in mysql actually just allowing for up to
# 3-byte unicode characters, not all (4-byte and above).
convert_4_byte_unicode_to_entity = False
# error handling
error_limit_count = 10
exception_helper = ExceptionHelper()
# debug_flag - don't use this for outputting details, just for serious
# debugging information.
debug_flag = False
# output details?
do_output_details = False
# bulk comment processing.
bulk_comments_processed = 0
#---------------------------------------------------------------------------
# __init__() method
#---------------------------------------------------------------------------
def __init__( self ):
'''
Constructor
'''
# instance variables
self.reddiwrap_instance = None
self.user_agent = ""
self.username = ""
self.password = ""
self.cookie_file_path = ""
# flag to say if this instance should manage time.
self.do_manage_time = True
self.rate_limit_in_seconds = 2
self.request_start_time = None
# email
self.email_helper = None
self.email_status_address = ""
# response item limit - 200 when not logged in, 500 when logged in, 1500 when gold.
self.response_item_limit = 1500
# performance
self.do_bulk_create = True
# check for existing?
self.do_check_for_existing = True
# encoding
self.convert_4_byte_unicode_to_entity = True
# error handling
self.error_limit_count = 10
self.exception_helper = ExceptionHelper()
# debug_flag - don't use this for outputting details, just for serious
# debugging information.
self.debug_flag = False
# output details?
self.do_output_details = False
# bulk comment processing.
self.bulk_comments_processed = 0
#-- END constructor --#
#============================================================================
# instance methods
#============================================================================
def collect_comments( self,
posts_qs_IN = None,
do_update_existing_IN = True,
*args,
**kwargs ):
'''
This method accepts a QuerySet of django reddit_collect Post() instances
for which you want to collect comments. Uses ReddiWrapper to do the
actual retrieval, then stores them off in database using django.
Parameters:
- posts_qs_IN - defaults to None. QuerySet containing posts you want to collect comments for. If None, will collect for all posts in database whose comment status is not "done".
- do_update_existing_IN - Boolean, True if we want to update existing comments that are already in the database, false if not. Defaults to True.
Postconditions: Stores comments for each post to database using django
model classes. Returns a status message.
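
        Example usage (an illustrative sketch - assumes the collector has
        already been configured and its reddiwrap instance logged in; the
        names below are hypothetical):
            collector = RedditCollector()
            # ...set user_agent, username, password, cookie_file_path, etc....
            new_posts_qs = reddit_collect.models.Post.objects.filter( comment_collection_status = reddit_collect.models.Post.COMMENT_COLLECTION_STATUS_NEW )
            status = collector.collect_comments( posts_qs_IN = new_posts_qs )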
# Original Code
        posts = myLib.posts_of_reddit(subreddit.name); # correct
print "saving Comments ... ";
i = 0;
for post in posts:
pst = myLib.make_post_obj(post);
reddit.fetch_comments(pst);
myLib.iterate_comments(pst.comments); # iterates and save comments
time.sleep(1);
i = i + 1;
print i;
'''
# return reference
status_OUT = self.STATUS_SUCCESS
# declare variables
me = "collect_comments"
my_summary_helper = None
reddiwrap = None
posts_to_process_qs = None
post_count = -1
current_post = None
post_counter = -1
continue_collecting = True
current_rw_post = None
do_update_existing = False
# variables for dealing with intermittent connection problems.
comments_collected = False
connection_error_count = -1
temp_exception_string = ""
exception_details = ""
exception_message = ""
error_email_subject = ""
error_email_message = ""
error_email_status = ""
# variables for storing comments in database.
django_do_bulk_create = True
django_comment_create_list = []
comment_create_count = -1
django_current_create_count = -1
# variables for updating post based on comment collection.
do_update_post = False
# variables for exception handling.
exception_type = ""
exception_value = ""
exception_traceback = ""
# variables for summary information
new_posts_processed = -1
first_reddit_id_processed = ""
start_dt = ""
temp_string = ""
summary_string = ""
summary_email_subject = ""
summary_email_message = ""
# initialize summary helper
my_summary_helper = SummaryHelper()
# get reddiwrap instance
reddiwrap = self.get_reddiwrap_instance()
# initialize variables
post_counter = 0
comment_create_count = 0
django_do_bulk_create = False
django_bulk_create_list = []
django_bulk_create_count = 0
start_dt = datetime.datetime.now()
# set bulk create flag
django_do_bulk_create = self.do_bulk_create
# updating existing?
do_update_existing = do_update_existing_IN
# check to see if we have a QuerySet
if ( ( posts_qs_IN ) and ( posts_qs_IN != None ) ):
# yes. Use QuerySet passed in.
posts_to_process_qs = posts_qs_IN
else:
# no - get all that are eligible to be processed.
            posts_to_process_qs = reddit_collect.models.Post.objects.exclude( comment_collection_status = reddit_collect.models.Post.COMMENT_COLLECTION_STATUS_DONE )
#-- END check to see if posts passed in --#
# big outer try/except.
try:
# loop over posts.
post_counter = 0
do_update_post = False
django_current_create_count = 0
continue_collecting = True
post_count = len( posts_to_process_qs )
for current_post in posts_to_process_qs:
# see if it is OK to continue.
# call may_i_continue() if other than first post
if ( post_counter > 0 ):
# not first post. call may_i_continue()
continue_collecting = self.may_i_continue()
#-- END check to see if first post --#
# OK to continue?
if continue_collecting == True:
# reset variables
do_update_post = False
django_current_create_count = 0
# increment post counter
post_counter += 1
print( "- " + str( post_counter ) + " of " + str( post_count ) + " - " + str( datetime.datetime.now() ) + " - post " + str( current_post.id ) + " ( reddit ID: " + current_post.reddit_id + " ) by " + current_post.author_name + " - num_comments: " + str( current_post.num_comments ) + " - created on " + str( current_post.created_utc_dt ) )
# memory management.
gc.collect()
django.db.reset_queries()
# set request start time (OK to be a little inefficient)
self.start_request()
# populate a reddiwrap Post instance.
current_rw_post = current_post.create_reddiwrap_post()
# use reddiwrap to load comments.
# wrap in loop
comments_collected = False
connection_error_count = 0
while ( comments_collected == False ):
try:
reddiwrap.fetch_comments( current_rw_post, self.response_item_limit, "old" );
comments_collected = True
except Exception as e:
# set flag to False
comments_collected = False
# increment error count.
connection_error_count += 1
# make exception message
exception_message = "In " + me + ": reddiwrap.fetch_comments() threw exception, fetching comments for post " + str( current_post.id ) + " ( reddit ID: " + current_post.reddit_id + " ); post " + str( post_counter ) + " of " + str( post_count )
# are we up to error limit yet?
if ( connection_error_count >= self.error_limit_count ):
# yes - send email about problems
error_email_subject = "Connection problem with comment collector."
exception_message = "Comment collector failed to connect " + str( self.error_limit_count ) + " times. Details:\n" + exception_message
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception
raise( e )
else:
# haven't reached error limit yet. Process exception,
# no email, pause 10 seconds, then try again.
self.process_exception( e, exception_message, False )
time.sleep( 10 )
#-- END check to see if we've exceeded error limit. --#
#-- END try/except around collecting comments from reddit. --#
#-- END loop around collecting comments --#
# !update - bulk or not?
if ( django_do_bulk_create == True ):
                        # so we can get counts, set self.bulk_comments_processed to 0 before each call.
self.bulk_comments_processed = 0
# process comment list in bulk (recursive)
django_bulk_create_list = self.process_comments_bulk( post_IN = current_post,
comment_list_IN = current_rw_post.comments,
do_update_existing_IN = do_update_existing )
# get number of comments processed.
django_current_create_count = self.bulk_comments_processed
comment_create_count += django_current_create_count
else:
# process comment list (recursive)
django_current_create_count = self.process_comments( post_IN = current_post, comment_list_IN = current_rw_post.comments, do_update_existing_IN = do_update_existing )
# increment total count
comment_create_count += django_current_create_count
#-- END check to see if bulk or not. --#
# !Update post?
# update the post to show that it has been comment-harvested.
if ( current_post.comment_collection_status == reddit_collect.models.Post.COMMENT_COLLECTION_STATUS_NEW ):
# update status to "ongoing".
current_post.comment_collection_status = reddit_collect.models.Post.COMMENT_COLLECTION_STATUS_ONGOING
# we need to save updates.
do_update_post = True
#-- END check to see if first-time updating comments. --#
# check to see if more comments detected that reddiwrap
# couldn't pull in.
if ( current_rw_post.has_more_comments == True ):
# yes, more comments. Store off details.
current_post.has_more_comments = current_rw_post.has_more_comments
current_post.more_comments_details = current_rw_post.more_comments
# we need to save updates.
do_update_post = True
#-- END check to see if more comments detected. --#
# did we actually process any comments?
if ( django_current_create_count >= 0 ):
# we did. set number of comments processed.
current_post.num_comments_collected = django_current_create_count
# we need to save updates.
do_update_post = True
#-- END check to see if we have a valid comment count --#
# update post?
if ( do_update_post == True ):
# we do. call save() method.
current_post.save()
#-- END check to see if we update post. --#
else:
# may_i_continue() returned False. Once that happens once,
# unlikely it will return True ever again.
print( "====> In " + me + ": may_i_continue() returned False. This shouldn't be possible. Falling out of loop." )
break
#-- END check to see if we are OK to continue collecting. --#
print( " ==> In " + me + ": processed " + str( django_current_create_count ) + " comments." )
#-- END loop over posts. --#
except Exception as e:
# yes - send email about problems
error_email_subject = "Unexpected problem with comment collector."
exception_message = "Unexpected problem with comment collector. Details:\n"
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
#raise( e )
#-- END super try/except around entire loop.
# output overall summary
summary_string = ""
# add stuff to summary
my_summary_helper.set_stop_time()
my_summary_helper.set_prop_value( "post_count", post_count )
my_summary_helper.set_prop_desc( "post_count", "Posts passed in" )
my_summary_helper.set_prop_value( "post_counter", post_counter )
my_summary_helper.set_prop_desc( "post_counter", "Posts processed" )
my_summary_helper.set_prop_value( "comment_create_count", comment_create_count )
my_summary_helper.set_prop_desc( "comment_create_count", "Comments processed" )
summary_string += my_summary_helper.create_summary_string( item_prefix_IN = "==> " )
print( summary_string )
# email summary
summary_email_subject = "Comment collection complete - " + str( datetime.datetime.now() )
summary_email_message = "Comment collection summary:\n"
summary_email_message += summary_string
summary_email_status = self.email_send_status( summary_email_message, summary_email_subject )
print( "==> Summary email status: " + summary_email_status )
return status_OUT
#-- END method collect_comments() --#
def collect_posts( self,
subreddit_IN = "all",
post_count_limit_IN = -1,
until_id_IN = "",
until_date_IN = None,
subreddit_in_list_IN = [],
after_id_IN = None,
before_id_IN = None,
do_update_existing_IN = True,
*args,
**kwargs ):
'''
This method collects posts from any subreddit you want, defaulting to the
/r/all subreddit, which allows access to the entire history of reddit.
Accepts parameters that let you collect from a given ID on (the
easiest way to collect starting at a certain date - find a post around
the date you want, collect from that ID on), to a certain date, until
you find a certain post ID, etc.
Parameters:
- subreddit_IN - defaults to "all". Subreddit you want to collect from.
- post_count_limit_IN - number of posts we want to collect.
        - until_id_IN - ID of the post at which we stop collecting - collection ends once this ID is encountered in the stream.  Pass the bare reddit ID, without the "t3_" type prefix (it is compared to each post's ID as returned by reddiwrap).
- until_date_IN - datetime instance of UTC/GMT date and time we want to collect to (will stop collecting once a date after this is encountered).
- subreddit_in_list_IN - list of subreddits to limit our collection to (each should begin with "t5_"). If you use this, in most cases, you should leave subreddit_IN = "all".
- after_id_IN - ID you want to get posts after. Must include type (start with "t3_").
- before_id_IN - ID before which you want posts. Must include type (start with "t3_").
- do_update_existing_IN - Boolean, True if we want to update existing posts that are already in the database, false if not. Defaults to True.
Parameters to come (TK):
- start_date_IN - datetime instance of date and time after which we want to collect (will ignore until a post is greater-than-or-equal to this date). For now, to collect from a certain date, find a post around the date you want, collect from that ID on using the after_id_IN parameter.
Postconditions: Stores each matching post to the database using django
model classes. Returns a status message.
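
        Example usage (an illustrative sketch - assumes a configured,
        logged-in collector; the ID below is a placeholder, not a real
        reddit ID):
            collector = RedditCollector()
            # collect new posts from /r/all, stopping when the post whose
            # bare reddit ID is "abc123" is reached.
            status = collector.collect_posts( subreddit_IN = "all", until_id_IN = "abc123" )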
'''
# return reference
status_OUT = self.STATUS_SUCCESS
# declare variables
me = "collect_posts"
my_summary_helper = None
reddiwrap = None
post_count = -1
api_url = ""
post_list = None
continue_collecting = True
current_rw_post = None
current_post_reddit_id = ""
current_post_created = ""
current_post_created_dt = None
current_post_subreddit_id = ""
do_update_existing = False
# variables for storing post in database.
django_do_bulk_create = True
django_post_create_list = []
django_bulk_create_count = -1
django_current_create_count = -1
django_post = None
is_post_in_database = False
do_call_post_save = False
# variables for exception handling.
exception_type = ""
exception_value = ""
exception_traceback = ""
# variables for summary information
new_posts_processed = -1
update_count = -1
first_reddit_id_processed = ""
start_dt = None
temp_dt = None
temp_string = ""
summary_string = ""
summary_email_subject = ""
summary_email_message = ""
# get reddiwrap instance
reddiwrap = self.get_reddiwrap_instance()
# initialize variables
post_count = 0
new_posts_processed = 0
update_count = 0
django_do_bulk_create = self.do_bulk_create
django_bulk_create_count = 0
# initialize summary helper
my_summary_helper = SummaryHelper()
# updating existing?
do_update_existing = do_update_existing_IN
# create URL - first, add in reddit, limit.
api_url = "/r/%s/new?limit=100" % subreddit_IN
# add ability to add parameterized limit to URL?
# after param?
if ( ( after_id_IN ) and ( after_id_IN != None ) and ( after_id_IN != "" ) ):
# yes. Add it to the URL.
api_url += "&after=" + after_id_IN
#-- END check to see if after ID passed in. --#
# before param?
if ( ( before_id_IN ) and ( before_id_IN != None ) and ( before_id_IN != "" ) ):
# yes. Add it to the URL.
api_url += "&before=" + before_id_IN
#-- END check to see if after ID passed in. --#
# big outer try/except.
try:
# loop until flag is false
while continue_collecting == True:
print( "In " + me + ": top of loop - " + str( datetime.datetime.now() ) + " - latest post = " + current_post_reddit_id + " ( " + str( current_post_created_dt ) + " ), number " + str( post_count ) + "." )
# memory management.
gc.collect()
django.db.reset_queries()
# set request start time (OK to be a little inefficient)
self.start_request()
# bulk create?
if ( django_do_bulk_create == True ):
# clear out the bulk create list.
django_post_create_list = []
#-- END check to see if doing bulk create. --#
# get first set of results, or grab next set of results.
if ( post_count == 0 ):
# get first set of results for /r/all
post_list = reddiwrap.get( api_url )
else:
# get next set of posts.
post_list = reddiwrap.get_next()
#-- END check to see how we grab more posts. --#
temp_dt = datetime.datetime.now()
print( "In " + me + ": after retrieving stuff from reddit - " + str( temp_dt ) + "; elapsed: " + str( temp_dt - self.request_start_time ) + " - latest post = " + current_post_reddit_id + " ( " + str( current_post_created_dt ) + " ), number " + str( post_count ) + "." )
#--------------------------------------------------------------------
# loop over posts.
#--------------------------------------------------------------------
for current_rw_post in post_list:
# increment post counter.
post_count += 1
# initialize variables
do_call_post_save = False
# get info. on current post.
current_post_reddit_id = current_rw_post.id
current_post_id_with_type = "t3_" + current_post_reddit_id
current_post_created = current_rw_post.created_utc
current_post_created_dt = datetime.datetime.fromtimestamp( int( current_post_created ) )
current_post_subreddit_id = current_rw_post.subreddit_id
current_post_subreddit_name = current_rw_post.subreddit
current_post_url = current_rw_post.url
# initialize variables
is_post_in_database = False
if ( self.do_output_details == True ):
print( "In " + me + ": reddit post " + current_post_id_with_type + " is post number " + str( post_count ) + ", subreddit = " + current_post_subreddit_name + ": URL = " + current_post_url )
#-- END DEBUG --#
# first post? (I know, couldn't think of a better way...)
if ( post_count == 1 ):
# store the first ID.
first_reddit_id_processed = current_post_id_with_type
#-- END check to see if post count = 1 --#
#----------------------------------------------------------------
# conditions for stopping collection
#----------------------------------------------------------------
# do we have a post count limit?
if ( ( post_count_limit_IN ) and ( post_count_limit_IN > 0 ) ):
                        # yes - has the post count exceeded this limit?
if ( post_count > post_count_limit_IN ):
# it is. stop.
continue_collecting = False
print( "In " + me + ": reddit post " + current_post_reddit_id + " is post number " + str( post_count ) + ", putting us over our limit of " + str( post_count_limit_IN ) + ". Stopping collection." )
#-- END check to see if current post puts us over our post limit. --#
#-- END check for post count limit. --#
# do we have an until ID?
if ( ( until_id_IN ) and ( until_id_IN != "" ) ):
# is current ID the until ID?
if ( current_post_reddit_id == until_id_IN ):
# it is. stop.
continue_collecting = False
print( "In " + me + ": reddit post " + current_post_reddit_id + " is our until post ( " + until_id_IN + " ). Stopping collection." )
#-- END check to see if current post is post at which we are to stop. --#
#-- END check for until ID. --#
# do we have an until date?
if ( ( until_date_IN ) and ( until_date_IN != None ) ):
#-- we have an until date... is current date less than until date?
if ( current_post_created_dt < until_date_IN ):
# it is. stop.
continue_collecting = False
print( "In " + me + ": reddit post " + current_post_reddit_id + " has date " + str( current_post_created_dt ) + " that is past our until date. Stopping collection." )
#-- END check to see if post's date is past the cutoff. --#
#-- END check to see if we have an until date --#
#----------------------------------------------------------------
# collection logic
#----------------------------------------------------------------
# do we continue collecting?
if ( continue_collecting == True ):
# Only process if either there is no subreddit list, or the
# subreddit is in the list.
if ( ( len( subreddit_in_list_IN ) <= 0 ) or ( current_post_subreddit_id in subreddit_in_list_IN ) ):
# ==> post already in database?
try:
# lookup post.
django_post = reddit_collect.models.Post.objects.get( reddit_id = current_post_reddit_id )
# post is in database
is_post_in_database = True
# print( "In " + me + ": reddit post " + current_post_reddit_id + " is already in database." )
except:
# Not found. Create new instance, set flag.
django_post = reddit_collect.models.Post()
is_post_in_database = False
#-- END - check for post in database --#
# ==> Got existing? (Could put this in except, still not
# sure how I feel about using exceptions for program
# flow)
# OLD - allowing for update now.
#if ( django_post == None ):
# set fields from reddiwrap post instance.
django_post.set_fields_from_reddiwrap( current_rw_post, self.convert_4_byte_unicode_to_entity )
# !==> How do we process this post?
# - First, check if in database or not.
                            # - We add to bulk list if bulk is turned on AND the post is not in the database.
if ( is_post_in_database == False ):
# not in database. Increment new post count.
new_posts_processed += 1
# new post - bulk or not?
if ( django_do_bulk_create == True ):
# bulk create. Add to list.
django_post_create_list.append( django_post )
# bulk. No save.
do_call_post_save = False
else:
# not bulk. Just save.
do_call_post_save = True
#-- END check to see if bulk or not. --#
# if in database, if also are updating, set save() flag.
elif ( ( is_post_in_database == True ) and ( do_update_existing == True ) ):
# in database - increment update count.
update_count += 1
# not bulk. Just save.
do_call_post_save = True
else:
# for all others (probably existing, but not
# updating), don't save.
do_call_post_save = False
#-- END check to see how we process post --#
# see if we need to call save()
if ( do_call_post_save == True ):
# exception handling around save, to deal with encoding (!).
try:
# save to database.
django_post.save()
except Exception as e:
# error saving. Probably encoding error.
# process exception.
error_email_subject = "Problem saving post."
exception_message = "In " + me + ": reddit post " + current_post_reddit_id + " threw exception on save()."
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try-except around save() --#
#-- END check to see if we call save() --#
#-- END check to see if subreddit list indicates we should process this post. --#
#-- END check to see if we continue collecting --#
#-- END loop over current set of posts. --#
#--------------------------------------------------------------------
# bulk create?
#--------------------------------------------------------------------
if ( django_do_bulk_create == True ):
# yes, bulk create. Anything in the create list?
django_current_create_count = len( django_post_create_list )
if ( django_current_create_count > 0 ):
# yes. Bulk create, then update count.
# exception handling around save, to deal with encoding (!).
try:
# save to database using bulk_create().
reddit_collect.models.Post.objects.bulk_create( django_post_create_list )
except Exception as e:
# error saving. Probably encoding error.
# process exception.
error_email_subject = "Problem bulk-saving posts."
exception_message = "In " + me + ": bulk_create() threw exception. Last reddit post ID processed: " + current_post_reddit_id + "; count of posts being bulk created = " + str( django_current_create_count )
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try-except around bulk_create() --#
# increment the total posts created counter
django_bulk_create_count += django_current_create_count
# could empty create list here, but doing it at top of loop,
# so no need to do it twice.
#-- END check to see if posts to create --#
#-- END check to see if bulk create --#
#--------------------------------------------------------------------
# if we haven't already decided to stop, check if we can continue.
#--------------------------------------------------------------------
if ( continue_collecting == True ):
# no reason to stop yet... Do we have more posts?
if ( reddiwrap.has_next() == False ):
# no - do not continue.
continue_collecting = False
else:
# see if we are allowed to continue.
continue_collecting = self.may_i_continue()
#-- END checks to see if we continue collecting. --#
#-- END check to see if we continue collecting. --#
#-- END outer reddit collection loop --#
except Exception as e:
# yes - send email about problems
error_email_subject = "Unexpected problem with post collector."
exception_message = "Unexpected problem with post collector. Details:\n"
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
#raise( e )
#-- END super try/except around entire loop.
# output overall summary
summary_string = ""
# add stuff to summary
my_summary_helper.set_stop_time()
my_summary_helper.set_prop_value( "post_count", post_count )
my_summary_helper.set_prop_desc( "post_count", "Posts processed" )
my_summary_helper.set_prop_value( "new_posts_processed", new_posts_processed )
my_summary_helper.set_prop_desc( "new_posts_processed", "New posts" )
if ( do_update_existing == True ):
my_summary_helper.set_prop_value( "update_count", update_count )
my_summary_helper.set_prop_desc( "update_count", "Updated posts" )
#-- END check to see if we are updating. --#
if ( django_do_bulk_create == True ):
my_summary_helper.set_prop_value( "django_bulk_create_count", django_bulk_create_count )
my_summary_helper.set_prop_desc( "django_bulk_create_count", "Posts bulk_create()'ed" )
#-- END check to see if bulk create --#
my_summary_helper.set_prop_value( "first_reddit_id_processed", first_reddit_id_processed )
my_summary_helper.set_prop_desc( "first_reddit_id_processed", "First reddit ID processed" )
my_summary_helper.set_prop_value( "current_post_reddit_id", current_post_reddit_id )
my_summary_helper.set_prop_desc( "current_post_reddit_id", "Last reddit ID processed" )
summary_string += my_summary_helper.create_summary_string( item_prefix_IN = "==> " )
print( summary_string )
# email summary
summary_email_subject = "Post collection complete - " + str( datetime.datetime.now() )
summary_email_message = "Post collection summary:\n"
summary_email_message += summary_string
summary_email_status = self.email_send_status( summary_email_message, summary_email_subject )
print( "==> Summary email status: " + summary_email_status )
return status_OUT
#-- END method collect_posts() --#
def create_reddiwrap_instance( self, *args, **kwargs ):
'''
Creates and returns ReddiWrap instance for User Agent in this
instance, and if there is both a username and a password, also logs it
in using those credentials. If error, returns None.
'''
# return reference
instance_OUT = None
# declare variables
my_user_agent = ""
my_cookie_file_path = ""
my_username = ""
my_password = ""
do_login = False
login_result = -1
# create new instance.
my_user_agent = self.user_agent
instance_OUT = ReddiWrap( user_agent = my_user_agent )
# do we have a cookie file path? If so, try to load cookies.
my_cookie_file_path = self.cookie_file_path
if ( ( my_cookie_file_path ) and ( my_cookie_file_path != "" ) ):
instance_OUT.load_cookies( my_cookie_file_path )
#-- END check to see if cookie file path --#
# got username and password?
my_username = self.username
my_password = self.password
if ( ( ( my_username ) and ( my_username != "" ) ) and ( ( my_password ) and ( my_password != "" ) ) ):
# from cookie file, is this user already authenticated?
if ( instance_OUT.logged_in == False ):
# log in.
do_login = True
# logged in - same username? If not, log in again.
elif ( reddit.user.lower() != my_username.lower() ):
# log in.
do_login = True
else:
# logged_in is True and it is the same user name. No need to
# log in again.
do_login = False
#-- END check to see if we need to log in. --#
# Do we need to log in?
if ( do_login == True ):
# yes, we need to login. Try it.
print('logging into %s' % my_username)
login_result = instance_OUT.login( user = my_username, password = my_password )
# success?
if ( login_result != 0 ):
# fail. Output message.
print( 'ERROR - unable to log in with username: %s; password: %s (error code %d where 1 = invalid password, 2 = over rate limit, -1 = unexpected error)' % ( my_username, my_password, login_result ) )
# return None?
# instance_OUT = None
else:
# success! If cookie path, update cookies.
if ( ( my_cookie_file_path ) and ( my_cookie_file_path != "" ) ):
# save cookies.
instance_OUT.save_cookies( my_cookie_file_path )
#-- END check to see if we have a cookie file path. --#
#-- END check to see if success --#
#-- END check to see if we need to log in. --#
#-- END check to see if we have a username and a password. --#
return instance_OUT
#-- END create_reddiwrap_instance() --#
def email_initialize( self, smtp_host_IN = "localhost", smtp_port_IN = -1, smtp_use_ssl_IN = False, smtp_username_IN = "", smtp_password_IN = "", *args, **kwargs ):
'''
Accepts properties that can be used to initialize an email helper
instance. Initializes object, stores it in instance variable.
'''
# declare variables
my_email_helper = None
my_exception_helper = None
# create email helper
my_email_helper = EmailHelper()
# set host.
my_email_helper.set_smtp_server_host( smtp_host_IN )
# set port?
if ( ( smtp_port_IN ) and ( smtp_port_IN != None ) and ( smtp_port_IN > 0 ) ):
my_email_helper.set_smtp_server_port( smtp_port_IN )
#-- END check to see if port passed in. --#
# use ssl?
my_email_helper.set_smtp_server_use_SSL( smtp_use_ssl_IN )
# set username?
if ( ( smtp_username_IN ) and ( smtp_username_IN != None ) and ( smtp_username_IN != "" ) ):
my_email_helper.set_smtp_server_username( smtp_username_IN )
#-- END check to see if username passed in --#
# set password?
if ( ( smtp_password_IN ) and ( smtp_password_IN != None ) and ( smtp_password_IN != "" ) ):
my_email_helper.set_smtp_server_password( smtp_password_IN )
#-- END check to see if password passed in --#
# store in instance variable.
self.email_helper = my_email_helper
# Do we have an Exception Helper?
my_exception_helper = self.exception_helper
if ( ( my_exception_helper ) and ( my_exception_helper ) ):
# we do. Add the email_helper to the exception helper, also.
my_exception_helper.email_helper = my_email_helper
#-- END check to see if exception helper --#
#-- END method email_initialize() --#
def email_send( self, message_IN = None, subject_IN = None, from_address_IN = None, to_address_IN = None, *args, **kwargs ):
'''
Uses nested email_helper instance to send email. Returns status message.
If status returned is email_helper.STATUS_SUCCESS, then success, if
anything else, it is an error message explaining why the email was not
sent.
'''
# return reference
status_OUT = ""
# declare variables
my_email_helper = None
# get email helper
my_email_helper = self.email_helper
# got a helper?
if ( ( my_email_helper ) and ( my_email_helper != None ) ):
# yes - send email
status_OUT = my_email_helper.send_email( message_IN, subject_IN, from_address_IN, to_address_IN )
else:
# no - error.
status_OUT = "ERROR - no email helper present, so can't send email."
#-- END check to see if we have an email helper. --#
return status_OUT
#-- END method email_send() --#
def email_send_status( self, message_IN = None, subject_IN = None, *args, **kwargs ):
'''
If email helper and status email are set, uses nested email_helper
instance to send email to status email. Returns status message.
If status returned is email_helper.STATUS_SUCCESS, then success, if
anything else, it is an error message explaining why the email was not
sent.
'''
# return reference
status_OUT = ""
# declare variables
my_email_helper = None
my_status_email = ""
# get email helper and status address
my_email_helper = self.email_helper
my_status_email = self.email_status_address
# got a helper?
if ( ( my_email_helper ) and ( my_email_helper != None ) ):
# yes. Got a status email address?
if ( ( my_status_email ) and ( my_status_email != None ) and ( my_status_email != "" ) ):
# yes - send email
status_OUT = my_email_helper.send_email( message_IN, subject_IN, my_status_email, my_status_email )
else:
# no status email address set.
status_OUT = "ERROR - no email address set for sending status messages. Can't send email status."
#-- END check to see if status email present --#
else:
# no - error.
status_OUT = "ERROR - no email helper present, so can't send email."
#-- END check to see if we have a mail helper. --#
return status_OUT
#-- END method email_send_status() --#
def get_reddiwrap_instance( self, *args, **kwargs ):
'''
If there is a reddiwrap instance already in this instance, returns it.
If not, creates and returns ReddiWrap instance for User Agent in this
instance, and if there is both a username and a password, also logs it
in using those credentials. Stores a newly created instance in this
object, so it can be re-used. If error, returns None.
'''
# return reference
instance_OUT = None
# declare variables
instance_OUT = self.reddiwrap_instance
if ( ( not instance_OUT ) or ( instance_OUT == None ) ):
# create new instance.
instance_OUT = self.create_reddiwrap_instance()
# store it.
self.reddiwrap_instance = instance_OUT
# retrieve from that variable, just so we make sure it got stored.
instance_OUT = self.reddiwrap_instance
#-- END check to see if there is anything in m_e2user_node_type.
return instance_OUT
#-- END get_reddiwrap_instance() --#
def process_comments( self,
post_IN = None,
comment_list_IN = [],
parent_comment_IN = None,
do_update_existing_IN = True,
*args,
**kwargs ):
'''
Accepts django reddit_collect.models.Post instance, list of reddiwrap
comment instances. Loops over all comments in the list, processing
each, then checking for child comments. If child(ren) found, calls
this routine again, also passing parent comment, so they reference
both root parent post and parent comment. Returns count of comments
created. This method creates all django relations as well as storing
IDs from reddit. The process_comments_bulk() method stores reddit IDs
so comment relations can be pieced together, but doesn't create django
relations, as well.
Parameters:
- post_IN - reddit_collect.models.Post instance, so we can relate comments to their post.
- comment_list_IN - list of reddiwrap Comment instances we are to store in the database.
- parent_comment_IN - reddit_collect.models.Comment instance of parent comment, so we can relate the child comment back to it.
- do_update_existing_IN - Boolean, True if we want to update existing comments that are already in the database, false if not. Defaults to True.
'''
# return reference
comment_count_OUT = 0
# declare variables
me = "process_comments"
do_update_existing = False
comment_count = -1
update_count = -1
new_comment_count = -1
current_rw_comment = None
comment_reddit_full_id = ""
django_comment = None
is_comment_in_database = False
django_do_bulk_create = False
comment_children = None
child_count = -1
# initialize variables
comment_count = 0
# updating existing?
do_update_existing = do_update_existing_IN
if ( self.do_output_details == True ):
print( "In " + me + ": update existing?: " + str( do_update_existing ) )
#-- END check to see if outputting details --#
# do we have a comment list
if ( ( comment_list_IN ) and ( len( comment_list_IN ) > 0 ) ):
# we have comments. Loop over them.
for current_rw_comment in comment_list_IN:
# increment count
comment_count += 1
# get the full ID
comment_reddit_full_id = current_rw_comment.name
# ==> comment already in database?
try:
# lookup comment.
django_comment = reddit_collect.models.Comment.objects.get( reddit_full_id = comment_reddit_full_id )
# post is in database
is_comment_in_database = True
if ( self.do_output_details == True ):
print( "==> In " + me + ": reddit comment " + comment_reddit_full_id + " is in database." )
#-- END check to see if outputting details --#
except:
# Not found. Create new instance, set flag.
django_comment = reddit_collect.models.Comment()
is_comment_in_database = False
if ( self.do_output_details == True ):
print( "==> In " + me + ": reddit comment " + comment_reddit_full_id + " is not in database." )
#-- END check to see if outputting details --#
#-- END - check for comment in database --#
# ==> Got existing? (Could put this in except, still not
# sure how I feel about using exceptions for program
# flow)
# OLD - allowing for update now.
#if ( django_comment == None ):
# ==> Do we process this comment? We do if:
# - comment is not in database. - OR -
# - comment is in database, but update flag is true.
if ( ( is_comment_in_database == False ) or ( ( is_comment_in_database == True ) and ( do_update_existing == True ) ) ):
if ( self.do_output_details == True ):
print( "====> In " + me + ": processing reddit comment " + comment_reddit_full_id )
#-- END check to see if outputting details --#
# Update appropriate counter
if ( is_comment_in_database == True ):
# in database - increment update count.
update_count += 1
else:
# not in database. Increment new post count.
new_comment_count += 1
#-- END counter increment. --#
# create model instance.
# OLD - already have instance now.
#django_comment = reddit_collect.models.Comment()
# set fields from reddiwrap instance.
django_comment.set_fields_from_reddiwrap( current_rw_comment, self.convert_4_byte_unicode_to_entity )
# if post, set post (better be a post).
if ( ( post_IN ) and ( post_IN is not None ) ):
# store reference to post in comment.
django_comment.post = post_IN
# does the post reference a subreddit?
if ( ( post_IN.subreddit ) and ( post_IN.subreddit is not None ) ):
# yes - put reference to it in comment, as well.
django_comment.subreddit = post_IN.subreddit
#-- END check to see if post has a subreddit. --#
#-- END check to see if related post passed in. --#
# if parent comment, set it.
if ( ( parent_comment_IN ) and ( parent_comment_IN is not None ) ):
django_comment.parent = parent_comment_IN
#- END check to see if parent_comment_IN --#
# exception handling around save, to deal with encoding (!).
try:
# save to database.
django_comment.save()
except Exception as e:
# error saving. Probably encoding error.
# process exception.
error_email_subject = "Problem saving comment."
exception_message = "In " + me + ": reddit comment " + comment_reddit_full_id + " threw exception on save()."
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try-except around save() --#
#-- END check to see if already in database --#
# does current comment have children?
comment_children = current_rw_comment.children
if ( ( comment_children ) and ( len( comment_children ) > 0 ) ):
if ( self.do_output_details == True ):
print( "====> In " + me + ": processing children of reddit comment " + comment_reddit_full_id )
#-- END check to see if outputting details --#
# yes. Recurse!
child_count = self.process_comments( post_IN, comment_children, django_comment, do_update_existing_IN )
# add child count to comment_count
comment_count += child_count
#-- END check to see if there are comments --#
#-- END loop over comments. --#
#-- END check to see if comments. --#
# return comment_count
comment_count_OUT = comment_count
return comment_count_OUT
#-- END method process_comments --#
def process_comments_bulk( self,
post_IN = None,
comment_list_IN = [],
do_update_existing_IN = True,
level_IN = 0,
*args,
**kwargs ):
'''
Accepts django reddit_collect.models.Post instance, list of reddiwrap
comment instances. Loops over all comments in the list, processing
each, then checking for child comments. If child(ren) found, calls
this routine again, passing post and list of children, so they
reference root parent post. Returns list of comments
that need to be bulk saved. This method stores reddit IDs so comment
relations can be pieced together, but doesn't create django relations,
as well. The process_comments() method creates all django
relations as well as storing IDs from reddit. Lots more queries,
though.
Parameters:
- post_IN - reddit_collect.models.Post instance, so we can relate comments to their post.
- comment_list_IN - list of reddiwrap Comment instances we are to store in the database.
'''
# return reference
comment_list_OUT = []
# declare variables
me = "process_comments_bulk"
comment_count = -1
new_comment_count = -1
updated_comment_count = -1
current_rw_comment = None
comment_reddit_full_id = ""
django_comment = None
found_existing = False
django_do_bulk_create = False
comment_children = None
child_comment_list = []
django_bulk_create_count = -1
# initialize variables
comment_count = 0
new_comment_count = 0
updated_comment_count = 0
if ( self.do_output_details == True ):
print( "In " + me + ": at level " + str( level_IN ) + " - update existing?: " + str( do_update_existing_IN ) )
#-- END check to see if outputting details --#
# do we have a comment list
if ( ( comment_list_IN ) and ( len( comment_list_IN ) > 0 ) ):
# we have comments. Loop over them.
for current_rw_comment in comment_list_IN:
# increment count
comment_count += 1
# reset found flag
found_existing = False
# get the full ID
comment_reddit_full_id = current_rw_comment.name
# ==> comment already in database?
try:
# lookup comment.
django_comment = reddit_collect.models.Comment.objects.get( reddit_full_id = comment_reddit_full_id )
if ( self.do_output_details == True ):
print( "==> In " + me + ": reddit comment " + comment_reddit_full_id + " IS ALREADY in database." )
#-- END check to see if outputting details. --#
# increment updated count
updated_comment_count += 1
# set found flag.
found_existing = True
except:
# Not found. Make new instance.
django_comment = reddit_collect.models.Comment()
if ( self.do_output_details == True ):
print( "==> In " + me + ": reddit comment " + comment_reddit_full_id + " NOT in database." )
#-- END check to see if outputting details. --#
# not in database. Add it.
new_comment_count += 1
# set found flag.
found_existing = False
#-- END - check for comment in database --#
# set fields from reddiwrap instance.
django_comment.set_fields_from_reddiwrap( current_rw_comment, self.convert_4_byte_unicode_to_entity )
# if post, set post (better be a post).
if ( ( post_IN ) and ( post_IN != None ) ):
# store reference to post in comment.
django_comment.post = post_IN
# does the post reference a subreddit?
if ( ( post_IN.subreddit ) and ( post_IN.subreddit is not None ) ):
# yes - put reference to it in comment, as well.
django_comment.subreddit = post_IN.subreddit
#-- END check to see if post has a subreddit. --#
#-- END check to see if related post passed in. --#
# ==> Got existing? (Could put this in except, still not
# sure how I feel about using exceptions for program
# flow)
if ( found_existing == False ):
# append instance to list
comment_list_OUT.append( django_comment )
if ( self.do_output_details == True ):
print( "====> In " + me + ": new reddit comment " + comment_reddit_full_id + " ADDED to bulk list." )
#-- END check to see if outputting details. --#
# if existing, are we to update?
elif ( ( found_existing == True ) and ( do_update_existing_IN == True ) ):
# save updates to existing comment.
# exception handling around save, to deal with encoding (!).
try:
# save to database.
django_comment.save()
except Exception as e:
# error saving. Probably encoding error.
# process exception.
error_email_subject = "Problem saving comment."
exception_message = "In " + me + ": reddit comment " + comment_reddit_full_id + " threw exception on save()."
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try-except around save() --#
if ( self.do_output_details == True ):
print( "====> In " + me + ": existing reddit comment " + comment_reddit_full_id + " UPDATED." )
#-- END check to see if outputting details. --#
#-- END check to see if already in database --#
# does current comment have children?
comment_children = current_rw_comment.children
if ( ( comment_children ) and ( len( comment_children ) > 0 ) ):
if ( self.do_output_details == True ):
print( "======> In " + me + ": processing children of reddit comment " + comment_reddit_full_id )
#-- END check to see if outputting details --#
# yes. Recurse!
child_comment_list = self.process_comments_bulk( post_IN = post_IN, comment_list_IN = comment_children, do_update_existing_IN = do_update_existing_IN, level_IN = level_IN + 1 )
# add instances in child list to the return list.
comment_list_OUT.extend( child_comment_list )
#-- END check to see if there are child comments --#
#-- END loop over comments. --#
# update count of comments created or updated.
self.bulk_comments_processed += new_comment_count
self.bulk_comments_processed += updated_comment_count
# do bulk_create()? Must be at calling level 0, and must have
# something in our list.
if ( ( level_IN == 0 ) and ( comment_list_OUT ) and ( len( comment_list_OUT ) > 0 ) ):
# get count
django_bulk_create_count = len( comment_list_OUT )
if ( self.do_output_details == True ):
print( "In " + me + ": at level 0 - bulk creating " + str( django_bulk_create_count ) + " comments." )
#-- END check to see if outputting details --#
# try/except around saving.
try:
# try bulk create.
reddit_collect.models.Comment.objects.bulk_create( comment_list_OUT )
except Exception as e:
# error saving. Probably encoding error.
# send email about problems
error_email_subject = "Problem bulk-saving comments."
exception_message = "In " + me + ": bulk_create() threw exception"
if ( ( post_IN ) and ( post_IN != None ) ):
exception_message += ", processing comments for post " + str( post_IN.id ) + " ( reddit ID: " + post_IN.reddit_id + " )"
#-- END check to see if post passed in (there better be!) --#
exception_message += " - count of comments being bulk created = " + str( django_bulk_create_count )
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try/except around saving. --#
#-- END check to see if anything to bulk create. --#
#-- END check to see if comments. --#
return comment_list_OUT
#-- END method process_comments_bulk --#
def process_exception( self, exception_IN = None, message_IN = "", send_email_IN = False, email_subject_IN = "", *args, **kwargs ):
# return reference
status_OUT = self.STATUS_SUCCESS
# declare variables
my_exception_helper = ""
# get exception helper
my_exception_helper = self.exception_helper
if ( ( my_exception_helper ) and ( my_exception_helper != None ) ):
# process using exception_helper.
status_OUT = my_exception_helper.process_exception( exception_IN, message_IN, send_email_IN, email_subject_IN )
#-- END check to see if we have a helper --#
return status_OUT
#-- END method process_exception() --#
def set_email_status_address( self, value_IN, *args, **kwargs ):
'''
Accepts an email address, stores it internally, and in the nested
exception helper, so it can be used to email exception messages.
'''
# declare variables
my_exception_helper = None
# store the value
self.email_status_address = value_IN
# see if we have an exception helper.
my_exception_helper = self.exception_helper
if ( ( my_exception_helper ) and ( my_exception_helper != None ) ):
# we do. Set it there, too.
my_exception_helper.email_status_address = value_IN
#-- END check to see if we have an exception helper --#
#-- END method set_email_status_address() --#
#-- END class RedditCollector. --#
'''
#================================================================================
# Original Code
#================================================================================
reddit = ReddiWrap(user_agent='ReddiWrap')
USERNAME = 'Mr_Boy'
PASSWORD = 'Iman1234'
SUBREDDIT_NAMES = ['POLITICS', 'FUNNY', 'PICS' , 'todayilearned'];
while True:
for MOD_SUB in SUBREDDIT_NAMES:
print "######### " + MOD_SUB + " ###########";
# Load cookies from local file and verify cookies are valid
reddit.load_cookies('cookies.txt')
# If we had no cookies, or cookies were invalid,
# or the user we are logging into wasn't in the cookie file:
if not reddit.logged_in or reddit.user.lower() != USERNAME.lower():
print('logging into %s' % USERNAME)
login = reddit.login(user=USERNAME, password=PASSWORD)
if login != 0:
# 1 means invalid password, 2 means rate limited, -1 means unexpected error
print('unable to log in: %d' % login)
print('remember to change USERNAME and PASSWORD')
exit(1)
# Save cookies so we won't have to log in again later
reddit.save_cookies('cookies.txt')
print('logged in as %s' % reddit.user)
# uinfo = reddit.user_info()
# print('\nlink karma: %d' % uinfo.link_karma)
# print('comment karma: %d' % uinfo.comment_karma)
# created = int(uinfo.created)
# print('account created on: %s' % reddit.time_to_date(created))
# print('time since creation: %s\n' % reddit.time_since(created))
# # # # # # # # Finding Subreddit
print "Finding Subreddit ..."
subreddit = "";
flag = False; # if we find the subreddit, this flag is going to be Ture
while True:
subreddits = reddit.get('/reddits');
for subred in subreddits:
if subred.display_name == MOD_SUB.lower():
subreddit = subred;
flag = True;
break
if (not reddit.has_next()) or flag:
break;
time.sleep(2);
subreddits = reddit.get_next()
# # # # # # # # saving subreddit in subreddit table
print "Saving Subreddit ... ";
over18 = 0;
if subreddit.over18 :
over18 = 1;
if not myLib.exsits_row(subreddit.id, "Subreddit"):
myLib.insert_row([subreddit.id, subreddit.name, subreddit.display_name, subreddit.title, subreddit.url, subreddit.description,
subreddit.created, over18, int(subreddit.subscribers), subreddit.header_title] , "Subreddit");
# # # # # # # # Saving Posts
print "saving Posts ... "
posts = reddit.get('/r/%s' % MOD_SUB)
while True:
for post in posts:
if not myLib.exsits_row(post.id, "Post"):
# add the post to the Post table
myLib.insert_row(myLib.retrieve_post_traits(post), 'Post');
if not reddit.has_next():
break
time.sleep(2);
posts = reddit.get_next()
# subreddit = myLib.retreive_subreddit(MOD_SUB.lower());
posts = myLib.posts_of_reddit(subreddit.name); # corrent
print "saving Comments ... ";
i = 0;
for post in posts:
pst = myLib.make_post_obj(post);
reddit.fetch_comments(pst);
myLib.iterate_comments(pst.comments); # iterates and save comments
time.sleep(1);
i = i + 1;
print i;
''' | gpl-3.0 | 8,925,778,869,448,619,000 | 40.713745 | 358 | 0.461784 | false | 4.901453 | false | false | false |
hgdeoro/pilas | pilasengine/interprete/io.py | 1 | 1571 | # -*- encoding: utf-8 -*-
# pilas engine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
class Output(object):
"Representación abstracta de un archivo de salida de datos."
def __init__(self, destino):
self.destino = destino
class ErrorOutput(Output):
"Representa la salida de error en la consola."
def write(self, linea):
self.destino.stdout_original.write(linea)
# Solo muestra el error en consola si es un mensaje util.
if "Traceback (most" in linea or 'File "<input>", line 1' in linea:
self.destino.ensureCursorVisible()
return
if linea.startswith(' File "'):
linea = linea.replace("File", "en el archivo")
linea = linea.replace('line', 'linea')
linea = linea[:linea.find(', in')]
if 'NameError' in linea:
linea = linea.replace('name', 'el nombre').replace('is not defined', 'no existe')
self.destino.insertar_error(linea.decode('utf-8'))
self.destino.ensureCursorVisible()
class NormalOutput(Output):
"Representa la salida estándar de la consola."
def write(self, linea):
self.destino.stdout_original.write(linea)
self.destino.imprimir_linea(linea.decode('utf-8'))
self.destino.ensureCursorVisible()
if '<bound method' in linea:
print "\n\n ... Hey, tal vez olvidaste poner () al final de la anterior sentencia no?"
| lgpl-3.0 | 4,202,209,198,047,022,600 | 33.108696 | 98 | 0.634162 | false | 3.228395 | false | false | false |
SocialCognitiveSystems/PRIMO | setup.py | 1 | 2744 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PRIMO2 -- Probabilistic Inference Modules.
# Copyright (C) 2013-2017 Social Cognitive Systems Group,
# Faculty of Technology, Bielefeld University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import setuptools
import sys
from primo2 import __version__
if sys.argv[-1] == 'setup.py':
print("To install, run 'python setup.py install'")
print()
setuptools.setup(
name="primo2",
version=__version__,
description="PRIMO -- PRobabilistic Inference MOdules",
long_description="This project is a (partial) reimplementation of the original " \
"probabilistic inference modules which can be found at " \
"https://github.com/hbuschme/PRIMO. This reimplementation " \
"follows the same general idea, but restructured and unified the " \
"underlying datatypes to allow a more concise API and more efficient " \
"manipulation, e.g. by the inference algorithm. In turn the inference " \
"algorithms have been rewritten and partly extended. For most if not " \
"all use cases this implementation should be easier to use and more " \
"performant than the original.",
url='http://github.com/SocialCognitiveSystems/PRIMO/',
license='GNU Lesser General Public License v3 or later (LGPLv3+)',
maintainer="Jan Pöppel",
maintainer_email="[email protected]",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
packages=[
"primo2",
"primo2.inference"
],
install_requires = [
"lxml",
"numpy",
"networkx",
"six"
],
)
| lgpl-3.0 | 3,088,769,804,361,078,000 | 38.753623 | 93 | 0.660955 | false | 4.010234 | false | false | false |
macarthur-lab/xbrowse | xbrowse_server/base/management/commands/load_exome_depth.py | 1 | 1027 | import os
from django.conf import settings
from django.core.management.base import BaseCommand
from xbrowse_server.base.models import Project
from xbrowse_server.mall import get_cnv_store
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('args', nargs='*')
def handle(self, *args, **options):
project_id = args[0]
project = Project.objects.get(project_id=project_id)
file_map = {}
for path in os.listdir(args[1]):
sample = path.replace('.bam.csv', '')
sample = sample.replace('.', '')
sample = sample.replace('-', '')
file_map[sample] = os.path.abspath(os.path.join(args[1], path))
for indiv in project.get_individuals():
sample = indiv.indiv_id.replace('-', '')
if sample in file_map:
indiv.exome_depth_file = file_map[sample]
indiv.save()
get_cnv_store().add_sample(str(indiv.pk), open(indiv.exome_depth_file)) | agpl-3.0 | -8,306,639,866,769,281,000 | 32.16129 | 87 | 0.599805 | false | 3.748175 | false | false | false |
zsiki/ls | network_dialog.py | 1 | 9919 | # -*- coding: utf-8 -*-
"""
.. module:: network_dialog
:platform: Linux, Windows
:synopsis: GUI for adjusment calculation
.. moduleauthor: Zoltan Siki <[email protected]>
"""
import platform
import webbrowser
from PyQt4.QtGui import QDialog, QFont, QMessageBox
from PyQt4.QtCore import QSettings
import config
from network_calc import Ui_NetworkCalcDialog
from base_classes import *
from surveying_util import *
from gama_interface import *
class NetworkDialog(QDialog):
""" Class for network calculation dialog
"""
def __init__(self, log):
""" Initialize dialog data and event handlers
:param log: log instance for log messages
"""
super(NetworkDialog, self).__init__()
self.log = log
self.ui = Ui_NetworkCalcDialog()
self.ui.setupUi(self)
self.points = []
self.fix = []
self.adj = []
# event handling
self.ui.CloseButton.clicked.connect(self.onCloseButton)
self.ui.ResetButton.clicked.connect(self.onResetButton)
self.ui.AddFixButton.clicked.connect(self.onAddFixButton)
self.ui.AddAdjButton.clicked.connect(self.onAddAdjButton)
self.ui.RemoveFixButton.clicked.connect(self.onRemoveFixButton)
self.ui.RemoveAdjButton.clicked.connect(self.onRemoveAdjButton)
self.ui.CalcButton.clicked.connect(self.onCalcButton)
self.ui.HelpButton.clicked.connect(self.onHelpButton)
def showEvent(self, event):
""" Set up initial state of dialog widgets
:param event: NOT USED
"""
if platform.system() == 'Linux':
# change font
fontname = QSettings().value("SurveyingCalculation/fontname",config.fontname)
fontsize = int(QSettings().value("SurveyingCalculation/fontsize",config.fontsize))
self.ui.ResultTextBrowser.setFont(QFont(fontname, fontsize))
log_path = QSettings().value("SurveyingCalculation/log_path",config.log_path)
self.log.set_log_path(log_path)
self.reset()
def reset(self):
""" Reset dialog to initial state
"""
self.points = get_measured()
self.fix = []
self.adj = []
# clear lists
self.ui.PointsList.clear()
self.ui.FixList.clear()
self.ui.AdjustedList.clear()
self.ui.ResultTextBrowser.clear()
i = 0
if self.points is not None:
for p in self.points:
self.ui.PointsList.addItem(p[0])
if p[1]:
item = self.ui.PointsList.item(i)
itemfont = item.font()
itemfont.setWeight(QFont.Bold)
item.setFont(itemfont)
i += 1
def onCloseButton(self):
""" Close dialog after Close button pressed
"""
self.accept()
def onResetButton(self):
""" Reset dialog to initial state after Reset button pressed
"""
self.reset()
def onAddFixButton(self):
""" Move selected points to fix point list
"""
selected = self.ui.PointsList.selectedItems()
for item in selected:
i = self.ui.PointsList.row(item)
if self.points[i][1]:
self.ui.FixList.addItem(self.ui.PointsList.takeItem(i))
self.fix.append(self.points[i])
del self.points[i]
def onAddAdjButton(self):
""" Move selected points to adjusted list
"""
selected = self.ui.PointsList.selectedItems()
for item in selected:
i = self.ui.PointsList.row(item)
self.ui.AdjustedList.addItem(self.ui.PointsList.takeItem(i))
self.adj.append(self.points[i])
del self.points[i]
def onRemoveFixButton(self):
""" Move back selected points from fixed list
"""
selected = self.ui.FixList.selectedItems()
for item in selected:
i = self.ui.FixList.row(item)
self.ui.PointsList.addItem(self.ui.FixList.takeItem(i))
self.points.append(self.fix[i])
del self.fix[i]
def onRemoveAdjButton(self):
""" Move back selected points from adjusted list
"""
selected = self.ui.AdjustedList.selectedItems()
for item in selected:
i = self.ui.AdjustedList.row(item)
self.ui.PointsList.addItem(self.ui.AdjustedList.takeItem(i))
self.points.append(self.adj[i])
del self.adj[i]
def onCalcButton(self):
""" Collect observations and adjust network
"""
if len(self.adj):
dimension = int(self.ui.DimensionComboBox.currentText())
conf = float(self.ui.ConfidenceComboBox.currentText())
try:
stda = float(self.ui.AngleDevLineEdit.text())
stdd = float(self.ui.DistDevMMLineEdit.text())
stdd1 = float(self.ui.DistDevMMKMLineEdit.text())
except ValueError:
QMessageBox.warning(self, tr("Warning"), tr("Invalid standard deviation value"))
return
g = GamaInterface(dimension, conf, stda, stdd, stdd1)
# add points to adjustment
fix_names = []
adj_names = []
for fp in self.fix:
p = get_coord(fp[0])
g.add_point(p, 'FIX')
fix_names.append(fp[0])
for fp in self.adj:
p = get_coord(fp[0])
if p is None:
p = Point(fp[0])
g.add_point(p, 'ADJ')
adj_names.append(fp[0])
# add observations to adjustment
fb_list = get_fblist()
if fb_list is None:
return None
for fb in fb_list:
lay = get_layer_by_name(fb)
if lay is None:
continue
st = None
n_ori = 0 # number of orientation directions
n_adj = 0 # number of adjusted targets
#for feat in lay.getFeatures():
sorted_features = sorted(lay.getFeatures(), key=lambda x: x["id"])
for feat in sorted_features:
pid = feat['point_id']
if feat['station'] == 'station':
if st is not None and dimension in [2, 3]:
if (n_ori + n_adj == 0) or \
(st in fix_names and n_adj == 0):
# no adjusted point on known station, remove it
g.remove_last_observation(True)
st = None
n_ori = 0 # number of orientation directions
n_adj = 0 # number of adjusted targets
if pid in fix_names or pid in adj_names:
st = pid
o = PolarObservation(pid, feat['station'])
o.th = feat['th'] if type(feat['th']) is float else None
o.pc = feat['pc'] if type(feat['pc']) is str else None
g.add_observation(o)
else:
if st is not None and (pid in fix_names or pid in adj_names):
if dimension in [2, 3] and (type(feat['hz']) is float or \
type(feat['v']) is float and type(feat['sd']) is float) or \
dimension == 1 and type(feat['v']) is float and \
type(feat['sd']) is float:
o = PolarObservation(pid, None)
o.hz = Angle(feat['hz'], 'GON') if type(feat['hz']) is float else None
o.v = Angle(feat['v'], 'GON') if type(feat['v']) is float else None
if type(feat['v']) is float and \
(st in adj_names or pid in adj_names):
# add zenith if one end is unknown
o.v = Angle(feat['v'], 'GON')
if type(feat['sd']) is float and \
(st in adj_names or pid in adj_names):
# add distance if one end is unknown
o.d = Distance(feat['sd'], 'SD')
o.th = feat['th'] if type(feat['th']) is float else None
o.pc = feat['pc'] if type(feat['pc']) is str else None
if dimension in [2, 3] and (o.hz is not None or o.d is not None) or \
dimension == 1 and o.v is not None:
# direction or distance given
g.add_observation(o)
if pid in fix_names:
n_ori += 1
if pid in adj_names:
n_adj += 1
t = g.adjust()
if t is None:
# adjustment failed
QMessageBox.warning(self, tr("Warning"),
tr('gama-local not installed or other runtime error'))
else:
self.ui.ResultTextBrowser.append(t)
self.log.write_log(tr("Network adjustment"))
self.log.write(t)
else:
QMessageBox.warning(self, tr("Warning"),
tr('No points to adjust'))
def onHelpButton(self):
""" Open user's guide at Network Adjustment in the default web browser.
"""
webbrowser.open("http://www.digikom.hu/SurveyingCalculation/usersguide.html#network-adjustment")
| gpl-2.0 | -1,297,909,990,585,258,500 | 41.75431 | 104 | 0.506402 | false | 4.325774 | false | false | false |
Pikecillo/genna | external/PyXML-0.8.4/xml/dom/html/HTMLParamElement.py | 10 | 1949 | ########################################################################
#
# File Name: HTMLParamElement
#
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: [email protected]
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLParamElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="PARAM"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_name(self):
return self.getAttribute("NAME")
def _set_name(self, value):
self.setAttribute("NAME", value)
def _get_type(self):
return self.getAttribute("TYPE")
def _set_type(self, value):
self.setAttribute("TYPE", value)
def _get_value(self):
return self.getAttribute("VALUE")
def _set_value(self, value):
self.setAttribute("VALUE", value)
def _get_valueType(self):
return string.capitalize(self.getAttribute("VALUETYPE"))
def _set_valueType(self, value):
self.setAttribute("VALUETYPE", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"name" : _get_name,
"type" : _get_type,
"value" : _get_value,
"valueType" : _get_valueType
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"name" : _set_name,
"type" : _set_type,
"value" : _set_value,
"valueType" : _set_valueType
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| gpl-2.0 | -4,082,228,738,923,556,400 | 26.450704 | 77 | 0.608517 | false | 3.755299 | false | false | false |
avaitla/Haskell-to-C---Bridge | pygccxml-1.0.0/unittests/declaration_files_tester.py | 1 | 1778 | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
import autoconfig
import parser_test_case
import pygccxml
from pygccxml.utils import *
from pygccxml.parser import *
from pygccxml.declarations import *
class tester_t( parser_test_case.parser_test_case_t ):
def __init__(self, *args):
parser_test_case.parser_test_case_t.__init__(self, *args)
self.__files = [
'core_ns_join_1.hpp'
, 'core_ns_join_2.hpp'
, 'core_ns_join_3.hpp'
, 'core_membership.hpp'
, 'core_class_hierarchy.hpp'
, 'core_types.hpp'
, 'core_diamand_hierarchy_base.hpp'
, 'core_diamand_hierarchy_derived1.hpp'
, 'core_diamand_hierarchy_derived2.hpp'
, 'core_diamand_hierarchy_final_derived.hpp'
, 'core_overloads_1.hpp'
, 'core_overloads_2.hpp'
]
def test(self):
prj_reader = project_reader_t( self.config )
decls = prj_reader.read_files( self.__files
, compilation_mode=COMPILATION_MODE.ALL_AT_ONCE )
files = declaration_files( decls )
result = set()
for fn in files:
result.add( os.path.split( fn )[1] )
self.failUnless( set( self.__files ).issubset( result ) )
def create_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite(tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
run_suite()
| bsd-3-clause | 8,640,207,445,008,429,000 | 31.54717 | 92 | 0.57649 | false | 3.380228 | true | false | false |
pirsquare/askme-python | setup.py | 1 | 1573 | import sys
from setuptools import find_packages
from setuptools import setup
import io
import os
VERSION = '0.0.4'
def fpath(name):
return os.path.join(os.path.dirname(__file__), name)
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(fpath(filename), encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
def get_requirements():
requires = [line.rstrip('\n') for line in open(fpath('requirements.txt'))]
if sys.version_info[:2] == (2, 6):
# For python2.6 we have to require argparse since it was not in stdlib until 2.7.
requires.append('argparse')
return requires
setup_args = dict(
name='askme',
description='AskMe Python Client',
url='https://github.com/pirsquare/askme-python',
version=VERSION,
license='MIT',
packages=find_packages(exclude=['tests']),
package_data={'askme': ['source/data/*.json']},
entry_points={
'console_scripts': [
'askme = askme.main:main',
],
},
include_package_data=True,
install_requires=get_requirements(),
author='Ryan Liao',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
if __name__ == '__main__':
setup(**setup_args)
| mit | -451,297,514,957,354,100 | 25.216667 | 89 | 0.614749 | false | 3.709906 | false | false | false |
Breta01/handwriting-ocr | src/ocr/page.py | 1 | 4255 | # -*- coding: utf-8 -*-
"""
Crop background and transform perspective from the photo of page
"""
import numpy as np
import cv2
from .helpers import *
def detection(image, area_thresh = 0.5):
"""Finding Page."""
small = resize(image)
# Edge detection
image_edges = _edges_detection(small, 200, 250)
# Close gaps between edges (double page clouse => rectangle kernel)
closed_edges = cv2.morphologyEx(image_edges,
cv2.MORPH_CLOSE,
np.ones((5, 11)))
# Countours
page_contour = _find_page_contours(closed_edges, small, area_thresh)
# Recalculate to original scale
page_contour = page_contour.dot(ratio(image, small.shape[0]))
# Transform prespective
new_image = _persp_transform(image, page_contour)
return new_image
def _edges_detection(img, minVal, maxVal):
"""Preprocessing (gray, thresh, filter, border) + Canny edge detection."""
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.bilateralFilter(img, 9, 75, 75)
img = cv2.adaptiveThreshold(img, 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, 115, 4)
# Median blur replace center pixel by median of pixels under kelner
# => removes thin details
img = cv2.medianBlur(img, 11)
# Add black border - detection of border touching pages
img = cv2.copyMakeBorder(img, 5, 5, 5, 5,
cv2.BORDER_CONSTANT,
value=[0, 0, 0])
return cv2.Canny(img, minVal, maxVal)
def _four_corners_sort(pts):
"""Sort corners in order: top-left, bot-left, bot-right, top-right."""
diff = np.diff(pts, axis=1)
summ = pts.sum(axis=1)
return np.array([pts[np.argmin(summ)],
pts[np.argmax(diff)],
pts[np.argmax(summ)],
pts[np.argmin(diff)]])
def _contour_offset(cnt, offset):
"""Offset contour because of 5px border."""
cnt += offset
cnt[cnt < 0] = 0
return cnt
def _find_page_contours(edges, img, area_thresh):
"""Finding corner points of page contour."""
contours, hierarchy = cv2.findContours(edges,
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
# Finding biggest rectangle otherwise return original corners
height = edges.shape[0]
width = edges.shape[1]
MIN_COUNTOUR_AREA = height * width * area_thresh
MAX_COUNTOUR_AREA = (width - 10) * (height - 10)
max_area = MIN_COUNTOUR_AREA
page_contour = np.array([[0, 0],
[0, height-5],
[width-5, height-5],
[width-5, 0]])
for cnt in contours:
perimeter = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.03 * perimeter, True)
# Page has 4 corners and it is convex
if (len(approx) == 4 and
cv2.isContourConvex(approx) and
max_area < cv2.contourArea(approx) < MAX_COUNTOUR_AREA):
max_area = cv2.contourArea(approx)
page_contour = approx[:, 0]
# Sort corners and offset them
page_contour = _four_corners_sort(page_contour)
return _contour_offset(page_contour, (-5, -5))
def _persp_transform(img, s_points):
"""Transform perspective from start points to target points."""
# Euclidean distance - calculate maximum height and width
height = max(np.linalg.norm(s_points[0] - s_points[1]),
np.linalg.norm(s_points[2] - s_points[3]))
width = max(np.linalg.norm(s_points[1] - s_points[2]),
np.linalg.norm(s_points[3] - s_points[0]))
# Create target points
t_points = np.array([[0, 0],
[0, height],
[width, height],
[width, 0]], np.float32)
# getPerspectiveTransform() needs float32
if s_points.dtype != np.float32:
s_points = s_points.astype(np.float32)
M = cv2.getPerspectiveTransform(s_points, t_points)
return cv2.warpPerspective(img, M, (int(width), int(height))) | mit | 3,836,286,360,424,042,500 | 34.173554 | 78 | 0.565922 | false | 3.584667 | false | false | false |
harvard-lil/h2o | web/main/storages.py | 1 | 1697 | from datetime import datetime
import posixpath
from storages.backends.s3boto3 import S3Boto3Storage
from django.conf import settings
# used only for suppressing INFO logging in S3Boto3Storage
import logging
class S3Storage(S3Boto3Storage):
# suppress boto3's INFO logging per https://github.com/boto/boto3/issues/521
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
def augmented_listdir(self, name):
path = self._normalize_name(self._clean_name(name))
# The path needs to end with a slash, but if the root is empty, leave
# it.
if path and not path.endswith('/'):
path += '/'
files = []
paginator = self.connection.meta.client.get_paginator('list_objects')
pages = paginator.paginate(Bucket=self.bucket_name, Delimiter='/', Prefix=path)
for page in pages:
for entry in page.get('Contents', ()):
last_modified = entry.get('LastModified')
age = datetime.now(last_modified.tzinfo) - last_modified
files.append({'file_name': posixpath.relpath(entry['Key'], path),
'last_modified': last_modified,
'age': age})
return files
def get_s3_storage(bucket_name='h2o.images'):
# We're planning on supporting multiple storage solutions. I'm adding this
# unnecessary layer of abstraction now, to hopefully encourage design decisions
# that will make it easier to support multiple and customer-specific storages later.
return S3Storage(
**settings.S3_STORAGE,
bucket_name=bucket_name
)
| agpl-3.0 | 1,591,903,939,829,844,700 | 37.568182 | 88 | 0.652917 | false | 4.159314 | false | false | false |
robertsj/libdetran | src/python/pydetranutils/mesh_plot.py | 2 | 4018 | # This provides utilities for plotting things on a
# 1D or 2D mesh or a slice of a 3D mesh.
try :
import numpy as np
except ImportError :
print "Error importing Numpy."
try :
import matplotlib.pyplot as plt
import matplotlib
except ImportError :
print "Error importing matplotlib"
def plot_mesh_function(mesh, f, title="", colormap = "hot", edges = False, mybounds = [], myticks = []) :
""" Plot a mesh function.
"""
if mesh.dimension() == 1 :
# get the mesh points
x = mesh_axes(mesh)
# plot the map
plt.plot(x, f)
elif mesh.dimension() == 2 :
# Get the mesh axes and then make a grid of them for plotting.
x, y = mesh_axes(mesh)
X, Y = np.meshgrid(x, y)
# Reshape the function
f = f.reshape(mesh.number_cells_x(), mesh.number_cells_y())
if edges :
plt.pcolor(X, Y, f, cmap=colormap, edgecolors='k')
else :
plt.pcolor(X, Y, f, cmap=colormap)
plt.axis("scaled")
plt.xlabel("x [cm]")
plt.ylabel("y [cm]")
if len(myticks) :
cbar = plt.colorbar(boundaries=mybounds,ticks=myticks)
else :
cbar = plt.colorbar()
else :
print "not ready for 3d"
return
plt.title(title)
# show the plot
plt.show()
def plot_mesh_map(mesh, key, edges = False) :
""" Plot a mesh map, optionally with edges explicitly displayed.
"""
# no 3D for now
if mesh.dimension() == 3 :
print "not ready for 3d"
return
# Check that the map exists and return if missing
if not mesh.mesh_map_exists(key) :
print "Mesh map", key, " does not exist"
return
# Get the map
map = np.asarray(mesh.mesh_map(key))
if (mesh.dimension() == 2) :
# reshape the map
map = map.reshape(mesh.number_cells_x(), mesh.number_cells_y())
# Choose a random color map for 2D plots
unique_elements = np.unique(map)
#num_unique_elements = len(unique_elements)
num_unique_elements = max(unique_elements)+1
colormap = matplotlib.colors.ListedColormap(np.random.rand(num_unique_elements, 3))
bounds = np.linspace(-0.5, num_unique_elements - 0.5, num_unique_elements+1)
ticks = bounds[0:num_unique_elements]+0.5
# Plot
plot_mesh_function(mesh, map, key, colormap, edges, bounds, ticks)
def plot_multigroup_flux(mesh, state, edges = False) :
""" Plot the multigroup fluxes.
For 1D, they are superimposed on one plot. In 2D, they
are split into subfigures for the number of groups. Obviously,
this can get cumbersome for many groups, so we kill it at 5+.
"""
if mesh.dimension() == 1 :
# get the mesh points
x = mesh_axes(mesh)
# plot the map
plt.plot(x, f)
elif mesh.dimension() == 2 :
# Get the mesh axes and then make a grid of them for plotting.
x, y = mesh_axes(mesh)
X, Y = np.meshgrid(x, y)
edgec = 'none'
if edges :
edgec = 'k'
plt.pcolor(X, Y, f, cmap=colormap, edgecolors=edgec)
else :
print "not ready for 3d"
return
# show the plot
plt.show()
def mesh_axes(mesh) :
""" Get the fine mesh points for plotting.
"""
if (mesh.dimension() == 1) :
# for 1D, we take the cell center points
x = np.zeros(mesh.number_cells_x())
x[0] = mesh.dx(0) * 0.5
for i in range(0, mesh.number_cells_x()-1) :
x[i + 1] = x[i] + 0.5*(mesh.dx(i) + mesh.dx(i+1))
return x
else :
# for 2D, we take the mesh edges
x = np.zeros(mesh.number_cells_x()+1)
y = np.zeros(mesh.number_cells_y()+1)
for i in range(0, mesh.number_cells_x()) :
x[i + 1] = x[i] + mesh.dx(i)
for j in range(0, mesh.number_cells_y()) :
y[j + 1] = y[j] + mesh.dy(j)
return (x, y)
| mit | -6,216,482,812,668,819,000 | 30.390625 | 105 | 0.558487 | false | 3.413764 | false | false | false |
MusicVisualizationUMass/TeamNameGenerator | src/musicvisualizer/pipeline/models/cirular_oscillator.py | 1 | 1151 | '''
circular_oscillator.py: defines a ModelledRepr extension that begins with a
unit circle on the screen parametrized by (r, theta) = (1, [0,2pi))
This will be the test case for the initial extension of ModelledRepr. We will
update/expand/tweak ModelledRepr based on this implementation to make further
extensions easier.
'''
from musicvisualizer.pipeline.ir import ModelledRepr
class CircularOscillatorModel(ModelledRepr):
# TODO: include params for ModelledRepr
def __init__(self,
sampleRate = 24,
sampleRange = (None, None),
dataIn = None,
parameters = None,
number_of_points = 1024):
super(ModelledRepr, self).__init__(sampleRate = sampleRate,
sampleRange = sampleRange,
dataIn = dataIn,
parameters = parameters)
self.number_of_points = number_of_points
self.points = [0.0 for i in range(self.number_of_points)]
def increment_time(self):
pass
| mit | -3,497,859,841,627,768,000 | 36.129032 | 77 | 0.56907 | false | 4.46124 | false | false | false |
bistack/pyStorageBenchmarkTools | raid_util.py | 1 | 11140 | #!/usr/bin/python
import os
import time
from misc_lib import run_command_list
from commands import getstatusoutput
import stat
class Speed:
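    """Track the max / running average / min of a series of speed samples."""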
def __init__(self):
self.__max = 0
self.__min = -1
self.__avg = 0
def record_value(self, val):
if float(val) > float(self.__max):
self.__max = float(val)
if self.__min < 0:
self.__min = val
elif float(val) < float(self.__min):
self.__min = float(val)
if self.__avg == 0:
self.__avg = float(val)
else:
self.__avg = '%.2f' % ((float(self.__avg) + float(val)) / 2)
def get_values(self):
return self.__max, self.__avg, self.__min
class Raid_Util:
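    """Helper around mdadm for building, inspecting and tearing down md RAID arrays."""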
def __init__(self, cmd_dir, src_dir):
self.__cmd_dir = cmd_dir
self.__src_dir = src_dir
self.__raid_txn = True
self.raid_name = None
self.__sys_name = None
self.sub_dev_list = []
self.__sub_dev_cnt = 0
self.cmd_args = ''
self.__stripe_cache_size = 16384
self.raid_sub_dev_size_KB = 0
self.raid_level = 6
def set_raid_level(self, level):
self.raid_level = level
def get_raid_level(self):
return self.raid_level
def get_lest_sub_dev_cnt(self):
if self.raid_level == 6:
return 4
else:
return 3
def set_raid_sub_dev_size_KB(self, size_kb):
self.raid_sub_dev_size_KB = size_kb
def get_raid_sub_dev_size_KB(self):
return self.raid_sub_dev_size_KB
def set_stripe_cache_size(self, size):
self.__stripe_cache_size = size
def get_stripe_cache_size(self):
return str(self.__stripe_cache_size)
def set_cmd_args(self, args):
self.cmd_args = args
def get_cmd_args(self):
return self.cmd_args
def set_cmd_dir(self, path):
self.__cmd_dir = path
def get_cmd_dir(self):
return self.__cmd_dir
def set_src_dir(self, path):
self.__src_dir = path
def get_src_dir(self):
return self.__src_dir
def set_raid_txn(self, is_txn):
self.__raid_txn = is_txn
def get_raid_txn(self):
return self.__raid_txn
def get_cmd(self):
return self.__cmd_dir + '/mdadm'
def set_sub_dev_list(self, dev_list):
size_kb = find_min_dev_size_kb(dev_list)
size_kb /= 1024
if size_kb > 0:
size_kb *= 1024
self.set_raid_sub_dev_size_KB(size_kb - 4*1024)
self.sub_dev_list = dev_list
def get_sub_dev_list(self):
return self.sub_dev_list
def add_sub_dev(self, dev):
self.sub_dev_list.append(dev)
def del_sub_dev(self, dev):
self.sub_dev_list.remove(dev)
def get_sub_dev_cnt(self):
self.__sub_dev_cnt = len(self.sub_dev_list)
return self.__sub_dev_cnt
def set_raid_name(self, name):
self.raid_name = name
def search_raid_dev_path(self):
dev_paths = self.get_sub_dev_list()
status = 0
for dev in dev_paths:
dev_name = dev.split(os.sep)[-1]
cmd = 'cat /proc/mdstat | grep' + dev_name
(status, output) = getstatusoutput(cmd)
if not status:
break
if not status and output:
return ''.join(['/dev/', output.split()[0]])
return None
def get_raid_path(self):
path = None
if os.path.exists('/dev/' + self.raid_name):
path = ''.join(['/dev/', self.raid_name])
elif os.path.exists('/dev/md/' + self.raid_name):
path = ''.join(['/dev/md/', self.raid_name])
if path:
return path
return self.search_raid_dev_path()
def get_sys_name(self):
if self.__sys_name:
return self.__sys_name
path = self.get_raid_path()
if not path:
return
mode = os.stat(path).st_mode
if stat.S_ISLNK(mode):
tgt = os.readlink(path)
else:
tgt = path
self.__sys_name = tgt.split(os.sep)[-1]
return self.__sys_name
def remove_raid(self):
path = self.get_raid_path()
if not path:
return
cmds = [' '.join([self.get_cmd(), '-S', path])]
(status, _) = run_command_list(cmds)
if not status:
self.__sys_name = None
def exit_raid(self):
cmds = ['rmmod raid456 md_mod',
'modprobe -r async_raid6_recov async_pq',
#'rmmod raid6_pq',
#'dmesg -C > /dev/null'
]
run_command_list(cmds)
def init_raid(self):
if self.get_raid_txn():
src_dir = self.get_src_dir()
cmds = ['insmod ' + src_dir + '/raid6_pq.ko',
'modprobe async_raid6_recov',
'insmod ' + src_dir + '/md-mod.ko',
'insmod ' + src_dir + '/raid456.ko']
else:
cmds = ['modprobe md_mod',
'modprobe raid456']
run_command_list(cmds)
def zero_raid_sub_dev(self, tgt = None):
raid_cmd = self.get_cmd()
if self.get_sub_dev_cnt() == 0:
return
if tgt:
devs = tgt
else:
devs = ' '.join(self.get_sub_dev_list())
cmds = [' '.join([raid_cmd, '--zero-superblock',
'--force', devs])]
run_command_list(cmds)
def change_raid_stripe_cache_size(self):
if not self.get_sys_name():
return
cmd_change = ''.join(['echo ', str(self.get_stripe_cache_size()),
' > /sys/block/', self.get_sys_name(),
'/md/stripe_cache_size'])
cmds = [cmd_change]
run_command_list(cmds)
def create_raid(self):
if self.get_sub_dev_cnt() < self.get_lest_sub_dev_cnt():
return
raid_cmd = self.get_cmd()
if self.get_raid_txn():
txn = '-T'
else:
txn = ''
devs = ' '.join(self.get_sub_dev_list())
cmd_create = ' '.join(['echo "y" |', raid_cmd,
'-C', '/dev/md/' + self.raid_name,
self.cmd_args,
'-n', str(self.get_sub_dev_cnt()),
'-l', str(self.get_raid_level()),
'-z', str(self.get_raid_sub_dev_size_KB()),
txn, devs])
cmds = [cmd_create]
(err, _) = run_command_list(cmds)
if err:
return
cmd_map = ' '.join(['cat /dev/md/md-device-map', '|',
'grep', self.raid_name])
(status, output) = getstatusoutput(cmd_map)
if not status:
dev_path = '/dev/' + output.split()[0]
else:
dev_path = self.search_raid_dev_path()
if dev_path:
cmd_link1 = ''.join(['ln -s ', dev_path,
' /dev/',self.raid_name])
cmd_link2 = ''.join(['ln -s ', dev_path,
' /dev/md/', self.raid_name])
cmds = [cmd_link1, cmd_link2]
run_command_list(cmds)
return
def assemble_raid(self):
raid_cmd = self.get_cmd()
devs = ' '.join(self.get_sub_dev_list())
cmds = [' '.join([raid_cmd, '-A', self.get_raid_path(), devs])]
run_command_list(cmds)
def show_raid_info(self):
if not self.get_sys_name():
return
cmds = ['cat /proc/mdstat',
''.join(['cat /sys/block/', self.get_sys_name(),
'/md/stripe_cache_size']),
'cat /proc/modules | grep raid456'
]
run_command_list(cmds)
def fail_one(self, index = 0):
if not self.get_sys_name():
return
tgt = self.get_sub_dev_list()[index]
cmd_fail = ' '.join([self.get_cmd(),
self.get_raid_path(),
'--fail', tgt
])
cmd_remove = ' '.join([self.get_cmd(),
self.get_raid_path(),
'--remove', tgt
])
cmds = [cmd_fail, cmd_remove]
(err, _) = run_command_list(cmds)
if not err:
self.del_sub_dev(tgt)
def fail_two(self, index1 = 0, index2 = 1):
self.fail_one(index1)
self.fail_one(index2)
def add_one(self, index = 0):
if not self.get_sys_name():
return
tgt = self.get_sub_dev_list()[index]
self.zero_raid_sub_dev(tgt)
cmd = ' '.join([self.get_cmd(),
self.get_raid_path(),
'--add', tgt
])
cmds = [cmd]
run_command_list(cmds)
(err, _) = run_command_list(cmds)
if not err:
self.add_sub_dev(tgt)
def add_two(self, index1 = 0, index2 = 1):
self.add_one(index1)
self.add_one(index2)
def check_recovery_speed(self, speed_obj):
if not self.get_sys_name():
return 0
cmd = ' '.join(['cat /proc/mdstat | grep -A3', self.get_sys_name(),
'| grep speed'
])
cmds = [cmd]
(status, speed) = run_command_list(cmds)
if status:
return 0
speed_start = speed.find('speed=')
if speed_start < 0:
return 0
speed_start += len('speed=')
speed_end = -1
speed_units = ['K', 'M', 'G', 'B']
for unit in speed_units:
speed_end = speed[speed_start:].find(unit)
if speed_end >= 0:
break
if speed_end < 0:
print speed
return 0
speed_end += speed_start
speed_value = speed[speed_start: speed_end]
speed_obj.record_value(speed_value)
return 1
def wait_recovery_time(self, cnt = 100):
speed_obj = Speed()
for i in range(cnt):
time.sleep(1)
if i < 3:
continue
ret = self.check_recovery_speed(speed_obj)
if not ret:
break
print 'recovery speed (max avg min):', speed_obj.get_values()
def wait_sync(self):
speed_obj = Speed()
while self.check_recovery_speed(speed_obj):
time.sleep(5)
if speed_obj.__max:
print 'resync speed (max avg min):', speed_obj.get_values()
def find_min_dev_size_kb(dev_path_list):
min_size_kb = -1
for dev_path in dev_path_list:
name = dev_path.split(os.sep)[-1]
cmds = ['cat /proc/partitions | grep ' + name]
(status, line) = run_command_list(cmds)
if status:
continue
size_kb = int(line.split()[2])
if (size_kb < min_size_kb) or (min_size_kb < 0):
min_size_kb = size_kb
return min_size_kb
| gpl-2.0 | 2,847,210,083,783,606,300 | 27.274112 | 76 | 0.475673 | false | 3.572803 | false | false | false |
Alexis-benoist/eralchemy | setup.py | 1 | 2202 | from setuptools import setup
try:
with open('readme.rst') as f:
long_description = f.read()
except IOError:
with open('readme.md') as f:
long_description = f.read()
def read_version():
with open('eralchemy/version.py') as f:
code = f.readlines()[0]
exec(code)
assert ('version' in locals())
return locals()['version']
setup(
name='ERAlchemy',
version=read_version(),
description='Simple entity relation (ER) diagrams generation',
long_description=long_description,
# The project's main homepage.d
url='https://github.com/Alexis-benoist/eralchemy',
# Author details
author='Alexis Benoist',
author_email='[email protected]',
# Choose your license
license='Apache License 2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Database',
],
# What does your project relate to?
keywords='sql relational databases ER diagram render',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=[
'eralchemy',
],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'SQLAlchemy',
'pygraphviz'
],
entry_points={
'console_scripts': [
'eralchemy=eralchemy.main:cli',
],
},
)
| apache-2.0 | 6,218,801,769,933,792,000 | 27.597403 | 79 | 0.625341 | false | 4.123596 | false | false | false |
volatilityfoundation/volatility | volatility/plugins/linux/threads.py | 4 | 3427 | # Volatility
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
@author: Edwin Smulders
@license: GNU General Public License 2.0 or later
@contact: [email protected]
"""
import volatility.plugins.linux.pslist as linux_pslist
from volatility.renderers.basic import Address
from volatility.renderers import TreeGrid
class linux_threads(linux_pslist.linux_pslist):
""" Prints threads of processes """
def unified_output(self, data):
return TreeGrid([("Offset",Address),
("NameProc",str),
("TGID",int),
("ThreadPid",str),
("ThreadName", str),
("thread_offset",Address),
("Addr_limit",Address),
("uid_cred",int),
("gid_cred",int),
("euid_cred",int)
],
self.generator(data))
def generator(self, data):
for task in data:
euidcred = task.euid
uidcred = task.uid
gidcred = task.gid
for thread in task.threads():
addr_limit = self.get_addr_limit(thread)
yield(0,[Address(task.obj_offset),
str(task.comm),
int(task.tgid),
str(thread.pid),
str(thread.comm),
Address(thread.obj_offset),
Address(addr_limit),
int(uidcred),
int(gidcred),
int(euidcred)
])
def get_addr_limit(self,thread, addrvar_offset = 8 ):
"""
Here we read the addr_limit variable of a thread by reading at the offset of the thread plus
the offset of the addr_limit variable inside the thread_info
:param thread: thread from which we want the information
:param addrvar_offset: offset of the addr_limit var in the thread_info
:return: the addr_limit
"""
addr_space = thread.get_process_address_space()
offset = thread.obj_offset + addrvar_offset
if addr_space.__class__ == "LinuxAMD64PagedMemory":
return addr_space.read_long_long_phys(offset)
else:
return addr_space.read_long_phys(offset)
def render_text(self, outfd, data):
for task in data:
outfd.write("\nProcess Name: {}\nProcess ID: {}\n".format(task.comm, task.tgid))
self.table_header(outfd, [('Thread PID', '13'), ('Thread Name', '16')])
for thread in task.threads():
self.table_row(outfd, str(thread.pid), thread.comm)
| gpl-2.0 | 4,537,316,562,078,674,400 | 37.943182 | 100 | 0.565217 | false | 4.387964 | false | false | false |
COMBINE-lab/matryoshka_work | coredomains-import/python-src/ResVsDomSize.py | 1 | 1219 | from CoreDomain import *
import argparse
import pylab
import glob
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--ourdoms',type=str)
parser.add_argument('--dixondoms',type=str)
parser.add_argument('--organism',type=str)
args = parser.parse_args()
if args.organism == 'mouse': chroms = chroms_mouse
elif args.organism == 'human': chroms = chroms_human
dixondoms = []
for chrom in chroms:
dixondoms += read_domains(args.dixondoms,chrom)
ourdoms = {}
for chrom in chroms:
for fname in glob.glob("{0}/*chr{1}.*.alpha*".format(args.ourdoms,chrom)):
gamma = float(fname.split("=")[1][0:-3])
if gamma not in ourdoms: ourdoms[gamma] = []
ourdoms[gamma] += read_domains(fname,chrom)
keys,values = zip(*sorted(ourdoms.items()))
pylab.plot(keys, [ max([d.len() for d in doms])
for doms in values],'o-')
pylab.plot(keys, [ pylab.mean([d.len() for d in doms])
for doms in values],'o-')
pylab.yscale('log')
#pylab.axhline(pylab.mean([d.len() for d in dixondoms]))
pylab.axhline(max([d.len() for d in dixondoms]))
pylab.show()
if __name__=="__main__": main() | gpl-3.0 | 1,189,682,126,824,560,000 | 28.756098 | 82 | 0.608696 | false | 3.117647 | false | false | false |
scottpurdy/nupic | src/nupic/support/unittesthelpers/abstract_temporal_memory_test.py | 10 | 2714 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
from nupic.algorithms.monitor_mixin.temporal_memory_monitor_mixin import (
TemporalMemoryMonitorMixin)
from nupic.data.generators.sequence_machine import SequenceMachine
class AbstractTemporalMemoryTest(object):
__metaclass__ = ABCMeta
VERBOSITY = 1
@abstractmethod
def getTMClass(self):
"""
Implement this method to specify the Temporal Memory class.
"""
@abstractmethod
def getPatternMachine(self):
"""
Implement this method to provide the pattern machine.
"""
def getDefaultTMParams(self):
"""
Override this method to set the default TM params for `self.tm`.
"""
return {}
def setUp(self):
self.tm = None
self.patternMachine = self.getPatternMachine()
self.sequenceMachine = SequenceMachine(self.patternMachine)
def init(self, overrides=None):
"""
Initialize Temporal Memory, and other member variables.
:param overrides: overrides for default Temporal Memory parameters
"""
params = self._computeTMParams(overrides)
class MonitoredTemporalMemory(TemporalMemoryMonitorMixin,
self.getTMClass()): pass
self.tm = MonitoredTemporalMemory(**params)
def feedTM(self, sequence, learn=True, num=1):
repeatedSequence = sequence * num
self.tm.mmClearHistory()
for pattern in repeatedSequence:
if pattern is None:
self.tm.reset()
else:
self.tm.compute(pattern, learn=learn)
# ==============================
# Helper functions
# ==============================
def _computeTMParams(self, overrides):
params = dict(self.getDefaultTMParams())
params.update(overrides or {})
return params
| agpl-3.0 | -8,976,367,267,216,124,000 | 28.824176 | 74 | 0.661017 | false | 4.500829 | false | false | false |
stormi/tsunami | src/primaires/perso/masques/texte/__init__.py | 1 | 2742 | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le masque <texte_libre>."""
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
class TexteLibre(Masque):
"""Masque <texte_libre>.
On attend un n'importe quoi en paramètre.
"""
nom = "texte_libre"
nom_complet = "texte libre"
def init(self):
"""Initialisation des attributs"""
self.texte = ""
def repartir(self, personnage, masques, commande):
"""Répartition du masque.
Le masque <texte_libre> prend tout le message.
"""
message = liste_vers_chaine(commande).lstrip()
self.a_interpreter = message
commande[:] = []
if not message:
raise ErreurValidation(
"Entrez quelque chose, au moins.")
masques.append(self)
return True
def valider(self, personnage, dic_masques):
"""Validation du masque"""
Masque.valider(self, personnage, dic_masques)
message = self.a_interpreter
self.texte = message
return True
| bsd-3-clause | -4,873,071,298,220,123,000 | 35.533333 | 79 | 0.716058 | false | 3.982558 | false | false | false |
jeffhammond/bagel | src/basis/source/turbomole.py | 2 | 2240 | #!/usr/bin/python
import sys
import string
def flash(numbers):
m = len(numbers[0])
atom = " \"prim\" : ["
for p in numbers:
atom += p[0] + ", "
atom = atom[:-2] + "],\n"
atom += " \"cont\" : ["
for p in range(1,m):
atom += "["
for q in numbers:
atom += q[p] + ", "
atom = atom[:-2] + "],\n"
atom = atom[:-2] + "]"
return atom
if len(sys.argv) < 2:
sys.exit("specify the filename")
fp = open(sys.argv[1], "r");
lines = fp.read().split("\n")
dict = ["s", "p", "d", "f", "g", "h", "i"]
dictn = []
for i in range(0,100):
dictn.append(str(i))
out = []
out.append("{")
numbers = []
set = ""
atommark = 0
first = 1
for l in lines:
if len(l) == 0:
continue
elif l[0] == "*" or l[0] == ".":
continue
elif l.strip() == "":
continue
if len(l) > 6:
s = l.strip()
if s[3] in dict and s[0] in dictn:
set = s[3]
continue
if atommark == 1:
if len(numbers) > 0:
#flash
out.append(flash(numbers))
out.append(" }")
numbers = []
if (first == 0):
out.append(" ],")
first = 0
chars = list(l.split(" ")[0])
str = " \""+chars[0].upper()
if len(chars) > 1: str += "".join(chars[1:])
str += "\" : [\n"
str += " {"
out.append(str)
atommark = 0
continue
if len(l) > 3:
if l == "$end":
continue
elif l[0] == "$":
atommark = 1
continue
#when does not start with number skip it
if not (l.strip()[0] in dictn): continue
if set != "":
if len(numbers) > 0:
#flash
out.append(flash(numbers))
str = " }, {"
numbers = []
out.append(str)
out.append(" \"angular\" : \"" + set + "\",")
numbers.append(l.split())
set = ""
else:
numbers.append(l.split())
#flash here too
out.append(flash(numbers))
out.append(" }")
out.append(" ]")
out.append("}")
for l in out:
l = l.replace("D-", "E-")
l = l.replace("D+", "E+")
fp.close()
fp = open(sys.argv[1]+".json", "w")
fp.write("\n".join(out))
fp.close()
| gpl-3.0 | 5,006,210,983,696,538,000 | 19.740741 | 58 | 0.439732 | false | 3.031123 | false | false | false |
veroc/Bika-LIMS | bika/lims/content/worksheet.py | 2 | 38556 | from AccessControl import ClassSecurityInfo
from bika.lims import bikaMessageFactory as _, logger
from bika.lims.config import *
from bika.lims.idserver import renameAfterCreation
from bika.lims.utils import t, tmpID, changeWorkflowState
from bika.lims.utils import to_utf8 as _c
from bika.lims.browser.fields import HistoryAwareReferenceField
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.interfaces import IWorksheet
from bika.lims.permissions import EditWorksheet, ManageWorksheets
from bika.lims.workflow import doActionFor
from bika.lims.workflow import skip
from DateTime import DateTime
from operator import itemgetter
from plone.indexer import indexer
from Products.Archetypes.config import REFERENCE_CATALOG
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.ATContentTypes.lib.historyaware import HistoryAwareMixin
from Products.ATExtensions.ateapi import RecordsField
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode, _createObjectByType
from zope.interface import implements
import re
@indexer(IWorksheet)
def Priority(instance):
priority = instance.getPriority()
if priority:
return priority.getSortKey()
schema = BikaSchema.copy() + Schema((
HistoryAwareReferenceField('WorksheetTemplate',
allowed_types=('WorksheetTemplate',),
relationship='WorksheetAnalysisTemplate',
),
ComputedField('WorksheetTemplateTitle',
searchable=True,
expression="context.getWorksheetTemplate() and context.getWorksheetTemplate().Title() or ''",
widget=ComputedWidget(
visible=False,
),
),
RecordsField('Layout',
required=1,
subfields=('position', 'type', 'container_uid', 'analysis_uid'),
subfield_types={'position': 'int'},
),
# all layout info lives in Layout; Analyses is used for back references.
ReferenceField('Analyses',
required=1,
multiValued=1,
allowed_types=('Analysis', 'DuplicateAnalysis', 'ReferenceAnalysis', 'RejectAnalysis'),
relationship = 'WorksheetAnalysis',
),
StringField('Analyst',
searchable = True,
),
# TODO Remove. Instruments must be assigned directly to each analysis.
ReferenceField('Instrument',
required = 0,
allowed_types = ('Instrument',),
relationship = 'WorksheetInstrument',
referenceClass = HoldingReference,
),
TextField('Remarks',
searchable = True,
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
macro="bika_widgets/remarks",
label=_("Remarks"),
append_only=True,
),
),
StringField('ResultsLayout',
default = '1',
vocabulary = WORKSHEET_LAYOUT_OPTIONS,
),
),
)
schema['id'].required = 0
schema['id'].widget.visible = False
schema['title'].required = 0
schema['title'].widget.visible = {'edit': 'hidden', 'view': 'invisible'}
class Worksheet(BaseFolder, HistoryAwareMixin):
security = ClassSecurityInfo()
implements(IWorksheet)
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def Title(self):
return safe_unicode(self.getId()).encode('utf-8')
def getFolderContents(self, contentFilter):
# The bika_listing machine passes contentFilter to all
# contentsMethod methods. We ignore it.
return list(self.getAnalyses())
security.declareProtected(EditWorksheet, 'addAnalysis')
def addAnalysis(self, analysis, position=None):
"""- add the analysis to self.Analyses().
- position is overruled if a slot for this analysis' parent exists
- if position is None, next available pos is used.
"""
workflow = getToolByName(self, 'portal_workflow')
analysis_uid = analysis.UID()
parent_uid = analysis.aq_parent.UID()
analyses = self.getAnalyses()
layout = self.getLayout()
# check if this analysis is already in the layout
if analysis_uid in [l['analysis_uid'] for l in layout]:
return
# If the ws has an instrument assigned for which the analysis
# is allowed, set it
instr = self.getInstrument()
if instr and analysis.isInstrumentAllowed(instr):
# Set the method assigned to the selected instrument
analysis.setMethod(instr.getMethod())
analysis.setInstrument(instr)
self.setAnalyses(analyses + [analysis, ])
# if our parent has a position, use that one.
if analysis.aq_parent.UID() in [slot['container_uid'] for slot in layout]:
position = [int(slot['position']) for slot in layout if
slot['container_uid'] == analysis.aq_parent.UID()][0]
else:
# prefer supplied position parameter
if not position:
used_positions = [0, ] + [int(slot['position']) for slot in layout]
position = [pos for pos in range(1, max(used_positions) + 2)
if pos not in used_positions][0]
self.setLayout(layout + [{'position': position,
'type': 'a',
'container_uid': parent_uid,
'analysis_uid': analysis.UID()}, ])
allowed_transitions = [t['id'] for t in workflow.getTransitionsFor(analysis)]
if 'assign' in allowed_transitions:
workflow.doActionFor(analysis, 'assign')
# If a dependency of DryMatter service is added here, we need to
# make sure that the dry matter analysis itself is also
# present. Otherwise WS calculations refer to the DB version
# of the DM analysis, which is out of sync with the form.
dms = self.bika_setup.getDryMatterService()
if dms:
dmk = dms.getKeyword()
deps = analysis.getDependents()
# if dry matter service in my dependents:
if dmk in [a.getService().getKeyword() for a in deps]:
# get dry matter analysis from AR
dma = analysis.aq_parent.getAnalyses(getKeyword=dmk,
full_objects=True)[0]
# add it.
if dma not in self.getAnalyses():
self.addAnalysis(dma)
security.declareProtected(EditWorksheet, 'removeAnalysis')
def removeAnalysis(self, analysis):
""" delete an analyses from the worksheet and un-assign it
"""
workflow = getToolByName(self, 'portal_workflow')
# overwrite saved context UID for event subscriber
self.REQUEST['context_uid'] = self.UID()
workflow.doActionFor(analysis, 'unassign')
# Note: subscriber might unassign the AR and/or promote the worksheet
# remove analysis from context.Analyses *after* unassign,
# (doActionFor requires worksheet in analysis.getBackReferences)
Analyses = self.getAnalyses()
if analysis in Analyses:
Analyses.remove(analysis)
self.setAnalyses(Analyses)
layout = [slot for slot in self.getLayout() if slot['analysis_uid'] != analysis.UID()]
self.setLayout(layout)
if analysis.portal_type == "DuplicateAnalysis":
self._delObject(analysis.id)
def addReferences(self, position, reference, service_uids):
""" Add reference analyses to reference, and add to worksheet layout
"""
workflow = getToolByName(self, 'portal_workflow')
rc = getToolByName(self, REFERENCE_CATALOG)
layout = self.getLayout()
wst = self.getWorksheetTemplate()
wstlayout = wst and wst.getLayout() or []
ref_type = reference.getBlank() and 'b' or 'c'
ref_uid = reference.UID()
if position == 'new':
highest_existing_position = len(wstlayout)
for pos in [int(slot['position']) for slot in layout]:
if pos > highest_existing_position:
highest_existing_position = pos
position = highest_existing_position + 1
# LIMS-2132 Reference Analyses got the same ID
refgid = self.nextReferenceAnalysesGroupID(reference)
for service_uid in service_uids:
# services with dependents don't belong in references
service = rc.lookupObject(service_uid)
calc = service.getCalculation()
if calc and calc.getDependentServices():
continue
ref_uid = reference.addReferenceAnalysis(service_uid, ref_type)
ref_analysis = rc.lookupObject(ref_uid)
# Set ReferenceAnalysesGroupID (same id for the analyses from
# the same Reference Sample and same Worksheet)
# https://github.com/bikalabs/Bika-LIMS/issues/931
ref_analysis.setReferenceAnalysesGroupID(refgid)
ref_analysis.reindexObject(idxs=["getReferenceAnalysesGroupID"])
# copy the interimfields
if calc:
ref_analysis.setInterimFields(calc.getInterimFields())
self.setLayout(
self.getLayout() + [{'position': position,
'type': ref_type,
'container_uid': reference.UID(),
'analysis_uid': ref_analysis.UID()}])
self.setAnalyses(
self.getAnalyses() + [ref_analysis, ])
workflow.doActionFor(ref_analysis, 'assign')
def nextReferenceAnalysesGroupID(self, reference):
""" Returns the next ReferenceAnalysesGroupID for the given reference
sample. Gets the last reference analysis registered in the system
for the specified reference sample and increments in one unit the
suffix.
"""
bac = getToolByName(reference, 'bika_analysis_catalog')
ids = bac.Indexes['getReferenceAnalysesGroupID'].uniqueValues()
prefix = reference.id+"-"
rr = re.compile("^"+prefix+"[\d+]+$")
ids = [int(i.split(prefix)[1]) for i in ids if i and rr.match(i)]
ids.sort()
_id = ids[-1] if ids else 0
suffix = str(_id+1).zfill(int(3))
return '%s%s' % (prefix, suffix)
security.declareProtected(EditWorksheet, 'addDuplicateAnalyses')
def addDuplicateAnalyses(self, src_slot, dest_slot):
""" add duplicate analyses to worksheet
"""
rc = getToolByName(self, REFERENCE_CATALOG)
workflow = getToolByName(self, 'portal_workflow')
layout = self.getLayout()
wst = self.getWorksheetTemplate()
wstlayout = wst and wst.getLayout() or []
src_ar = [slot['container_uid'] for slot in layout if
slot['position'] == src_slot]
if src_ar:
src_ar = src_ar[0]
if not dest_slot or dest_slot == 'new':
highest_existing_position = len(wstlayout)
for pos in [int(slot['position']) for slot in layout]:
if pos > highest_existing_position:
highest_existing_position = pos
dest_slot = highest_existing_position + 1
src_analyses = [rc.lookupObject(slot['analysis_uid'])
for slot in layout if
int(slot['position']) == int(src_slot)]
dest_analyses = [rc.lookupObject(slot['analysis_uid']).getAnalysis().UID()
for slot in layout if
int(slot['position']) == int(dest_slot)]
refgid = None
processed = []
for analysis in src_analyses:
if analysis.UID() in dest_analyses:
continue
# If retracted analyses, for some reason, the getLayout() returns
# two times the regular analysis generated automatically after a
# a retraction.
if analysis.UID() in processed:
continue
# Omit retracted analyses
# https://jira.bikalabs.com/browse/LIMS-1745
# https://jira.bikalabs.com/browse/LIMS-2001
if workflow.getInfoFor(analysis, "review_state") == 'retracted':
continue
processed.append(analysis.UID())
# services with dependents don't belong in duplicates
service = analysis.getService()
calc = service.getCalculation()
if calc and calc.getDependentServices():
continue
service = analysis.getService()
_id = self._findUniqueId(service.getKeyword())
duplicate = _createObjectByType("DuplicateAnalysis", self, _id)
duplicate.setAnalysis(analysis)
# Set ReferenceAnalysesGroupID (same id for the analyses from
# the same Reference Sample and same Worksheet)
# https://github.com/bikalabs/Bika-LIMS/issues/931
if not refgid and not analysis.portal_type == 'ReferenceAnalysis':
part = analysis.getSamplePartition().id
dups = [an.getReferenceAnalysesGroupID()
for an in self.getAnalyses()
if an.portal_type == 'DuplicateAnalysis'
and an.getSamplePartition().id == part]
dups = list(set(dups))
postfix = dups and len(dups) + 1 or 1
postfix = str(postfix).zfill(int(2))
refgid = '%s-D%s' % (part, postfix)
duplicate.setReferenceAnalysesGroupID(refgid)
duplicate.reindexObject(idxs=["getReferenceAnalysesGroupID"])
duplicate.processForm()
if calc:
duplicate.setInterimFields(calc.getInterimFields())
self.setLayout(
self.getLayout() + [{'position': dest_slot,
'type': 'd',
'container_uid': analysis.aq_parent.UID(),
'analysis_uid': duplicate.UID()}, ]
)
self.setAnalyses(self.getAnalyses() + [duplicate, ])
workflow.doActionFor(duplicate, 'assign')
def applyWorksheetTemplate(self, wst):
""" Add analyses to worksheet according to wst's layout.
Will not overwrite slots which are filled already.
If the selected template has an instrument assigned, it will
only be applied to those analyses for which the instrument
is allowed
"""
rc = getToolByName(self, REFERENCE_CATALOG)
bac = getToolByName(self, "bika_analysis_catalog")
bc = getToolByName(self, 'bika_catalog')
layout = self.getLayout()
wstlayout = wst.getLayout()
services = wst.getService()
wst_service_uids = [s.UID() for s in services]
wst_slots = [row['pos'] for row in wstlayout if row['type'] == 'a']
ws_slots = [row['position'] for row in layout if row['type'] == 'a']
nr_slots = len(wst_slots) - len(ws_slots)
positions = [pos for pos in wst_slots if pos not in ws_slots]
analyses = bac(portal_type='Analysis',
getServiceUID=wst_service_uids,
review_state='sample_received',
worksheetanalysis_review_state='unassigned',
cancellation_state = 'active',
sort_on='getDueDate')
# ar_analyses is used to group analyses by AR.
ar_analyses = {}
instr = self.getInstrument() if self.getInstrument() else wst.getInstrument()
for brain in analyses:
analysis = brain.getObject()
if instr and brain.getObject().isInstrumentAllowed(instr) is False:
# Exclude those analyses for which the ws selected
# instrument is not allowed
continue
ar_id = brain.getRequestID
if ar_id in ar_analyses:
ar_analyses[ar_id].append(analysis)
else:
if len(ar_analyses.keys()) < nr_slots:
ar_analyses[ar_id] = [analysis, ]
# Add analyses, sorted by AR ID
ars = sorted(ar_analyses.keys())
for ar in ars:
for analysis in ar_analyses[ar]:
self.addAnalysis(analysis, position=positions[ars.index(ar)])
# find best maching reference samples for Blanks and Controls
for t in ('b', 'c'):
form_key = t == 'b' and 'blank_ref' or 'control_ref'
ws_slots = [row['position'] for row in layout if row['type'] == t]
for row in [r for r in wstlayout if
r['type'] == t and r['pos'] not in ws_slots]:
reference_definition_uid = row[form_key]
samples = bc(portal_type='ReferenceSample',
review_state='current',
inactive_state='active',
getReferenceDefinitionUID=reference_definition_uid)
if not samples:
break
samples = [s.getObject() for s in samples]
if t == 'b':
samples = [s for s in samples if s.getBlank()]
else:
samples = [s for s in samples if not s.getBlank()]
complete_reference_found = False
references = {}
for reference in samples:
reference_uid = reference.UID()
references[reference_uid] = {}
references[reference_uid]['services'] = []
references[reference_uid]['count'] = 0
specs = reference.getResultsRangeDict()
for service_uid in wst_service_uids:
if service_uid in specs:
references[reference_uid]['services'].append(service_uid)
references[reference_uid]['count'] += 1
if references[reference_uid]['count'] == len(wst_service_uids):
complete_reference_found = True
break
if complete_reference_found:
supported_uids = wst_service_uids
self.addReferences(int(row['pos']),
reference,
supported_uids)
else:
# find the most complete reference sample instead
reference_keys = references.keys()
no_of_services = 0
reference = None
for key in reference_keys:
if references[key]['count'] > no_of_services:
no_of_services = references[key]['count']
reference = key
if reference:
reference = rc.lookupObject(reference)
supported_uids = [s.UID() for s in reference.getServices()
if s.UID() in wst_service_uids]
self.addReferences(int(row['pos']),
reference,
supported_uids)
# fill duplicate positions
layout = self.getLayout()
ws_slots = [row['position'] for row in layout if row['type'] == 'd']
for row in [r for r in wstlayout if
r['type'] == 'd' and r['pos'] not in ws_slots]:
dest_pos = int(row['pos'])
src_pos = int(row['dup'])
if src_pos in [int(slot['position']) for slot in layout]:
self.addDuplicateAnalyses(src_pos, dest_pos)
# Apply the wst instrument to all analyses and ws
if instr:
self.setInstrument(instr, True)
def exportAnalyses(self, REQUEST=None, RESPONSE=None):
""" Export analyses from this worksheet """
import bika.lims.InstrumentExport as InstrumentExport
instrument = REQUEST.form['getInstrument']
try:
func = getattr(InstrumentExport, "%s_export" % instrument)
except:
return
func(self, REQUEST, RESPONSE)
return
security.declarePublic('getWorksheetServices')
def getWorksheetServices(self):
""" get list of analysis services present on this worksheet
"""
services = []
for analysis in self.getAnalyses():
service = analysis.getService()
if service not in services:
services.append(service)
return services
security.declareProtected(EditWorksheet, 'resequenceWorksheet')
def resequenceWorksheet(self, REQUEST=None, RESPONSE=None):
""" Reset the sequence of analyses in the worksheet """
""" sequence is [{'pos': , 'type': , 'uid', 'key'},] """
old_seq = self.getLayout()
new_dict = {}
new_seq = []
other_dict = {}
for seq in old_seq:
if seq['key'] == '':
if seq['pos'] not in other_dict:
other_dict[seq['pos']] = []
other_dict[seq['pos']].append(seq)
continue
if seq['key'] not in new_dict:
new_dict[seq['key']] = []
analyses = new_dict[seq['key']]
analyses.append(seq)
new_dict[seq['key']] = analyses
new_keys = sorted(new_dict.keys())
rc = getToolByName(self, REFERENCE_CATALOG)
seqno = 1
for key in new_keys:
analyses = {}
if len(new_dict[key]) == 1:
new_dict[key][0]['pos'] = seqno
new_seq.append(new_dict[key][0])
else:
for item in new_dict[key]:
item['pos'] = seqno
analysis = rc.lookupObject(item['uid'])
service = analysis.Title()
analyses[service] = item
a_keys = sorted(analyses.keys())
for a_key in a_keys:
new_seq.append(analyses[a_key])
seqno += 1
other_keys = other_dict.keys()
other_keys.sort()
for other_key in other_keys:
for item in other_dict[other_key]:
item['pos'] = seqno
new_seq.append(item)
seqno += 1
self.setLayout(new_seq)
RESPONSE.redirect('%s/manage_results' % self.absolute_url())
security.declarePublic('current_date')
def current_date(self):
""" return current date """
return DateTime()
def setInstrument(self, instrument, override_analyses=False):
""" Sets the specified instrument to the Analysis from the
Worksheet. Only sets the instrument if the Analysis
allows it, according to its Analysis Service and Method.
If an analysis has already assigned an instrument, it won't
be overriden.
The Analyses that don't allow the instrument specified will
not be modified.
Returns the number of analyses affected
"""
analyses = [an for an in self.getAnalyses()
if (not an.getInstrument() or override_analyses)
and an.isInstrumentAllowed(instrument)]
total = 0
for an in analyses:
# An analysis can be done using differents Methods.
# Un method can be supported by more than one Instrument,
# but not all instruments support one method.
# We must force to set the instrument's method too. Otherwise,
# the WS manage results view will display the an's default
# method and its instruments displaying, only the instruments
# for the default method in the picklist.
meth = instrument.getMethod()
if an.isMethodAllowed(meth):
an.setMethod(meth)
success = an.setInstrument(instrument)
if success is True:
total += 1
self.getField('Instrument').set(self, instrument)
return total
def getAnalystName(self):
""" Returns the name of the currently assigned analyst
"""
mtool = getToolByName(self, 'portal_membership')
analyst = self.getAnalyst().strip()
analyst_member = mtool.getMemberById(analyst)
if analyst_member != None:
return analyst_member.getProperty('fullname')
else:
return analyst
def workflow_script_submit(self):
# Don't cascade. Shouldn't be submitting WSs directly for now,
# except edge cases where all analyses are already submitted,
# but self was held back until an analyst was assigned.
workflow = getToolByName(self, 'portal_workflow')
self.reindexObject(idxs=["review_state", ])
can_attach = True
for a in self.getAnalyses():
if workflow.getInfoFor(a, 'review_state') in \
('to_be_sampled', 'to_be_preserved', 'sample_due',
'sample_received', 'attachment_due', 'assigned',):
# Note: referenceanalyses and duplicateanalyses can still
# have review_state = "assigned".
can_attach = False
break
if can_attach:
doActionFor(self, 'attach')
def workflow_script_attach(self):
if skip(self, "attach"):
return
self.reindexObject(idxs=["review_state", ])
# Don't cascade. Shouldn't be attaching WSs for now (if ever).
return
def workflow_script_retract(self):
if skip(self, "retract"):
return
workflow = getToolByName(self, 'portal_workflow')
self.reindexObject(idxs=["review_state", ])
if not "retract all analyses" in self.REQUEST['workflow_skiplist']:
# retract all analyses in this self.
# (NB: don't retract if it's verified)
analyses = self.getAnalyses()
for analysis in analyses:
state = workflow.getInfoFor(analysis, 'review_state', '')
if state not in ('attachment_due', 'to_be_verified',):
continue
doActionFor(analysis, 'retract')
def workflow_script_verify(self):
if skip(self, "verify"):
return
workflow = getToolByName(self, 'portal_workflow')
self.reindexObject(idxs=["review_state", ])
if not "verify all analyses" in self.REQUEST['workflow_skiplist']:
# verify all analyses in this self.
analyses = self.getAnalyses()
for analysis in analyses:
state = workflow.getInfoFor(analysis, 'review_state', '')
if state != 'to_be_verified':
continue
doActionFor(analysis, "verify")
def workflow_script_reject(self):
"""Copy real analyses to RejectAnalysis, with link to real
create a new worksheet, with the original analyses, and new
duplicates and references to match the rejected
worksheet.
"""
if skip(self, "reject"):
return
utils = getToolByName(self, 'plone_utils')
workflow = self.portal_workflow
def copy_src_fields_to_dst(src, dst):
# These will be ignored when copying field values between analyses
ignore_fields = ['UID',
'id',
'title',
'allowDiscussion',
'subject',
'description',
'location',
'contributors',
'creators',
'effectiveDate',
'expirationDate',
'language',
'rights',
'creation_date',
'modification_date',
'Layout', # ws
'Analyses', # ws
]
fields = src.Schema().fields()
for field in fields:
fieldname = field.getName()
if fieldname in ignore_fields:
continue
getter = getattr(src, 'get'+fieldname,
src.Schema().getField(fieldname).getAccessor(src))
setter = getattr(dst, 'set'+fieldname,
dst.Schema().getField(fieldname).getMutator(dst))
if getter is None or setter is None:
# ComputedField
continue
setter(getter())
analysis_positions = {}
for item in self.getLayout():
analysis_positions[item['analysis_uid']] = item['position']
old_layout = []
new_layout = []
# New worksheet
worksheets = self.aq_parent
new_ws = _createObjectByType('Worksheet', worksheets, tmpID())
new_ws.unmarkCreationFlag()
new_ws_id = renameAfterCreation(new_ws)
copy_src_fields_to_dst(self, new_ws)
new_ws.edit(
Number = new_ws_id,
Remarks = self.getRemarks()
)
# Objects are being created inside other contexts, but we want their
# workflow handlers to be aware of which worksheet this is occurring in.
# We save the worksheet in request['context_uid'].
# We reset it again below.... be very sure that this is set to the
# UID of the containing worksheet before invoking any transitions on
# analyses.
self.REQUEST['context_uid'] = new_ws.UID()
# loop all analyses
analyses = self.getAnalyses()
new_ws_analyses = []
old_ws_analyses = []
for analysis in analyses:
# Skip published or verified analyses
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state in ['published', 'verified', 'retracted']:
old_ws_analyses.append(analysis.UID())
old_layout.append({'position': position,
'type':'a',
'analysis_uid':analysis.UID(),
'container_uid':analysis.aq_parent.UID()})
continue
# Normal analyses:
# - Create matching RejectAnalysis inside old WS
# - Link analysis to new WS in same position
# - Copy all field values
# - Clear analysis result, and set Retested flag
if analysis.portal_type == 'Analysis':
reject = _createObjectByType('RejectAnalysis', self, tmpID())
reject.unmarkCreationFlag()
reject_id = renameAfterCreation(reject)
copy_src_fields_to_dst(analysis, reject)
reject.setAnalysis(analysis)
reject.reindexObject()
analysis.edit(
Result = None,
Retested = True,
)
analysis.reindexObject()
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(reject.UID())
old_layout.append({'position': position,
'type':'r',
'analysis_uid':reject.UID(),
'container_uid':self.UID()})
new_ws_analyses.append(analysis.UID())
new_layout.append({'position': position,
'type':'a',
'analysis_uid':analysis.UID(),
'container_uid':analysis.aq_parent.UID()})
# Reference analyses
# - Create a new reference analysis in the new worksheet
# - Transition the original analysis to 'rejected' state
if analysis.portal_type == 'ReferenceAnalysis':
service_uid = analysis.getService().UID()
reference = analysis.aq_parent
reference_type = analysis.getReferenceType()
new_analysis_uid = reference.addReferenceAnalysis(service_uid,
reference_type)
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(analysis.UID())
old_layout.append({'position': position,
'type':reference_type,
'analysis_uid':analysis.UID(),
'container_uid':reference.UID()})
new_ws_analyses.append(new_analysis_uid)
new_layout.append({'position': position,
'type':reference_type,
'analysis_uid':new_analysis_uid,
'container_uid':reference.UID()})
workflow.doActionFor(analysis, 'reject')
new_reference = reference.uid_catalog(UID=new_analysis_uid)[0].getObject()
workflow.doActionFor(new_reference, 'assign')
analysis.reindexObject()
# Duplicate analyses
# - Create a new duplicate inside the new worksheet
# - Transition the original analysis to 'rejected' state
if analysis.portal_type == 'DuplicateAnalysis':
src_analysis = analysis.getAnalysis()
ar = src_analysis.aq_parent
service = src_analysis.getService()
duplicate_id = new_ws.generateUniqueId('DuplicateAnalysis')
new_duplicate = _createObjectByType('DuplicateAnalysis',
new_ws, duplicate_id)
new_duplicate.unmarkCreationFlag()
copy_src_fields_to_dst(analysis, new_duplicate)
workflow.doActionFor(new_duplicate, 'assign')
new_duplicate.reindexObject()
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(analysis.UID())
old_layout.append({'position': position,
'type':'d',
'analysis_uid':analysis.UID(),
'container_uid':self.UID()})
new_ws_analyses.append(new_duplicate.UID())
new_layout.append({'position': position,
'type':'d',
'analysis_uid':new_duplicate.UID(),
'container_uid':new_ws.UID()})
workflow.doActionFor(analysis, 'reject')
analysis.reindexObject()
new_ws.setAnalyses(new_ws_analyses)
new_ws.setLayout(new_layout)
new_ws.replaces_rejected_worksheet = self.UID()
for analysis in new_ws.getAnalyses():
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state == 'to_be_verified':
changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")
self.REQUEST['context_uid'] = self.UID()
self.setLayout(old_layout)
self.setAnalyses(old_ws_analyses)
self.replaced_by = new_ws.UID()
def checkUserManage(self):
""" Checks if the current user has granted access to this worksheet
and if has also privileges for managing it.
"""
granted = False
can_access = self.checkUserAccess()
if can_access == True:
pm = getToolByName(self, 'portal_membership')
edit_allowed = pm.checkPermission(EditWorksheet, self)
if edit_allowed:
# Check if the current user is the WS's current analyst
member = pm.getAuthenticatedMember()
analyst = self.getAnalyst().strip()
if analyst != _c(member.getId()):
# Has management privileges?
if pm.checkPermission(ManageWorksheets, self):
granted = True
else:
granted = True
return granted
def checkUserAccess(self):
""" Checks if the current user has granted access to this worksheet.
Returns False if the user has no access, otherwise returns True
"""
# Deny access to foreign analysts
allowed = True
pm = getToolByName(self, "portal_membership")
member = pm.getAuthenticatedMember()
analyst = self.getAnalyst().strip()
if analyst != _c(member.getId()):
roles = member.getRoles()
restrict = 'Manager' not in roles \
and 'LabManager' not in roles \
and 'LabClerk' not in roles \
and 'RegulatoryInspector' not in roles \
and self.bika_setup.getRestrictWorksheetUsersAccess()
allowed = not restrict
return allowed
def setAnalyst(self,analyst):
for analysis in self.getAnalyses():
analysis.setAnalyst(analyst)
self.Schema().getField('Analyst').set(self, analyst)
security.declarePublic('getPriority')
def getPriority(self):
""" get highest priority from all analyses
"""
analyses = self.getAnalyses()
priorities = []
for analysis in analyses:
if not hasattr(analysis, 'getPriority'):
continue
if analysis.getPriority():
priorities.append(analysis.getPriority())
priorities = sorted(priorities, key = itemgetter('sortKey'))
if priorities:
return priorities[-1]
registerType(Worksheet, PROJECTNAME)
| agpl-3.0 | 3,233,029,592,291,378,700 | 42.321348 | 101 | 0.554777 | false | 4.583452 | false | false | false |
jmenashe/person-reidentification | reidentifier/results/a_detections/gen_roc.py | 1 | 4213 | #!/usr/bin/env python
import csv, sys
from os.path import splitext, dirname
def readFile(file):
data = csv.reader(open(file))
# Read the column names from the first line of the file
fields = data.next()
results = []
for row in data:
# Zip together the field names and values
items = zip(fields, row)
item = {}
# Add the value to our dictionary
for (name, value) in items:
item[name] = value.strip()
result = float(item['result'])
label = int(item['label'])
results += [[result, label]]
return results
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
def getStats(results, threshold = 0.0):
fp = 0
tp = 0
fn = 0
tn = 0
for (result, label) in results:
if result > threshold and label == 1:
tp += 1
if result <= threshold and label == 1:
fn += 1
if result <= threshold and label == 0:
tn += 1
if result > threshold and label == 0:
fp += 1
return tp, fp, tn, fn
def getAPR(results, threshold = 0):
tp, fp, tn, fn = getStats(results,threshold)
if tp == 0 and fp == 0:
precision = 0
else:
precision = float(tp) / (tp + fp)
recall = float(tp) / (tp + fn)
accuracy = float(tp + tn) / (tp + tn + fp + fn)
return accuracy, precision, recall
def ROC(results, t):
tp, fp, tn, fn = getStats(results, t)
tpr = float(tp) / (tp + fn)
fpr = float(fp) / (fp + tn)
return fpr, tpr
def PR(results, t):
tp, fp, tn, fn = getStats(results, t)
p = float(tp) / (tp + fp)
r = float(tp) / (tp + fn)
return r, p
def getBestThreshold(results):
maxResult = max(map(lambda x: x[0], results))
minResult = min(map(lambda x: x[0], results))
r = maxResult - minResult
step = r / 100.0
score = 0.0
threshold = 0.0
for t in drange(minResult,maxResult,step):
a,p,r = getAPR(results,t)
s = 2.0 * p + r
if score < s:
score = s
threshold = t
return threshold
def getCurve(results, fn):
maxResult = max(map(lambda x: x[0], results))
minResult = min(map(lambda x: x[0], results))
r = maxResult - minResult
step = r / 100.0
rates = []
for t in drange(minResult,maxResult,step):
x, y = fn(results, t)
rates += [[x, y]]
return rates
class GraphParams:
def __init__(self, title = "", ylabel = "True Positive Rate", xlabel = "False Positive Rate"):
self.title = title
self.ylabel = ylabel
self.xlabel = xlabel
def generateCurves(files, params = GraphParams()):
curves = open("curves.gp", 'w')
curves.write('set xrange [0:1]; set yrange [0:1];\n')
curves.write('set xlabel "%s";\n' % params.xlabel)
curves.write('set ylabel "%s";\n' % params.ylabel)
curves.write('set title "%s";\n' % params.title)
curves.write('set datafile separator ",";\n')
curves.write('set key right center outside;\n')
curves.write('plot \\\n')
i = 1
for f, t in files:
results = readFile(f)
rates = getCurve(results, ROC)
f = splitext(f)[0]
outfile = f + "_roc.csv"
output = open(outfile, 'w')
for r in rates:
output.write("%s,%s\n" % (r[0], r[1]))
output.close()
curves.write(' "%s" u 1:2 title "%s" with lines' % (outfile,t))
if i == len(files):
curves.write(';\n')
else:
curves.write(', \\\n')
i += 1
curves.write("pause -1")
files = []
#files += [["hasHat_100w_10s_hog_rbf.csv", "HasHat with RBF"]]
#files += [["hasHat_poly3.csv", "HasHat with 3-Poly"]]
files += [["hasHat.csv", "Has Hat"]]
files += [["hasJeans.csv", "Has Jeans"]]
files += [["hasLongHair.csv", "Has Long Hair"]]
files += [["hasLongPants.csv", "Has Long Pants"]]
files += [["hasLongSleeves.csv", "Has Long Sleeves"]]
files += [["hasShorts.csv", "Has Shorts"]]
files += [["hasTShirt.csv", "Has T-Shirt"]]
files += [["isMale.csv", "Is Male"]]
files += [["hasGlasses.csv", "Has Glasses"]]
for f in files:
f[0] = "rbf_oneclass_100w_25s/" + f[0]
generateCurves(files, GraphParams(title = "Performance with RBF Kernels"))
for f in files:
results = readFile(f[0])
#t = getBestThreshold(results)
#print max(map(lambda x: x[0], results))
#print results
a, p, r = getAPR(results)
print "%s: A: %2.2f, P: %2.2f, R: %2.2f" % (f[1],a,p,r)
| gpl-3.0 | -8,259,149,482,517,327,000 | 25.834395 | 96 | 0.594113 | false | 2.90953 | false | false | false |
0bserver07/One-Hundred-Layers-Tiramisu | model-dynamic.py | 1 | 4629 | from __future__ import absolute_import
from __future__ import print_function
import os
import keras.models as models
from keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape, Permute
from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D
from keras.layers.normalization import BatchNormalization
from keras.layers import Conv2D, Conv2DTranspose
from keras import backend as K
import cv2
import numpy as np
import json
K.set_image_dim_ordering('th')
# weight_decay = 0.0001
from keras.regularizers import l2
class Tiramisu():
def __init__(self):
self.create()
def DenseBlock(self, layers, filters):
model = self.model
for i in range(layers):
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters, kernel_size=(3, 3), padding='same', init="he_uniform", W_regularizer = l2(0.0001)))
model.add(Dropout(0.2))
def TransitionDown(self,filters):
model = self.model
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters, kernel_size=(1, 1), padding='same', init="he_uniform", W_regularizer = l2(0.0001)))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2)))
def TransitionUp(self,filters, input_shape,output_shape):
model = self.model
model.add(Conv2DTranspose(filters,kernel_size=(3, 3), strides=(2, 2),data_format='channels_first', output_shape=output_shape,
padding='same', input_shape=input_shape, init="he_uniform", W_regularizer = l2(0.0001)))
def gfactorCounterDown(self,model_self,growth_factor,block_size,previous_conv_size,block_count=5):
for i in range(block_count):
m = block_size * growth_factor + previous_conv_size
model_self.DenseBlock(growth_factor,m)
model_self.TransitionDown(growth_factor,m)
def gfactorCounterUp(self,model_self,growth_factor,block_size,previous_block_size,previous_conv_size,block_count=5):
# previous_conv_size = 288, since:
# self.DenseBlock(4,288) # 4*12 = 48 + 288 = 336
# self.TransitionDown(288)
for i in range(block_count):
m = block_size * growth_factor + previous_block_size * growth_factor + previous_conv_size
model_self.DenseBlock(growth_factor,m)
model_self.TransitionDown(growth_factor,m)
def create(self):
model = self.model = models.Sequential()
# cropping
# model.add(Cropping2D(cropping=((68, 68), (128, 128)), input_shape=(3, 360,480)))
model.add(Conv2D(48, kernel_size=(3, 3), padding='same', input_shape=(3,224,224), init="he_uniform", W_regularizer = l2(0.0001)))
# (5 * 4)* 2 + 5 + 5 + 1 + 1 +1
# growth_m = 4 * 12
# previous_m = 48
self.gfactorCounterDown(self.model,12,4,48,5)
# self.DenseBlock(4,96) # 4*12 = 48 + 48 = 96
# self.TransitionDown(96)
# self.DenseBlock(4,144) # 4*12 = 48 + 96 = 144
# self.TransitionDown(144)
# self.DenseBlock(4,192) # 4*12 = 48 + 144 = 192
# self.TransitionDown(192)
# self.DenseBlock(4,240)# 4*12 = 48 + 192 = 240
# self.TransitionDown(240)
# self.DenseBlock(4,288) # 4*12 = 48 + 288 = 336
# self.TransitionDown(288)
self.DenseBlock(15,336) # 4 * 12 = 48 + 288 = 336
self.gfactorCounterDown(self.model,12,4,4,288,5)
# self.TransitionUp(384, (384, 7, 7), (None, 384, 14, 14)) # m = 288 + 4x12 + 4x12 = 384.
# self.DenseBlock(4,384)
# self.TransitionUp(336, (336, 14, 14), (None, 336, 28, 28)) #m = 240 + 4x12 + 4x12 = 336
# self.DenseBlock(4,336)
# self.TransitionUp(288, (288, 28, 28), (None, 288, 56, 56)) # m = 192 + 4x12 + 4x12 = 288
# self.DenseBlock(4,288)
# self.TransitionUp(240, (240, 56, 56), (None, 240, 112, 112)) # m = 144 + 4x12 + 4x12 = 240
# self.DenseBlock(4,240)
# self.TransitionUp(192, (192, 112, 112), (None, 192, 224, 224)) # m = 96 + 4x12 + 4x12 = 192
# self.DenseBlock(4,192)
model.add(Conv2D(12, kernel_size=(3, 3), padding='same', init="he_uniform", W_regularizer = l2(0.0001)))
model.add(Reshape((12, 224 * 224)))
model.add(Permute((2, 1)))
model.add(Activation('softmax'))
model.summary()
with open('tiramisu_fc_dense56_model.json', 'w') as outfile:
outfile.write(json.dumps(json.loads(model.to_json()), indent=3))
Tiramisu() | mit | -4,171,338,367,017,541,600 | 37.907563 | 137 | 0.612875 | false | 3.179258 | false | false | false |
pajlada/tyggbot | pajbot/modules/quests/typememessage.py | 2 | 2350 | import logging
from pajbot.managers.handler import HandlerManager
from pajbot.managers.redis import RedisManager
from pajbot.modules.base import ModuleSetting
from pajbot.modules.quest import QuestModule
from pajbot.modules.quests import BaseQuest
log = logging.getLogger(__name__)
class TypeMeMessageQuestModule(BaseQuest):
ID = "quest-" + __name__.split(".")[-1]
NAME = "Colorful chat /me"
DESCRIPTION = "Type X /me messages with X message length."
PARENT_MODULE = QuestModule
CATEGORY = "Quest"
SETTINGS = [
ModuleSetting(
key="quest_limit",
label="How many messages does the user needs to type?",
type="number",
required=True,
placeholder="",
default=100,
constraints={"min_value": 1, "max_value": 200},
),
ModuleSetting(
key="quest_message_length",
label="How many letters minimum should be in the message?",
type="number",
required=True,
placeholder="",
default=15,
constraints={"min_value": 1, "max_value": 500},
),
]
def get_limit(self):
return self.settings["quest_limit"]
def get_quest_message_length(self):
return self.settings["quest_message_length"]
def on_message(self, source, message, event, **rest):
if len(message) < self.get_quest_message_length() or event.type != "action":
return
user_progress = self.get_user_progress(source, default=0)
if user_progress >= self.get_limit():
return
user_progress += 1
redis = RedisManager.get()
if user_progress == self.get_limit():
self.finish_quest(redis, source)
self.set_user_progress(source, user_progress, redis=redis)
def start_quest(self):
HandlerManager.add_handler("on_message", self.on_message)
redis = RedisManager.get()
self.load_progress(redis=redis)
def stop_quest(self):
HandlerManager.remove_handler("on_message", self.on_message)
redis = RedisManager.get()
self.reset_progress(redis=redis)
def get_objective(self):
return f"Type {self.get_limit()} /me messages with a length of minimum {self.get_quest_message_length()} letters KappaPride "
| mit | 5,674,103,126,088,242,000 | 29.128205 | 133 | 0.614468 | false | 4.010239 | false | false | false |
ameily/pyjotr | pyjotr/coverage.py | 1 | 2305 |
from coverage.report import Reporter
import os
class JsonFileCoverageCounters(object):
def __init__(self):
self.hits = 0
self.misses = 0
def jsonify(self):
return dict(
hits=self.hits,
misses=self.misses
)
class JsonFileCoverage(object):
def __init__(self, path):
self.path = path
self.lines = {}
self.counters = JsonFileCoverageCounters()
def jsonify(self):
return dict(
path=self.path,
lines=self.lines,
counters=self.counters.jsonify()
)
class JsonCoverageCounters(object):
def __init__(self):
self.files = 0
self.hits = 0
self.misses = 0
def jsonify(self):
return dict(
files=self.files,
hits=self.hits,
misses=self.misses
)
class JsonCoverageReport(Reporter):
def __init__(self, cov, config):
super(JsonCoverageReport, self).__init__(cov, config)
#self.arcs = cov.data.has_arcs()
#self.packages = {}
self.path_strip_prefix = os.getcwd() + os.sep
self.files = []
self.counters = JsonCoverageCounters()
def report(self, morfs=None, outfile=None):
#self.packages = {}
self.coverage._harvest_data()
#self.coverage.config.from_args(
# ignore_errors=None, omit=None, include=None,
# show_missing=None
#)
self.report_files(self.json_file, morfs)
self.counters.files = len(self.files)
self.counters.misses = sum([f.counters.misses for f in self.files])
self.counters.hits = sum([f.counters.hits for f in self.files])
def json_file(self, cu, analysis):
filename = cu.file_locator.relative_filename(cu.filename).replace('\\', '/')
cfile = JsonFileCoverage(filename)
for line in sorted(analysis.statements):
cfile.lines[line] = int(line not in analysis.missing)
cfile.counters.misses = len(analysis.missing)
cfile.counters.hits = len(analysis.statements) - cfile.counters.misses
self.files.append(cfile)
def jsonify(self):
return dict(
counters=self.counters.jsonify(),
files=[f.jsonify() for f in self.files]
)
| bsd-2-clause | 723,880,484,888,024,300 | 27.109756 | 84 | 0.584816 | false | 3.822554 | false | false | false |
datapythonista/pandas | pandas/tests/frame/methods/test_value_counts.py | 2 | 3871 | import numpy as np
import pandas as pd
import pandas._testing as tm
def test_data_frame_value_counts_unsorted():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(sort=False)
expected = pd.Series(
data=[1, 2, 1],
index=pd.MultiIndex.from_arrays(
[(2, 4, 6), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_ascending():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(ascending=True)
expected = pd.Series(
data=[1, 1, 2],
index=pd.MultiIndex.from_arrays(
[(2, 6, 4), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_default():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts()
expected = pd.Series(
data=[2, 1, 1],
index=pd.MultiIndex.from_arrays(
[(4, 2, 6), (0, 2, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_normalize():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(normalize=True)
expected = pd.Series(
data=[0.5, 0.25, 0.25],
index=pd.MultiIndex.from_arrays(
[(4, 2, 6), (0, 2, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_single_col_default():
df = pd.DataFrame({"num_legs": [2, 4, 4, 6]})
result = df.value_counts()
expected = pd.Series(
data=[2, 1, 1],
index=pd.MultiIndex.from_arrays([[4, 2, 6]], names=["num_legs"]),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty():
df_no_cols = pd.DataFrame()
result = df_no_cols.value_counts()
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty_normalize():
df_no_cols = pd.DataFrame()
result = df_no_cols.value_counts(normalize=True)
expected = pd.Series([], dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_dropna_true(nulls_fixture):
# GH 41334
df = pd.DataFrame(
{
"first_name": ["John", "Anne", "John", "Beth"],
"middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"],
},
)
result = df.value_counts()
expected = pd.Series(
data=[1, 1],
index=pd.MultiIndex.from_arrays(
[("Beth", "John"), ("Louise", "Smith")], names=["first_name", "middle_name"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_dropna_false(nulls_fixture):
# GH 41334
df = pd.DataFrame(
{
"first_name": ["John", "Anne", "John", "Beth"],
"middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"],
},
)
result = df.value_counts(dropna=False)
expected = pd.Series(
data=[1, 1, 1, 1],
index=pd.MultiIndex(
levels=[
pd.Index(["Anne", "Beth", "John"]),
pd.Index(["Louise", "Smith", nulls_fixture]),
],
codes=[[0, 1, 2, 2], [2, 0, 1, 2]],
names=["first_name", "middle_name"],
),
)
tm.assert_series_equal(result, expected)
| bsd-3-clause | 7,438,535,327,190,715,000 | 25.513699 | 88 | 0.535262 | false | 3.040848 | true | false | false |
Connexions/cnx-archive | cnxarchive/utils/mimetype.py | 1 | 1223 | # -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013-2015, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Methods for handling mimetypes."""
__all__ = (
'COLLECTION_MIMETYPE',
'SUBCOLLECTION_MIMETYPE',
'FOLDER_MIMETYPE',
'MIMETYPES',
'MODULE_MIMETYPE',
'COMPOSITE_MODULE_MIMETYPE',
'PORTALTYPE_TO_MIMETYPE_MAPPING',
'portaltype_to_mimetype',
)
MODULE_MIMETYPE = 'application/vnd.org.cnx.module'
COMPOSITE_MODULE_MIMETYPE = 'application/vnd.org.cnx.composite-module'
COLLECTION_MIMETYPE = 'application/vnd.org.cnx.collection'
SUBCOLLECTION_MIMETYPE = 'application/vnd.org.cnx.subcollection'
FOLDER_MIMETYPE = 'application/vnd.org.cnx.folder'
MIMETYPES = (MODULE_MIMETYPE, COLLECTION_MIMETYPE, FOLDER_MIMETYPE,)
PORTALTYPE_TO_MIMETYPE_MAPPING = {
'Module': MODULE_MIMETYPE,
'CompositeModule': COMPOSITE_MODULE_MIMETYPE,
'Collection': COLLECTION_MIMETYPE,
'SubCollection': SUBCOLLECTION_MIMETYPE,
}
def portaltype_to_mimetype(portal_type):
"""Map the given ``portal_type`` to a mimetype."""
return PORTALTYPE_TO_MIMETYPE_MAPPING[portal_type]
| agpl-3.0 | -5,410,816,853,727,015,000 | 31.184211 | 70 | 0.715454 | false | 3.080605 | false | false | false |
amyvmiwei/chromium | tools/grit/grit/grit_runner.py | 3 | 6432 | #!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Command processor for GRIT. This is the script you invoke to run the various
GRIT tools.
'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..'))
import getopt
from grit import util
import grit.exception
import grit.tool.build
import grit.tool.count
import grit.tool.diff_structures
import grit.tool.menu_from_parts
import grit.tool.newgrd
import grit.tool.resize
import grit.tool.rc2grd
import grit.tool.test
import grit.tool.transl2tc
import grit.tool.unit
# Copyright notice
_COPYRIGHT = '''\
GRIT - the Google Resource and Internationalization Tool
Copyright (c) Google Inc. %d
''' % util.GetCurrentYear()
# Keys for the following map
_CLASS = 1
_REQUIRES_INPUT = 2
_HIDDEN = 3 # optional key - presence indicates tool is hidden
# Maps tool names to the tool's module. Done as a list of (key, value) tuples
# instead of a map to preserve ordering.
_TOOLS = [
['build', { _CLASS : grit.tool.build.RcBuilder, _REQUIRES_INPUT : True }],
['newgrd', { _CLASS : grit.tool.newgrd.NewGrd, _REQUIRES_INPUT : False }],
['rc2grd', { _CLASS : grit.tool.rc2grd.Rc2Grd, _REQUIRES_INPUT : False }],
['transl2tc', { _CLASS : grit.tool.transl2tc.TranslationToTc,
_REQUIRES_INPUT : False }],
['sdiff', { _CLASS : grit.tool.diff_structures.DiffStructures,
_REQUIRES_INPUT : False }],
['resize', { _CLASS : grit.tool.resize.ResizeDialog, _REQUIRES_INPUT : True }],
['unit', { _CLASS : grit.tool.unit.UnitTestTool, _REQUIRES_INPUT : False }],
['count', { _CLASS : grit.tool.count.CountMessage, _REQUIRES_INPUT : True }],
['test', { _CLASS: grit.tool.test.TestTool, _REQUIRES_INPUT : True, _HIDDEN : True }],
['menufromparts', { _CLASS: grit.tool.menu_from_parts.MenuTranslationsFromParts,
_REQUIRES_INPUT : True, _HIDDEN : True }],
]
def PrintUsage():
print _COPYRIGHT
tool_list = ''
for (tool, info) in _TOOLS:
if not _HIDDEN in info.keys():
tool_list += ' %-12s %s\n' % (tool, info[_CLASS]().ShortDescription())
# TODO(joi) Put these back into the usage when appropriate:
#
# -d Work disconnected. This causes GRIT not to attempt connections with
# e.g. Perforce.
#
# -c Use the specified Perforce CLIENT when talking to Perforce.
print '''Usage: grit [GLOBALOPTIONS] TOOL [args to tool]
Global options:
-i INPUT Specifies the INPUT file to use (a .grd file). If this is not
specified, GRIT will look for the environment variable GRIT_INPUT.
If it is not present either, GRIT will try to find an input file
named 'resource.grd' in the current working directory.
-v Print more verbose runtime information.
-x Print extremely verbose runtime information. Implies -v
-p FNAME Specifies that GRIT should profile its execution and output the
results to the file FNAME.
Tools:
TOOL can be one of the following:
%s
For more information on how to use a particular tool, and the specific
arguments you can send to that tool, execute 'grit help TOOL'
''' % (tool_list)
class Options(object):
'''Option storage and parsing.'''
def __init__(self):
self.disconnected = False
self.client = ''
self.input = None
self.verbose = False
self.extra_verbose = False
self.output_stream = sys.stdout
self.profile_dest = None
def ReadOptions(self, args):
'''Reads options from the start of args and returns the remainder.'''
(opts, args) = getopt.getopt(args, 'g:dvxc:i:p:')
for (key, val) in opts:
if key == '-d': self.disconnected = True
elif key == '-c': self.client = val
elif key == '-i': self.input = val
elif key == '-v':
self.verbose = True
util.verbose = True
elif key == '-x':
self.verbose = True
util.verbose = True
self.extra_verbose = True
util.extra_verbose = True
elif key == '-p': self.profile_dest = val
if not self.input:
if 'GRIT_INPUT' in os.environ:
self.input = os.environ['GRIT_INPUT']
else:
self.input = 'resource.grd'
return args
def __repr__(self):
return '(disconnected: %d, verbose: %d, client: %s, input: %s)' % (
self.disconnected, self.verbose, self.client, self.input)
def _GetToolInfo(tool):
'''Returns the info map for the tool named 'tool' or None if there is no
such tool.'''
matches = filter(lambda t: t[0] == tool, _TOOLS)
if not len(matches):
return None
else:
return matches[0][1]
def Main(args):
'''Parses arguments and does the appropriate thing.'''
util.ChangeStdoutEncoding()
if not len(args) or len(args) == 1 and args[0] == 'help':
PrintUsage()
return 0
elif len(args) == 2 and args[0] == 'help':
tool = args[1].lower()
if not _GetToolInfo(tool):
print "No such tool. Try running 'grit help' for a list of tools."
return 2
print ("Help for 'grit %s' (for general help, run 'grit help'):\n"
% (tool))
print _GetToolInfo(tool)[_CLASS].__doc__
return 0
else:
options = Options()
args = options.ReadOptions(args) # args may be shorter after this
tool = args[0]
if not _GetToolInfo(tool):
print "No such tool. Try running 'grit help' for a list of tools."
return 2
try:
if _GetToolInfo(tool)[_REQUIRES_INPUT]:
os.stat(options.input)
except OSError:
print ('Input file %s not found.\n'
'To specify a different input file:\n'
' 1. Use the GRIT_INPUT environment variable.\n'
' 2. Use the -i command-line option. This overrides '
'GRIT_INPUT.\n'
' 3. Specify neither GRIT_INPUT or -i and GRIT will try to load '
"'resource.grd'\n"
' from the current directory.' % options.input)
return 2
toolobject = _GetToolInfo(tool)[_CLASS]()
if options.profile_dest:
import hotshot
prof = hotshot.Profile(options.profile_dest)
prof.runcall(toolobject.Run, options, args[1:])
else:
toolobject.Run(options, args[1:])
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause | 2,232,770,222,574,269,400 | 30.365854 | 88 | 0.635303 | false | 3.407525 | false | false | false |
bastimeyer/streamlink | tests/plugins/test_schoolism.py | 3 | 2480 | import unittest
from streamlink.plugins.schoolism import Schoolism
class TestPluginSchoolism(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'https://www.schoolism.com/watchLesson.php',
]
for url in should_match:
self.assertTrue(Schoolism.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'https://www.schoolism.com',
]
for url in should_not_match:
self.assertFalse(Schoolism.can_handle_url(url))
def test_playlist_parse_subs(self):
with_subs = """var allVideos=[
{sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/44/2/part1.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Digital Painting - Lesson 2 - Part 1",playlistTitle:"Part 1",}], subtitles: [{
"default": true,
kind: "subtitles", srclang: "en", label: "English",
src: "https://s3.amazonaws.com/schoolism-encoded/44/subtitles/2/2-1.vtt",
}],
},
{sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/44/2/part2.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Digital Painting - Lesson 2 - Part 2",playlistTitle:"Part 2",}], subtitles: [{
"default": true,
kind: "subtitles", srclang: "en", label: "English",
src: "https://s3.amazonaws.com/schoolism-encoded/44/subtitles/2/2-2.vtt",
}]
}];
"""
data = Schoolism.playlist_schema.validate(with_subs)
self.assertIsNotNone(data)
self.assertEqual(2, len(data))
def test_playlist_parse(self):
without_subs = """var allVideos=[
{sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/14/1/part1.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Gesture Drawing - Lesson 1 - Part 1",playlistTitle:"Part 1",}],},
{sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/14/1/part2.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Gesture Drawing - Lesson 1 - Part 2",playlistTitle:"Part 2",}]}
];
"""
data = Schoolism.playlist_schema.validate(without_subs)
self.assertIsNotNone(data)
self.assertEqual(2, len(data))
| bsd-2-clause | 3,052,704,488,478,411,300 | 45.792453 | 250 | 0.605645 | false | 3.33782 | true | false | false |
kaldonis/ft-event-manager | src/app/models/bot.py | 1 | 2674 | from app.domain.constants import CONSTANTS
from app.models import EventSearchMixin, WeightclassEventSearchMixin, DataInterface
from app.models.database import DBObject
class Bot(DBObject, EventSearchMixin, WeightclassEventSearchMixin):
bid = None
registered_ind = None
event_id = None
name = None
team_name = None
team_email = None
team_city = None
team_state = None
category = None
weightclass = None
primary_freq = None
secondary_freq = None
multibot_ind = None
notes = None
photo_url = None
seed_number = None
bracket_id = None
@classmethod
def get_by_bracket_seed(cls, event_id, bracket_id, seed):
db = DataInterface(CONSTANTS.DB_NAME)
sql = "SELECT * FROM %s WHERE event_id = %d AND seed_number = %d AND bracket_id = %d" % (cls.__name__, int(event_id), seed, bracket_id)
result = db.fetch_one(sql)
return cls(**(result)) if result else None
@classmethod
def get_by_bracket(cls, bracket_id):
"""
alternative to the method available in BracketSearchMixin, parses through Matches instead
"""
from app.models.match import Match
matches = Match.get_by_bracket_round(bracket_id, 'A')
bots = []
for match in matches:
bots += [match.bot1_id, match.bot2_id]
bots = list(set(bots))
return [cls.get_by_id(bot_id) for bot_id in bots if bot_id]
@classmethod
def bye(cls):
"""
placeholder bot object for a bye
"""
params = {
'name': '--bye--',
'id': 0
}
return cls(**(params))
def register(self):
"""
registers the bot
"""
sql = "UPDATE %s SET registered_ind = 'Y' WHERE id = %d" % (self.__class__.__name__, self.id)
return self.db.execute(sql)
def unregister(self):
"""
unregisters the bot
"""
sql = "UPDATE %s SET registered_ind = 'N' WHERE id = %d" % (self.__class__.__name__, self.id)
return self.db.execute(sql)
def to_dict(self):
return {
'id': self.id,
'botName': self.name,
'teamName': self.team_name,
'teamEmail': self.team_email,
'teamCity': self.team_city,
'teamState': self.team_state,
'category': self.category,
'weightclass': self.weightclass,
'photoUrl': self.photo_url,
'multibot': True if self.multibot_ind == 'Y' else False,
'isRegistered': True if self.registered_ind == 'Y' else False
} | gpl-2.0 | 1,966,577,702,323,303,200 | 30.845238 | 143 | 0.559461 | false | 3.847482 | false | false | false |
devlights/try-python | trypython/extlib/bitstring/bitstring01.py | 1 | 2317 | # coding: utf-8
"""
bitstringモジュールに関するサンプルです。
BitArrayについて(1)。
"""
import bitstring as bs
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
def exec(self):
# ---------------------------------------------------------------
# [link]
# http://pythonhosted.org/bitstring/walkthrough.html
# ---------------------------------------------------------------
# BitArray は、バイナリデータを保持するコンテナ
# BitStream は、ポジションや読み込みの操作を行うことが出来るストリーム
# ---------------------------------------------------------------
# BitArray は、mutable
# インスタンスから2進数や16進数など様々な形で取り出せる
# ---------------------------------------------------------------
# hex は、ビットが4の倍数でないとエラーとなる
# oct は、ビットが3の倍数でないとエラーとなる
# ---------------------------------------------------------------
# 初期化の方法はいろいろある
# BitArray(bin='0b11111111')
# BitArray(hex='0xff')
# BitArray(uint=255, length=8)
# 上記はどれも同じデータをつくる
# ---------------------------------------------------------------
ba01 = bs.BitArray('0xff01')
pr('ba01', ba01)
pr('ba01.bin', ba01.bin)
pr('ba01.hex', ba01.hex)
pr('ba01.int', ba01.int)
pr('ba01.uint', ba01.uint)
pr('ba01.bytes', ba01.bytes)
try:
# 8進数はビットが3の倍数分存在しないと駄目
pr('ba01.oct', ba01.oct)
except bs.InterpretError as e:
pr('ba01.oct', e)
ba02 = ba01 + '0b00'
pr('ba02.oct', ba02.oct)
ba03 = bs.BitArray(bin='0b11111111')
pr('ba03', ba03)
pr('ba03.uint', ba03.uint)
ba04 = bs.BitArray(hex='0xff')
pr('ba04', ba04)
pr('ba04.uint', ba04.uint)
ba05 = bs.BitArray(uint=255, length=8)
pr('ba05', ba05)
pr('ba05.uint', ba05.uint)
def go():
obj = Sample()
obj.exec()
| mit | 4,366,161,010,745,510,400 | 28.208955 | 73 | 0.44047 | false | 2.725627 | false | false | false |
skuda/client-python | kubernetes/client/models/v1_vsphere_virtual_disk_volume_source.py | 1 | 4263 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1VsphereVirtualDiskVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, fs_type=None, volume_path=None):
"""
V1VsphereVirtualDiskVolumeSource - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'fs_type': 'str',
'volume_path': 'str'
}
self.attribute_map = {
'fs_type': 'fsType',
'volume_path': 'volumePath'
}
self._fs_type = fs_type
self._volume_path = volume_path
@property
def fs_type(self):
"""
Gets the fs_type of this V1VsphereVirtualDiskVolumeSource.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.
:return: The fs_type of this V1VsphereVirtualDiskVolumeSource.
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""
Sets the fs_type of this V1VsphereVirtualDiskVolumeSource.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.
:param fs_type: The fs_type of this V1VsphereVirtualDiskVolumeSource.
:type: str
"""
self._fs_type = fs_type
@property
def volume_path(self):
"""
Gets the volume_path of this V1VsphereVirtualDiskVolumeSource.
Path that identifies vSphere volume vmdk
:return: The volume_path of this V1VsphereVirtualDiskVolumeSource.
:rtype: str
"""
return self._volume_path
@volume_path.setter
def volume_path(self, volume_path):
"""
Sets the volume_path of this V1VsphereVirtualDiskVolumeSource.
Path that identifies vSphere volume vmdk
:param volume_path: The volume_path of this V1VsphereVirtualDiskVolumeSource.
:type: str
"""
if volume_path is None:
raise ValueError("Invalid value for `volume_path`, must not be `None`")
self._volume_path = volume_path
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | 9,144,381,890,984,104,000 | 29.021127 | 183 | 0.562515 | false | 4.220792 | false | false | false |
Zocket/goodeal | catalog/models.py | 1 | 2731 | from django.db import models
# Create your models here.
class Category(models.Model):
name=models.CharField(max_length=50)
slug=models.SlugField(max_length=50,unique=True,
help_text='Unique value for product page URL, created from name.')
description=models.TextField()
is_active=models.BooleanField(default=True)
meta_keywords=models.CharField("Meta Keywords",max_length=255,
help_text='Comma-delimited set of SEO keywords for meta tag')
meta_description=models.CharField("Meta Description",max_length=255,
help_text='Content for description meta tag')
created_at=models.DateTimeField(auto_now_add=True)
updated_at=models.DateTimeField(auto_now=True)
parent_category=models.ForeignKey('self')
#created_by (to be added after the user model is defined)
#updated_by (to be added after the user model is defined)
class Meta:
db_table='categories'
ordering=['-created_at']
verbose_name_plural='Categories'
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('catalog_category',(),{'category_slug':self.slug})
class Product(models.Model):
name=models.CharField(max_length=255,unique=True)
slug=models.SlugField(max_length=255,unique=True,
help_text='Unique value for product-deal page URL, created from name.')
#shop (to be added after shop model is defined)
brand=models.CharField(max_length=50)
price=models.DecimalField(max_digits=9,decimal_places=2)
old_price=models.DecimalField(max_digits=9,decimal_places=2,
blank=True,default=0.00)
image=models.CharField(max_length=50)
is_active=models.BooleanField(default=True)
#quantity=models.IntegerField()
product_description=models.TextField()
deal_description=models.TextField()
deal_weeknumber=models.IntegerField()
meta_keywords=models.CharField("Meta Keywords",max_length=255,
help_text='Comma-delimited set of SEO keywords for meta tag')
meta_description=models.CharField("Meta Description",max_length=255,
help_text='Content for description meta tag')
created_at=models.DateTimeField(auto_now_add=True)
updated_at=models.DateTimeField(auto_now=True)
categories=models.ManyToManyField(Category)
#created_by (to be added after the user model is defined)
#updated_by (to be added after the user model is defined)
class Meta:
db_table='products'
ordering=['-created_at']
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('catalog_product',(),{'product_slug':self.slug})
| mit | 8,190,625,543,559,492,000 | 39.176471 | 79 | 0.694984 | false | 3.862801 | false | false | false |
cc13ny/Allin | leetcode/105-Construct-Binary-Tree-from-Preorder-and-Inorder-Traversal/ConBinTreefrPreInTraversal_001.py | 5 | 1720 | /**
* Definition for binary tree
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
import java.util.Hashtable;
public class Solution {
public TreeNode buildTree(int[] preorder, int[] inorder) {
int len = preorder.length;
if(len == 0) return null;
Hashtable<Integer, Integer> pretb = new Hashtable<Integer, Integer>();
Hashtable<Integer, Integer> intb = new Hashtable<Integer, Integer>();
int root = preorder[0];
TreeNode tree_root = new TreeNode(root);
for(int i = 0; i < len; i++){
pretb.put(preorder[i], i);
intb.put(inorder[i], i);
}
for(int j = 1; j < len; j++){
int num = preorder[j];
int inloc = intb.get(num);
TreeNode next = tree_root;
boolean flag = false;
while(true){
if(inloc < intb.get(next.val)){
if(next.left == null){
next.left = new TreeNode(num);
flag = true;
}
else{
next = next.left;
}
}
else{
if(next.right == null){
next.right = new TreeNode(num);
flag = true;
}
else{
next = next.right;
}
}
if(flag) break;
}
}
return tree_root;
}
}
| mit | -7,307,176,108,560,812,000 | 26.301587 | 78 | 0.393023 | false | 4.321608 | false | false | false |
JavierJia/vxquery | vxquery-benchmark/src/main/resources/noaa-ghcn-daily/scripts/weather_config_ghcnd.py | 11 | 3568 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Base URL used to get all the required files.
BASE_DOWNLOAD_URL = 'http://www1.ncdc.noaa.gov/pub/data/ghcn/daily/'
# List of required files for a build.
FILE_NAMES = []
FILE_NAMES.append('ghcnd-countries.txt')
FILE_NAMES.append('ghcnd-inventory.txt')
FILE_NAMES.append('ghcnd-states.txt')
FILE_NAMES.append('ghcnd-stations.txt')
FILE_NAMES.append('ghcnd-version.txt')
FILE_NAMES.append('ghcnd_all.tar.gz')
FILE_NAMES.append('ghcnd_gsn.tar.gz')
FILE_NAMES.append('ghcnd_hcn.tar.gz')
FILE_NAMES.append('readme.txt')
FILE_NAMES.append('status.txt')
# Store the row details here.
# Index values of each field details.
FIELD_INDEX_NAME = 0
FIELD_INDEX_START = 1
FIELD_INDEX_END = 2
FIELD_INDEX_TYPE = 3
DLY_FIELD_ID = 0
DLY_FIELD_YEAR = 1
DLY_FIELD_MONTH = 2
DLY_FIELD_ELEMENT = 3
DLY_FIELD_DAY_OFFSET = 4
DLY_FIELD_DAY_FIELDS = 4
DLY_FIELDS = []
# Details about the row.
DLY_FIELDS.append(['ID', 1, 11, 'Character'])
DLY_FIELDS.append(['YEAR', 12, 15, 'Integer'])
DLY_FIELDS.append(['MONTH', 16, 17, 'Integer'])
DLY_FIELDS.append(['ELEMENT', 18, 21, 'Character'])
# Days in each row.
for i in range(1, 32):
start = 22 + ((i - 1) * 8)
DLY_FIELDS.append(['VALUE' + str(i), (start + 0), (start + 4), 'Integer'])
DLY_FIELDS.append(['MFLAG' + str(i), (start + 5), (start + 5), 'Character'])
DLY_FIELDS.append(['QFLAG' + str(i), (start + 6), (start + 6), 'Character'])
DLY_FIELDS.append(['SFLAG' + str(i), (start + 7), (start + 7), 'Character'])
# Details about the row.
STATIONS_FIELDS = {}
STATIONS_FIELDS['ID'] = ['ID', 1, 11, 'Character']
STATIONS_FIELDS['LATITUDE'] = ['LATITUDE', 13, 20, 'Real']
STATIONS_FIELDS['LONGITUDE'] = ['LONGITUDE', 22, 30, 'Real']
STATIONS_FIELDS['ELEVATION'] = ['ELEVATION', 32, 37, 'Real']
STATIONS_FIELDS['STATE'] = ['STATE', 39, 40, 'Character']
STATIONS_FIELDS['NAME'] = ['NAME', 42, 71, 'Character']
STATIONS_FIELDS['GSNFLAG'] = ['GSNFLAG', 73, 75, 'Character']
STATIONS_FIELDS['HCNFLAG'] = ['HCNFLAG', 77, 79, 'Character']
STATIONS_FIELDS['WMOID'] = ['WMOID', 81, 85, 'Character']
# Details about the row.
COUNTRIES_FIELDS = {}
COUNTRIES_FIELDS['CODE'] = ['CODE', 1, 2, 'Character']
COUNTRIES_FIELDS['NAME'] = ['NAME', 4, 50, 'Character']
# Details about the row.
STATES_FIELDS = {}
STATES_FIELDS['CODE'] = ['CODE', 1, 2, 'Character']
STATES_FIELDS['NAME'] = ['NAME', 4, 50, 'Character']
# Details about the row.
INVENTORY_FIELDS = {}
INVENTORY_FIELDS['ID'] = ['ID', 1, 11, 'Character']
INVENTORY_FIELDS['LATITUDE'] = ['LATITUDE', 13, 20, 'Real']
INVENTORY_FIELDS['LONGITUDE'] = ['LONGITUDE', 22, 30, 'Real']
INVENTORY_FIELDS['ELEMENT'] = ['ELEMENT', 32, 35, 'Character']
INVENTORY_FIELDS['FIRSTYEAR'] = ['FIRSTYEAR', 37, 40, 'Integer']
INVENTORY_FIELDS['LASTYEAR'] = ['LASTYEAR', 42, 45, 'Integer']
| apache-2.0 | -9,106,545,514,342,361,000 | 36.557895 | 80 | 0.682735 | false | 2.978297 | false | false | false |
edxzw/edx-platform | common/djangoapps/student/tests/test_certificates.py | 23 | 7728 | """Tests for display of certificates on the student dashboard. """
import unittest
import ddt
import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from mock import patch
from django.test.utils import override_settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from certificates.tests.factories import GeneratedCertificateFactory # pylint: disable=import-error
from certificates.api import get_certificate_url # pylint: disable=import-error
from course_modes.models import CourseMode
from student.models import LinkedInAddToProfileConfiguration
# pylint: disable=no-member
def _fake_is_request_in_microsite():
"""
Mocked version of microsite helper method to always return true
"""
return True
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTest(ModuleStoreTestCase):
"""Tests display of certificates on the student dashboard. """
USERNAME = "test_user"
PASSWORD = "password"
DOWNLOAD_URL = "http://www.example.com/certificate.pdf"
def setUp(self):
super(CertificateDisplayTest, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result, msg="Could not log in")
self.course = CourseFactory()
self.course.certificates_display_behavior = "early_with_info"
self.update_course(self.course, self.user.username)
@ddt.data('verified', 'professional')
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
def test_display_verified_certificate(self, enrollment_mode):
self._create_certificate(enrollment_mode)
self._check_can_download_certificate()
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
def test_display_verified_certificate_no_id(self):
"""
Confirm that if we get a certificate with a no-id-professional mode
we still can download our certificate
"""
self._create_certificate(CourseMode.NO_ID_PROFESSIONAL_MODE)
self._check_can_download_certificate_no_id()
@ddt.data('verified', 'honor')
@override_settings(CERT_NAME_SHORT='Test_Certificate')
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_display_download_certificate_button(self, enrollment_mode):
"""
Tests if CERTIFICATES_HTML_VIEW is True
and course has enabled web certificates via cert_html_view_enabled setting
and no active certificate configuration available
then any of the Download certificate button should not be visible.
"""
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
self._create_certificate(enrollment_mode)
self._check_can_not_download_certificate()
@ddt.data('verified')
@override_settings(CERT_NAME_SHORT='Test_Certificate')
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_linked_student_to_web_view_credential(self, enrollment_mode):
certificates = [
{
'id': 0,
'name': 'Test Name',
'description': 'Test Description',
'is_active': True,
'signatories': [],
'version': 1
}
]
self.course.certificates = {'certificates': certificates}
self.course.cert_html_view_enabled = True
self.course.save() # pylint: disable=no-member
self.store.update_item(self.course, self.user.id)
cert = self._create_certificate(enrollment_mode)
test_url = get_certificate_url(course_id=self.course.id, uuid=cert.verify_uuid)
response = self.client.get(reverse('dashboard'))
self.assertContains(response, u'View Test_Certificate')
self.assertContains(response, test_url)
def test_post_to_linkedin_invisibility(self):
"""
Verifies that the post certificate to linked button
does not appear by default (when config is not set)
"""
self._create_certificate('honor')
# until we set up the configuration, the LinkedIn action
# button should not be visible
self._check_linkedin_visibility(False)
def test_post_to_linkedin_visibility(self):
"""
Verifies that the post certificate to linked button appears
as expected
"""
self._create_certificate('honor')
config = LinkedInAddToProfileConfiguration(
company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
enabled=True
)
config.save()
# now we should see it
self._check_linkedin_visibility(True)
@mock.patch("microsite_configuration.microsite.is_request_in_microsite", _fake_is_request_in_microsite)
def test_post_to_linkedin_microsite(self):
"""
Verifies behavior for microsites which disables the post to LinkedIn
feature (for now)
"""
self._create_certificate('honor')
config = LinkedInAddToProfileConfiguration(
company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
enabled=True
)
config.save()
# now we should not see it because we are in a microsite
self._check_linkedin_visibility(False)
def _check_linkedin_visibility(self, is_visible):
"""
Performs assertions on the Dashboard
"""
response = self.client.get(reverse('dashboard'))
if is_visible:
self.assertContains(response, u'Add Certificate to LinkedIn Profile')
else:
self.assertNotContains(response, u'Add Certificate to LinkedIn Profile')
def _create_certificate(self, enrollment_mode):
"""Simulate that the user has a generated certificate. """
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, mode=enrollment_mode)
return GeneratedCertificateFactory(
user=self.user,
course_id=self.course.id,
mode=enrollment_mode,
download_url=self.DOWNLOAD_URL,
status="downloadable",
grade=0.98,
)
def _check_can_download_certificate(self):
response = self.client.get(reverse('dashboard'))
self.assertContains(response, u'Download Your ID Verified')
self.assertContains(response, self.DOWNLOAD_URL)
def _check_can_download_certificate_no_id(self):
"""
Inspects the dashboard to see if a certificate for a non verified course enrollment
is present
"""
response = self.client.get(reverse('dashboard'))
self.assertContains(response, u'Download')
self.assertContains(response, u'(PDF)')
self.assertContains(response, self.DOWNLOAD_URL)
def _check_can_not_download_certificate(self):
"""
Make sure response does not have any of the download certificate buttons
"""
response = self.client.get(reverse('dashboard'))
self.assertNotContains(response, u'View Test_Certificate')
self.assertNotContains(response, u'Download Your Test_Certificate (PDF)')
self.assertNotContains(response, u'Download Test_Certificate (PDF)')
self.assertNotContains(response, self.DOWNLOAD_URL)
| agpl-3.0 | -4,790,777,539,730,320,000 | 38.228426 | 107 | 0.670807 | false | 4.188618 | true | false | false |
pmquang/python-anyconfig | anyconfig/backend/configobj.py | 1 | 2413 | #
# Copyright (C) 2013 - 2015 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
"""configobj backend.
- Format to support: configobj, http://goo.gl/JbP2Kp (readthedocs.org)
- Requirements: configobj (https://pypi.python.org/pypi/configobj/)
- Limitations: None obvious
- Special options:
- All options except for 'infile' passed to configobj.ConfigObj.__init__
should work.
- See also: http://goo.gl/LcVOzZ (readthedocs.org)
"""
from __future__ import absolute_import
import configobj
import anyconfig.backend.base
def make_configobj(cnf, **kwargs):
"""
Make a configobj.ConfigObj initalized with given config `cnf`.
:param cnf: Configuration data :: Parser.container
:param kwargs: optional keyword parameters passed to ConfigObj.__init__
:return: An initialized configobj.ConfigObj instance
"""
cobj = configobj.ConfigObj(**kwargs)
cobj.update(cnf)
return cobj
class Parser(anyconfig.backend.base.LParser, anyconfig.backend.base.D2Parser):
"""
Parser for Ini-like config files which configobj supports.
"""
_type = "configobj"
_priority = 10
_load_opts = ["cls", "configspec", "encoding", "interpolation",
"raise_errors", "list_values", "create_empty", "file_error",
"stringify", "indent_type", "default_encoding", "unrepr",
"_inspec", ]
_dump_opts = ["cls", "encoding", "list_values", "indent_type",
"default_encoding", "unrepr", "write_empty_values", ]
_open_flags = ('rb', 'wb')
load_from_path = anyconfig.backend.base.to_method(configobj.ConfigObj)
load_from_stream = anyconfig.backend.base.to_method(configobj.ConfigObj)
def dump_to_string(self, cnf, **kwargs):
"""
Dump config `cnf` to a string.
:param cnf: Configuration data to dump :: self.container
:param kwargs: backend-specific optional keyword parameters :: dict
:return: string represents the configuration
"""
return '\n'.join(make_configobj(cnf, **kwargs).write())
def dump_to_stream(self, cnf, stream, **kwargs):
"""
:param cnf: Configuration data to dump :: self.container
:param stream: Config file or file-like object
:param kwargs: backend-specific optional keyword parameters :: dict
"""
make_configobj(cnf, **kwargs).write(stream)
# vim:sw=4:ts=4:et:
| mit | 4,402,006,248,795,226,600 | 31.608108 | 78 | 0.648156 | false | 3.672755 | true | false | false |
iphoting/healthchecks | hc/front/tests/test_add_apprise.py | 2 | 1497 | from hc.api.models import Channel
from hc.test import BaseTestCase
from django.test.utils import override_settings
@override_settings(APPRISE_ENABLED=True)
class AddAppriseTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.url = "/projects/%s/add_apprise/" % self.project.code
def test_instructions_work(self):
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertContains(r, "Integration Settings", status_code=200)
def test_it_works(self):
form = {"url": "json://example.org"}
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, form)
self.assertRedirects(r, self.channels_url)
c = Channel.objects.get()
self.assertEqual(c.kind, "apprise")
self.assertEqual(c.value, "json://example.org")
self.assertEqual(c.project, self.project)
@override_settings(APPRISE_ENABLED=False)
def test_it_requires_client_id(self):
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 404)
def test_it_requires_rw_access(self):
self.bobs_membership.rw = False
self.bobs_membership.save()
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 403)
| bsd-3-clause | 8,555,357,241,396,441,000 | 35.512195 | 76 | 0.665331 | false | 3.572792 | true | false | false |
sserrot/champion_relationships | venv/Lib/site-packages/pyrsistent/typing.py | 5 | 1767 | """Helpers for use with type annotation.
Use the empty classes in this module when annotating the types of Pyrsistent
objects, instead of using the actual collection class.
For example,
from pyrsistent import pvector
from pyrsistent.typing import PVector
myvector: PVector[str] = pvector(['a', 'b', 'c'])
"""
from __future__ import absolute_import
try:
from typing import Container
from typing import Hashable
from typing import Generic
from typing import Iterable
from typing import Mapping
from typing import Sequence
from typing import Sized
from typing import TypeVar
__all__ = [
'CheckedPMap',
'CheckedPSet',
'CheckedPVector',
'PBag',
'PDeque',
'PList',
'PMap',
'PSet',
'PVector',
]
T = TypeVar('T')
KT = TypeVar('KT')
VT = TypeVar('VT')
class CheckedPMap(Mapping[KT, VT], Hashable):
pass
# PSet.add and PSet.discard have different type signatures than that of Set.
class CheckedPSet(Generic[T], Hashable):
pass
class CheckedPVector(Sequence[T], Hashable):
pass
class PBag(Container[T], Iterable[T], Sized, Hashable):
pass
class PDeque(Sequence[T], Hashable):
pass
class PList(Sequence[T], Hashable):
pass
class PMap(Mapping[KT, VT], Hashable):
pass
# PSet.add and PSet.discard have different type signatures than that of Set.
class PSet(Generic[T], Hashable):
pass
class PVector(Sequence[T], Hashable):
pass
class PVectorEvolver(Generic[T]):
pass
class PMapEvolver(Generic[KT, VT]):
pass
class PSetEvolver(Generic[T]):
pass
except ImportError:
pass
| mit | -9,187,801,960,868,538,000 | 21.0875 | 80 | 0.627051 | false | 3.849673 | false | false | false |
JustF0rWork/malware | trails/feeds/malwaredomains.py | 1 | 1677 | #!/usr/bin/env python
"""
Copyright (c) 2014-2016 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "http://malwaredomains.lehigh.edu/files/domains.txt"
__check__ = "safebrowsing.clients.google.com"
__reference__ = "malwaredomains.com"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip('\r').replace('\xa0', "")
if not line or line.startswith('#'):
continue
items = line.split('\t')
if len(items) > 4:
info = items[3]
for _ in ("andromeda", "banjori", "banload", "bedep", "bhek", "bhek2", "blackvine", "browlock", "citadel", "corebot", "cridex", "cryptowall", "darkcomet", "dexter", "dircrypt", "dridex", "dyre", "fareit", "geinimi", "gh0st", "gorynych", "goz", "gozi", "gumblar", "hesperbot", "kaixin", "katrina", "kazy", "keitaro", "kelihos", "kins", "koobface", "kryptik", "matsnu", "napolar", "necurs", "neurevt", "njrat", "nymaim", "passwordstealer", "pkybot", "pony", "p0ny", "posmalware", "poweliks", "pushdo", "pykspa", "qakbot", "ramnit", "ranbyus", "rbn", "rovnix", "runforestrun", "russiandoll", "shiotob", "shylock", "simda", "soaksoak", "sofacy", "suppobox", "teslacrypt", "tinba", "vawtrak", "waledac", "yigido", "zemot", "zeus"):
if re.search(r"(?i)\b%s\b" % _, info):
info = "%s (malware)" % _
break
retval[items[2]] = (info.replace('_', ' '), __reference__)
return retval
| mit | -8,636,587,465,493,791,000 | 48.323529 | 742 | 0.562314 | false | 2.886403 | false | false | false |
ProvincalInovationManagement/provinceManagement | debug_toolbar/panels/cache.py | 2 | 6845 | import inspect
import sys
import time
from django.conf import settings
from django.core import cache
from django.core.cache import get_cache as base_get_cache
from django.core.cache.backends.base import BaseCache
from django.dispatch import Signal
from django.template import Node
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _, ungettext
from debug_toolbar.panels import DebugPanel
from debug_toolbar.utils import (tidy_stacktrace, render_stacktrace,
get_template_info, get_stack)
cache_called = Signal(providing_args=["time_taken", "name", "return_value", "args", "kwargs", "trace"])
def send_signal(method):
def wrapped(self, *args, **kwargs):
t = time.time()
value = method(self, *args, **kwargs)
t = time.time() - t
enable_stacktraces = getattr(settings,
'DEBUG_TOOLBAR_CONFIG', {}).get('ENABLE_STACKTRACES', True)
if enable_stacktraces:
stacktrace = tidy_stacktrace(reversed(get_stack()))
else:
stacktrace = []
template_info = None
cur_frame = sys._getframe().f_back
try:
while cur_frame is not None:
if cur_frame.f_code.co_name == 'render':
node = cur_frame.f_locals['self']
if isinstance(node, Node):
template_info = get_template_info(node.source)
break
cur_frame = cur_frame.f_back
except:
pass
del cur_frame
cache_called.send(sender=self.__class__, time_taken=t,
name=method.__name__, return_value=value,
args=args, kwargs=kwargs, trace=stacktrace,
template_info=template_info, backend=self.cache)
return value
return wrapped
class CacheStatTracker(BaseCache):
"""A small class used to track cache calls."""
def __init__(self, cache):
self.cache = cache
def __repr__(self):
return u"<CacheStatTracker for %s>" % self.cache.__repr__()
def _get_func_info(self):
frame = sys._getframe(3)
info = inspect.getframeinfo(frame)
return (info[0], info[1], info[2], info[3])
def __contains__(self, key):
return self.cache.__contains__(key)
def make_key(self, *args, **kwargs):
return self.cache.make_key(*args, **kwargs)
def validate_key(self, *args, **kwargs):
self.cache.validate_key(*args, **kwargs)
def clear(self):
return self.cache.clear()
@send_signal
def add(self, *args, **kwargs):
return self.cache.add(*args, **kwargs)
@send_signal
def get(self, *args, **kwargs):
return self.cache.get(*args, **kwargs)
@send_signal
def set(self, *args, **kwargs):
return self.cache.set(*args, **kwargs)
@send_signal
def delete(self, *args, **kwargs):
return self.cache.delete(*args, **kwargs)
@send_signal
def has_key(self, *args, **kwargs):
return self.cache.has_key(*args, **kwargs)
@send_signal
def incr(self, *args, **kwargs):
return self.cache.incr(*args, **kwargs)
@send_signal
def decr(self, *args, **kwargs):
return self.cache.decr(*args, **kwargs)
@send_signal
def get_many(self, *args, **kwargs):
return self.cache.get_many(*args, **kwargs)
@send_signal
def set_many(self, *args, **kwargs):
self.cache.set_many(*args, **kwargs)
@send_signal
def delete_many(self, *args, **kwargs):
self.cache.delete_many(*args, **kwargs)
@send_signal
def incr_version(self, *args, **kwargs):
return self.cache.incr_version(*args, **kwargs)
@send_signal
def decr_version(self, *args, **kwargs):
return self.cache.decr_version(*args, **kwargs)
class CacheDebugPanel(DebugPanel):
"""
Panel that displays the cache statistics.
"""
name = 'Cache'
template = 'debug_toolbar/panels/cache.html'
has_content = True
def __init__(self, *args, **kwargs):
super(CacheDebugPanel, self).__init__(*args, **kwargs)
self.total_time = 0
self.hits = 0
self.misses = 0
self.calls = []
self.counts = SortedDict((
('add', 0),
('get', 0),
('set', 0),
('delete', 0),
('get_many', 0),
('set_many', 0),
('delete_many', 0),
('has_key', 0),
('incr', 0),
('decr', 0),
('incr_version', 0),
('decr_version', 0),
))
cache_called.connect(self._store_call_info)
def _store_call_info(self, sender, name=None, time_taken=0,
return_value=None, args=None, kwargs=None, trace=None,
template_info=None, backend=None, **kw):
if name == 'get':
if return_value is None:
self.misses += 1
else:
self.hits += 1
elif name == 'get_many':
for key, value in return_value.iteritems():
if value is None:
self.misses += 1
else:
self.hits += 1
self.total_time += time_taken * 1000
self.counts[name] += 1
self.calls.append({
'time': time_taken,
'name': name,
'args': args,
'kwargs': kwargs,
'trace': render_stacktrace(trace),
'template_info': template_info,
'backend': backend
})
def nav_title(self):
return _('Cache')
def nav_subtitle(self):
cache_calls = len(self.calls)
return ungettext('%(cache_calls)d call in %(time).2fms',
'%(cache_calls)d calls in %(time).2fms',
cache_calls) % {'cache_calls': cache_calls,
'time': self.total_time}
def title(self):
count = len(getattr(settings, 'CACHES', ['default']))
return ungettext('Cache calls from %(count)d backend',
'Cache calls from %(count)d backends',
count) % dict(count=count)
def url(self):
return ''
def process_response(self, request, response):
self.record_stats({
'total_calls': len(self.calls),
'calls': self.calls,
'total_time': self.total_time,
'hits': self.hits,
'misses': self.misses,
'counts': self.counts,
})
def get_cache_debug(*args, **kwargs):
base_cache = base_get_cache(*args, **kwargs)
return CacheStatTracker(base_cache)
cache.cache = CacheStatTracker(cache.cache)
cache.get_cache = get_cache_debug
| agpl-3.0 | -7,011,836,031,029,467,000 | 29.833333 | 103 | 0.547115 | false | 3.873797 | false | false | false |
LowResourceLanguages/hltdi-l3 | disambiguatr/extract.py | 1 | 1209 | #!/usr/bin/env python3
"""
Common routines for pulling useful examples of Quechua adjectives the
generation of which requires lexical choice.
Used by extractbibles and extractelicitation at least!
"""
import yaml
import l3
es_adj = set()
def load_spanish_adjectives():
spanish_adj_fn = "../l3xdg/languages/es/es_adj.yaml"
with open(spanish_adj_fn) as infile:
adjs = yaml.load(infile)
for entry in adjs:
if "word" in entry:
es_adj.add(entry["word"])
quechua_cache = {}
def quechuaMA(word):
"""Caching wrapper around the l3 Quechua morphological analyzer."""
if word not in quechua_cache:
quechua_cache[word] = l3.anal_word("qu", word, raw=True)
return quechua_cache[word]
def get_pos(word):
"""Given a word in Spanish, return its POS tag."""
pass
## try looking up the word in a dictionary.
## which dictionaries of Spanish do we have?
## runasimi, spanish wordnet, dicAML...
## how do we deal with morphology in Spanish? What if we're looking for
## "alta", but only "alto" is in the dictionary? Do stemming?
## can morphological analysis with AntiMorfo help?
## failing that, return "OOV".
| gpl-3.0 | 8,643,722,207,213,988,000 | 28.487805 | 75 | 0.66584 | false | 3.267568 | false | false | false |
mallconnectionorg/openerp | rrhh/l10n_cl_hr_payroll/model/hr_fixed_allocation.py | 1 | 2830 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Pedro Arroyo M <[email protected]>
# Copyright (C) 2015 Mall Connection(<http://www.mallconnection.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from osv import fields
class hr_fixed_allocation(osv.osv):
'''
Open ERP Model
'''
_name = 'hr.fixed.allocation'
_description = 'hr.fixed.allocation'
_columns = {
'name':fields.char('Description', size=64, required=True, readonly=False),
#'code':fields.char('Code', size=64, required=False, readonly=False),
#'type':fields.selection([
# ('collation','Collation'),
# ('mobilization','Mobilization'),
# ('cash_loss','Cash loss'),
# ('tool_wear','Tool wear')
# ('bonification','Bonification')
# ], 'Type'),
'amount': fields.float('Amount', digits=(3,2),required=True),
'allocation_type_id':fields.many2one('hr.fixed.allocation.type', 'Allocation type', required=True),
'contract_id':fields.many2one('hr.contract', 'Contract', required=False),
#'taxable':fields.boolean('Taxable', required=False),
}
hr_fixed_allocation()
class hr_fixed_allocation_type(osv.osv):
'''
Open ERP Model
'''
_name = 'hr.fixed.allocation.type'
_description = 'hr.fixed.allocation type'
_columns = {
'name':fields.char('Description', size=64, required=True, readonly=False),
'code':fields.char('Code', size=64, required=False, readonly=False),
'type':fields.selection([
('collation','Collation'),
('mobilization','Mobilization'),
('cash_loss','Cash loss'),
('tool_wear','Tool wear'),
('bonification','Bonification')
], 'Type'),
'taxable':fields.boolean('Taxable', required=False),
}
hr_fixed_allocation_type() | agpl-3.0 | -5,406,903,646,967,186,000 | 38.873239 | 112 | 0.565371 | false | 4.066092 | false | false | false |
tholo/formhelpers2 | setup.py | 1 | 1068 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = [
'colander',
'pyramid',
'pyramid_debugtoolbar',
'WebHelpers',
]
setup(name='formhelpers2',
version='0.0',
description='formhelpers2',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="formhelpers2",
entry_points = """\
[paste.app_factory]
main = formhelpers2:main
""",
paster_plugins=['pyramid'],
)
| bsd-2-clause | 2,393,569,444,776,156,700 | 25.04878 | 63 | 0.58427 | false | 3.645051 | false | true | false |
igormunkin/CMC-CTMM | task04_particles/controller.py | 1 | 5900 | import math
import model
import cython_verlet
import threading
import numpy
import matplotlib
import matplotlib.figure
import matplotlib.pyplot as plot
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
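# Verlet integrators (pure Python, Cython-backed and thread-based) plus the
# Qt / matplotlib plumbing that draws and controls the particle simulation.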
def pure_python_verlet(particles):
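    """Advance every particle by one velocity-Verlet step (unit time step).

    The per-axis attraction term is G * e.mass * dx / r, i.e. it is divided
    by r rather than by the r**3 of textbook Newtonian gravity.  Particles
    are updated in place and the list of updated position dicts is returned.
    """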
G = 6.67 * (10 ** -11)
for p in particles:
new_a = sum(map(lambda e:
G * e.mass * (e.position['x'] - p.position['x'])
/ (math.sqrt(
(e.position['x'] - p.position['x'])**2 +
(e.position['y'] - p.position['y'])**2
)) , filter(lambda e: not e is p, particles)))
new_b = sum(map(lambda e:
G * e.mass * (e.position['y'] - p.position['y'])
/ (math.sqrt(
(e.position['x'] - p.position['x'])**2 +
(e.position['y'] - p.position['y'])**2
)), filter(lambda e: not e is p, particles)))
if p.time > 0:
p.position['x'] += p.velocity['u'] + 0.5 * p.acceleration['a']
p.position['y'] += p.velocity['v'] + 0.5 * p.acceleration['b']
p.time += 1
p.velocity['u'] += 0.5 * (new_a + p.acceleration['a'])
p.velocity['v'] += 0.5 * (new_b + p.acceleration['b'])
p.acceleration['a'] = new_a
p.acceleration['b'] = new_b
return [p.position for p in particles]
def verlet_worker(particles, begin, end):
G = 6.67 * (10 ** -11)
for p in particles[begin : end]:
new_a = sum(map(lambda e:
G * e.mass * (e.position['x'] - p.position['x'])
/ (math.sqrt(
(e.position['x'] - p.position['x'])**2 +
(e.position['y'] - p.position['y'])**2
)) , filter(lambda e: not e is p, particles)))
new_b = sum(map(lambda e:
G * e.mass * (e.position['y'] - p.position['y'])
/ (math.sqrt(
(e.position['x'] - p.position['x'])**2 +
(e.position['y'] - p.position['y'])**2
)), filter(lambda e: not e is p, particles)))
if p.time > 0:
p.position['x'] += p.velocity['u'] + 0.5 * p.acceleration['a']
p.position['y'] += p.velocity['v'] + 0.5 * p.acceleration['b']
p.time += 1
p.velocity['u'] += 0.5 * (new_a + p.acceleration['a'])
p.velocity['v'] += 0.5 * (new_b + p.acceleration['b'])
p.acceleration['a'] = new_a
p.acceleration['b'] = new_b
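# Despite the name, this variant runs the same per-particle update as
# verlet_worker on one Python thread per particle, joining them all before
# returning the updated positions.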
def multiprocess_verlet(particles):
jobs = []
for i in range(len(particles)):
job = threading.Thread(target = verlet_worker, args = (particles, i, i + 1))
job.start()
jobs.append(job)
for j in jobs:
j.join()
return [p.position for p in particles]
class ParticlePlot(FigureCanvasQTAgg):
def __init__(self, parent, width, height, dpi, size_policy):
figure = matplotlib.figure.Figure(figsize = (width, height), dpi = dpi,
facecolor = 'white')
self.axes = figure.add_axes([0.005,0.005,0.990,0.990], frameon=True, aspect=1)
FigureCanvasQTAgg.__init__(self, figure)
self.setParent(parent)
FigureCanvasQTAgg.setSizePolicy(self, size_policy, size_policy)
FigureCanvasQTAgg.updateGeometry(self)
self.figure.canvas.draw()
def update_plot(self, particles, updater):
self.axes.cla()
self.axes.set_xlim(-45, 45), self.axes.set_xticks([])
self.axes.set_ylim(-25, 25), self.axes.set_yticks([])
data = updater(particles)
mass_data = [ p.mass / 1000 for p in particles ]
color_data = [ p.color for p in particles ]
x_data = [ p['x'] for p in data ]
y_data = [ p['y'] for p in data ]
self.scatter = self.axes.scatter(x_data, y_data, s = mass_data, lw = 0.5,
c = color_data)
self.figure.canvas.draw()
class ParticleController:
defaults = {
'mass': 1250 * 1000,
'lifetime': 4,
'velocity': { 'u': 5, 'v': 7 },
'position': { 'x': 0, 'y': 0 },
'color': (0, 1, 0),
'method': 0
}
def __init__(self):
self.__mass = __class__.defaults['mass']
self.__lifetime = __class__.defaults['lifetime']
self.__velocity = __class__.defaults['velocity']
self.__position = __class__.defaults['position']
self.__color = __class__.defaults['color'];
self.method = __class__.defaults['method']
self.particles = []
self.updaters = [
pure_python_verlet,
cython_verlet.cython_verlet,
multiprocess_verlet,
]
self.methods = [
"Pure Python Verlet algorithm implementation",
"Cython Verlet algorithm implementation",
"Multiprocess Verlet algorithm implementation",
]
def __add_particle(self):
self.particles.append(model.WildParticle(
self.position['x'], self.position['y'],
self.velocity['u'], self.velocity['v'],
self.mass, self.color, self.lifetime
) )
@property
def position(self):
return self.__position
@position.setter
def position(self, value):
self.__position = value
@property
def velocity(self):
return self.__velocity
@velocity.setter
def velocity(self, value):
self.__velocity = value
@property
def acceleration(self):
return self.__acceleration
@acceleration.setter
def acceleration(self, value):
self.__acceleration = value
@property
def mass(self):
return self.__mass
@mass.setter
def mass(self, value):
self.__mass = value
@property
def lifetime(self):
return self.__lifetime
@lifetime.setter
def lifetime(self, value):
self.__lifetime = value
@property
def color(self):
return self.__color
@color.setter
    def color(self, value):
self.__color = value
| mit | 411,129,055,774,413,700 | 32.522727 | 86 | 0.537288 | false | 3.524492 | false | false | false |
ecreall/lagendacommun | lac/views/admin_process/see_site_folders.py | 1 | 2032 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import math
from pyramid.view import view_config
from dace.objectofcollaboration.principal.util import get_current
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView
from lac.views.filter import find_entities
from lac.content.interface import ISiteFolder
from lac.content.processes.admin_process.behaviors import (
SeeSiteFolders)
from lac.content.lac_application import (
CreationCulturelleApplication)
from lac import _
CONTENTS_MESSAGES = {
'0': _(u"""No element found"""),
'1': _(u"""One element found"""),
'*': _(u"""${nember} elements found""")
}
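# The '*' key covers any result count greater than one (see update() below).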
@view_config(
name='seesitefolders',
context=CreationCulturelleApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class SeeSiteFoldersView(BasicView):
title = ''
name = 'seesitefolders'
behaviors = [SeeSiteFolders]
template = 'lac:views/admin_process/templates/see_sitefolders.pt'
viewid = 'seesitefolders'
def update(self):
self.execute(None)
# root = getSite()
# folders = root.site_folders
folders = find_entities(
user=get_current(),
interfaces=[ISiteFolder],
sort_on='modified_at', reverse=True)
result = {}
len_result = len(folders)
index = str(len_result)
if len_result > 1:
index = '*'
self.title = _(CONTENTS_MESSAGES[index],
                       mapping={'number': len_result})
values = {'folders': list(folders),
'row_len': math.ceil(len_result/6)}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
DEFAULTMAPPING_ACTIONS_VIEWS.update({SeeSiteFolders: SeeSiteFoldersView})
| agpl-3.0 | 4,620,561,385,707,153,000 | 29.787879 | 73 | 0.656988 | false | 3.583774 | false | false | false |
rtucker-mozilla/mozilla_inventory | core/network/models.py | 1 | 8198 | from django.db import models
from django.core.exceptions import ValidationError
from mozdns.validation import validate_ip_type
from mozdns.ip.models import ipv6_to_longs
from core.utils import IPFilter, one_to_two, to_a
from core.vlan.models import Vlan
from core.site.models import Site
from core.mixins import ObjectUrlMixin
from core.keyvalue.base_option import CommonOption
import ipaddr
class Network(models.Model, ObjectUrlMixin):
id = models.AutoField(primary_key=True)
vlan = models.ForeignKey(Vlan, null=True,
blank=True, on_delete=models.SET_NULL)
site = models.ForeignKey(Site, null=True,
blank=True, on_delete=models.SET_NULL)
# NETWORK/NETMASK FIELDS
IP_TYPE_CHOICES = (('4', 'ipv4'), ('6', 'ipv6'))
ip_type = models.CharField(max_length=1, choices=IP_TYPE_CHOICES,
editable=True, validators=[validate_ip_type])
ip_upper = models.BigIntegerField(null=False, blank=True)
ip_lower = models.BigIntegerField(null=False, blank=True)
# This field is here so ES can search this model easier.
network_str = models.CharField(max_length=49, editable=True,
help_text="The network address of this "
"network.")
prefixlen = models.PositiveIntegerField(null=False,
help_text="The number of binary "
"1's in the netmask.")
dhcpd_raw_include = models.TextField(
null=True, blank=True, help_text="The config options in this box "
"will be included *as is* in the dhcpd.conf file for this "
"subnet."
)
network = None
def details(self):
details = [
('Network', self.network_str),
]
if self.vlan:
details.append(
('Vlan',
to_a("{0}:{1}".format(self.vlan.name, self.vlan.number),
self.vlan)))
if self.site:
details.append(('Site', to_a(self.site.full_name, self.site)))
return details
class Meta:
db_table = 'network'
unique_together = ('ip_upper', 'ip_lower', 'prefixlen')
def delete(self, *args, **kwargs):
if self.range_set.all().exists():
raise ValidationError("Cannot delete this network because it has "
"child ranges")
super(Network, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
self.clean()
super(Network, self).save(*args, **kwargs)
def clean(self):
self.update_network()
        # Look at all ranges that claim to be in this subnet and check whether
        # they actually fall within it.
for range_ in self.range_set.all():
"""
I was writing checks to make sure that subnets wouldn't orphan
ranges. IPv6 needs support.
"""
fail = False
# Check the start addresses.
if range_.start_upper < self.ip_upper:
fail = True
elif (range_.start_upper > self.ip_upper and range_.start_lower <
self.ip_lower):
fail = True
elif (range_.start_upper == self.ip_upper and range_.start_lower
< self.ip_lower):
fail = True
if self.ip_type == '4':
brdcst_upper, brdcst_lower = 0, int(self.network.broadcast)
else:
brdcst_upper, brdcst_lower = ipv6_to_longs(str(
self.network.broadcast))
# Check the end addresses.
if range_.end_upper > brdcst_upper:
fail = True
elif (range_.end_upper < brdcst_upper and range_.end_lower >
brdcst_lower):
fail = True
elif (range_.end_upper == brdcst_upper and range_.end_lower
> brdcst_lower):
fail = True
if fail:
raise ValidationError("Resizing this subnet to the requested "
"network prefix would orphan existing "
"ranges.")
def update_ipf(self):
"""Update the IP filter. Used for compiling search queries and firewall
rules."""
self.update_network()
self.ipf = IPFilter(self.network.network, self.network.broadcast,
self.ip_type, object_=self)
def update_network(self):
"""This function will look at the value of network_str to update other
fields in the network object. This function will also set the 'network'
attribute to either an ipaddr.IPv4Network or ipaddr.IPv6Network object.
"""
if not isinstance(self.network_str, basestring):
raise ValidationError("ERROR: No network str.")
try:
if self.ip_type == '4':
self.network = ipaddr.IPv4Network(self.network_str)
elif self.ip_type == '6':
self.network = ipaddr.IPv6Network(self.network_str)
else:
raise ValidationError("Could not determine IP type of network"
" %s" % (self.network_str))
except (ipaddr.AddressValueError, ipaddr.NetmaskValueError):
raise ValidationError("Invalid network for ip type of "
"'{0}'.".format(self, self.ip_type))
# Update fields
self.ip_upper, self.ip_lower = one_to_two(int(self.network))
self.prefixlen = self.network.prefixlen
def __str__(self):
return self.network_str
def __repr__(self):
return "<Network {0}>".format(str(self))
class NetworkKeyValue(CommonOption):
obj = models.ForeignKey(Network, related_name='keyvalue_set', null=False)
aux_attrs = (
        ('description', 'A description of the network'),
)
class Meta:
db_table = 'network_key_value'
unique_together = ('key', 'value', 'obj')
"""The NetworkOption Class.
"DHCP option statements always start with the option keyword, followed
by an option name, followed by option data." -- The man page for
dhcpd-options
In this class, options are stored without the 'option' keyword. If it
    is an option, is_option should be set.
"""
def save(self, *args, **kwargs):
self.clean()
super(NetworkKeyValue, self).save(*args, **kwargs)
def _aa_description(self):
"""
        A description of this network
"""
pass
def _aa_filename(self):
"""
filename filename;
The filename statement can be used to specify the name of the
initial boot file which is to be loaded by a client. The filename
should be a filename recognizable to whatever file transfer
protocol the client can be expected to use to load the file.
"""
self.is_statement = True
self.is_option = False
self.has_validator = True
# Anything else?
def _aa_next_server(self):
"""
The next-server statement
next-server server-name;
The next-server statement is used to specify the host address
of the server from which the initial boot file (specified in
the filename statement) is to be loaded. Server-name should be
a numeric IP address or a domain name. If no next-server
parameter applies to a given client, the DHCP server's IP
address is used.
"""
self.has_validator = True
self.is_statement = True
self.is_option = False
self._single_ip(self.obj.ip_type)
def _aa_dns_servers(self):
"""
A list of DNS servers for this network.
"""
self.is_statement = False
self.is_option = False
self._ip_list(self.obj.ip_type)
def _aa_routers(self):
self._routers(self.obj.ip_type)
def _aa_ntp_servers(self):
self._ntp_servers(self.obj.ip_type)
| bsd-3-clause | -2,350,841,018,741,059,600 | 35.598214 | 79 | 0.566236 | false | 4.319283 | false | false | false |
clchiou/garage | py/imagetools/imagetools/__init__.py | 1 | 1554 | """Image manipulation tools."""
__all__ = [
'ImageError',
'Unsupported',
'ImageFormat',
'detect_format',
'resize',
'resize_unsafe',
]
import enum
import os
import tempfile
from . import _imagetools
from ._imagetools import ImageError
class Unsupported(ImageError):
pass
@enum.unique
class ImageFormat(enum.Enum):
UNKNOWN = _imagetools.FORMAT_UNKNOWN
GIF = _imagetools.FORMAT_GIF
JPEG = _imagetools.FORMAT_JPEG
PNG = _imagetools.FORMAT_PNG
def detect_format(image):
"""Detect image format."""
return ImageFormat(_imagetools.detect_format(image))
def resize(image, desired_width, output_path):
"""Resize an image to the desired_width.
It writes to a temporary file while processing, and so it does not
clobber output file on error. If clobbering output is not an issue,
you may use resize_unsafe, which is faster.
"""
tmp_path = None
try:
fd, tmp_path = tempfile.mkstemp()
os.close(fd) # Close fd immediately (don't leak it!).
dimension = resize_unsafe(image, desired_width, tmp_path)
os.rename(tmp_path, output_path)
tmp_path = None
finally:
if tmp_path is not None:
os.remove(tmp_path)
return dimension
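# Usage sketch (hypothetical paths; assumes `image` holds the raw encoded bytes):
#     with open('photo.jpg', 'rb') as f:
#         data = f.read()
#     if detect_format(data) is ImageFormat.JPEG:
#         dimension = resize(data, 640, 'photo-640.jpg')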
def resize_unsafe(image, desired_width, output_path):
"""Unsafe version of resize."""
image_format = detect_format(image)
if image_format is ImageFormat.JPEG:
return _imagetools.resize_jpeg(image, desired_width, output_path)
else:
raise Unsupported
| mit | 972,284,685,737,102,000 | 21.521739 | 73 | 0.658945 | false | 3.682464 | false | false | false |
nicnab/pygol | minigol.py | 1 | 1181 | #!/usr/bin/env python
# MINI PYGOL - Game of Life - Nico Nabholz ([email protected])
gen = 1
cell = {}
import sys, os, random, time
os.system('clear')
rows, columns = os.popen('stty size', 'r').read().split()
y = int(rows) - 2
x = int(columns)
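# Conway's rules as implemented below: a dead cell with exactly three live
# neighbours becomes alive; a live cell survives with two or three neighbours.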
while 1:
print chr(27) + "[H" + "Game Of Life (Generation: %d)" % gen
for r in range(y):
for c in range(x):
if (r * c == 0 or r == y-1 or c == x-1): cell[c, r, gen] = 0
elif gen == 1: cell[c, r, gen] = int(round(random.randrange(0,4,1)/3))
else:
homies = (cell[c-1, r-1, gen-1] + cell[c, r-1, gen-1] + cell[c+1, r-1, gen-1] +
cell[c-1, r, gen-1] + cell[c+1, r, gen-1] +
cell[c-1, r+1, gen-1] + cell[c, r+1, gen-1] + cell[c+1, r+1, gen-1])
if (cell[c, r, gen-1] == 0 and homies == 3): cell[c, r, gen] = 1
elif (cell[c, r, gen-1] == 1 and (homies == 2 or homies == 3)): cell[c, r, gen] = 1
else: cell[c, r, gen] = 0
if cell[c, r, gen] == 1: sys.stdout.write('O')
else: sys.stdout.write(' ')
print
time.sleep(.1)
gen += 1
| mit | 9,068,482,770,594,076,000 | 38.366667 | 99 | 0.464014 | false | 2.567391 | false | false | false |
voltagex/rtorrent-python | rtorrent/file.py | 2 | 4032 | from rtorrent.rpc import RPCObject, BaseMulticallBuilder
from rtorrent.rpc.processors import *
class File(RPCObject):
def __init__(self, context, info_hash, index):
super().__init__(context)
self.rpc_id = "{0}:f{1}".format(info_hash, index)
def rpc_call(self, key, *args):
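        # Prepend the "<infohash>:f<index>" target id so the call addresses
        # this particular file.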
call = super().rpc_call(key, *args)
call.get_args().insert(0, self.rpc_id)
return call
class FileMetadata(object):
def __init__(self, results: dict):
self.results = results
def __getattr__(self, item):
return lambda: self.results[item]
class FileMulticallBuilder(BaseMulticallBuilder):
__metadata_cls__ = FileMetadata
__rpc_object_cls__ = File
__multicall_rpc_method__ = 'f.multicall'
def __init__(self, context, torrent):
super().__init__(context)
self.args.extend([torrent.get_info_hash(), ''])
_VALID_FILE_PRIORITIES = ['off', 'normal', 'high']
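# rTorrent encodes file priority as an integer 0-2; the names above map to
# those values in order (off=0, normal=1, high=2).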
File.register_rpc_method('get_size_bytes',
['f.get_size_bytes', 'f.size_bytes'])
File.register_rpc_method('get_size_chunks',
['f.get_size_chunks', 'f.size_chunks'])
File.register_rpc_method('get_path', ['f.get_path', 'f.path'])
File.register_rpc_method('get_priority', ['f.get_priority', 'f.priority'],
post_processors=[lambda x:
_VALID_FILE_PRIORITIES[x]])
File.register_rpc_method('set_priority', ['f.set_priority', 'f.priority.set'],
pre_processors=[valmap(_VALID_FILE_PRIORITIES,
range(0, 3), 1)],
post_processors=[check_success])
File.register_rpc_method('get_completed_chunks', 'f.completed_chunks')
File.register_rpc_method('get_frozen_path', 'f.frozen_path')
File.register_rpc_method('get_last_touched',
['f.get_last_touched', 'f.last_touched'],
post_processors=[to_datetime])
File.register_rpc_method('get_offset', ['f.get_offset', 'f.offset'])
File.register_rpc_method('get_path_components',
['f.get_path_components', 'f.path_components'])
File.register_rpc_method('get_path_depth',
['f.get_path_depth', 'f.path_depth'])
File.register_rpc_method('get_range_first',
['f.get_range_first', 'f.range_first'])
File.register_rpc_method('get_range_second',
['f.get_range_second', 'f.range_second'])
File.register_rpc_method('is_create_queued', 'f.is_create_queued',
boolean=True)
File.register_rpc_method('is_open', 'f.is_open',
boolean=True)
File.register_rpc_method('get_completed_chunks',
['f.get_completed_chunks', 'f.completed_chunks'])
File.register_rpc_method('get_match_depth_next',
['f.get_match_depth_next', 'f.match_depth_next'])
File.register_rpc_method('get_match_depth_prev',
['f.get_match_depth_prev', 'f.match_depth_prev'])
File.register_rpc_method('is_prioritized_first', 'f.prioritize_first',
boolean=True)
File.register_rpc_method('enable_prioritize_first',
                         'f.prioritize_first.enable',
                         post_processors=[check_success])
File.register_rpc_method('disable_prioritize_first',
                         'f.prioritize_first.disable',
                         post_processors=[check_success])
File.register_rpc_method('is_prioritized_last', 'f.prioritize_last',
boolean=True)
File.register_rpc_method('enable_prioritize_last',
                         'f.prioritize_last.enable',
                         post_processors=[check_success])
File.register_rpc_method('disable_prioritize_last',
                         'f.prioritize_last.disable',
                         post_processors=[check_success])
File.register_rpc_method('is_created', 'f.is_created',
boolean=True)
| mit | -2,445,007,893,928,782,300 | 45.344828 | 78 | 0.570685 | false | 3.625899 | false | false | false |
dc3-plaso/plaso | tests/parsers/asl.py | 1 | 2743 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Apple System Log file parser."""
import unittest
from plaso.formatters import asl # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers import asl
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class ASLParserTest(test_lib.ParserTestCase):
"""Tests for Apple System Log file parser."""
@shared_test_lib.skipUnlessHasTestFile([u'applesystemlog.asl'])
def testParse(self):
"""Tests the Parse function."""
parser_object = asl.ASLParser()
storage_writer = self._ParseFile(
[u'applesystemlog.asl'], parser_object)
self.assertEqual(len(storage_writer.events), 2)
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-25 09:45:35.705481')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.record_position, 442)
self.assertEqual(event_object.message_id, 101406)
self.assertEqual(event_object.computer_name, u'DarkTemplar-2.local')
self.assertEqual(event_object.sender, u'locationd')
self.assertEqual(event_object.facility, u'com.apple.locationd')
self.assertEqual(event_object.pid, 69)
self.assertEqual(event_object.user_sid, u'205')
self.assertEqual(event_object.group_id, 205)
self.assertEqual(event_object.read_uid, 205)
self.assertEqual(event_object.read_gid, 0xffffffff)
self.assertEqual(event_object.level, 4)
# Note that "compatiblity" is spelt incorrectly in the actual message being
# tested here.
expected_message = (
u'Incorrect NSStringEncoding value 0x8000100 detected. '
u'Assuming NSASCIIStringEncoding. Will stop this compatiblity '
u'mapping behavior in the near future.')
self.assertEqual(event_object.message, expected_message)
expected_extra = (
u'CFLog Local Time: 2013-11-25 09:45:35.701, '
u'CFLog Thread: 1007, '
u'Sender_Mach_UUID: 50E1F76A-60FF-368C-B74E-EB48F6D98C51')
self.assertEqual(event_object.extra_information, expected_extra)
expected_msg = (
u'MessageID: 101406 '
u'Level: WARNING (4) '
u'User ID: 205 '
u'Group ID: 205 '
u'Read User: 205 '
u'Read Group: ALL '
u'Host: DarkTemplar-2.local '
u'Sender: locationd '
u'Facility: com.apple.locationd '
u'Message: {0:s} {1:s}').format(expected_message, expected_extra)
expected_msg_short = (
u'Sender: locationd '
u'Facility: com.apple.locationd')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -442,569,407,062,469,250 | 32.864198 | 79 | 0.687204 | false | 3.365644 | true | false | false |