repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
stringlengths 5-92 | stringlengths 4-232 | stringclasses 19 values | stringlengths 4-7 | stringlengths 721-1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51-99.9 | int64 15-997 | float64 0.25-0.97 | bool 1 class
ecaldwe1/zika | website/apps/home/urls.py | 1 | 2253 | #!/bin/env python3.4
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet Zika modeling interface.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/zika
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf.urls import url
from website.apps.home.views.upload_job_view import upload_job_view
from website.apps.home.views.BrowseView import BrowseView
from website.apps.home.views.ChartView import ChartView, CountryTotalChartView
from website.apps.home.views.MapView import MapView, csv_for_map_view
from website.apps.home.views.UploadView import UploadView
from website.apps.home.views.delete_simulation_view import delete_simulation_view
urlpatterns = [
# the list of simulations
url(r'^$', BrowseView.as_view(), name='home.display_simulations'),
url(r'^historical/$', BrowseView.as_view(), kwargs={"is_historical":True}, name='home.display_historical'),
# upload simulation/historical data view
url(r'^upload/', UploadView.as_view(), name="simulation.upload"),
# get csv data for rendering choropleth map
url(r'^csv_for_map/(?P<sim_id>[0-9]+)/(?P<inquery_date>[0-9, -]+)/$', csv_for_map_view, name='home.csv_for_map'),
# views for the charts (country totals or municipality)
url(r'^chart/(?P<simulation_id>\d+)/total/$', CountryTotalChartView.as_view(), name='home.countrytotalchart'),
url(r'^chart/(?P<simulation_id>\d+)/(?P<municipality_code>\d+)/$', ChartView.as_view(), name="simulation.chart"),
# Permanently delete simulation
url(r'^delete/(?P<simulation_id>\d+)/$', delete_simulation_view, name="simulation.delete"),
# views for the map
url(r'^map/(?P<model_id>[0-9]+)/(?P<sim_id>[0-9]+)/$', MapView.as_view(), name='home.mapview'),
url(r'^map/(?P<model_id>[0-9]+)/(?P<sim_id>[0-9]+)/(?P<municipality_code>\d+)/$',
MapView.as_view(), name='home.mapview'),
# API
url(r'^api/upload_job/(?P<pk>\d+)/$', upload_job_view, name="api.upload_job"),
]
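# Illustrative note (not part of the original file): these named routes would
# normally be resolved elsewhere in the project with Django's reverse(), e.g.
#   reverse('home.mapview', kwargs={'model_id': 1, 'sim_id': 2})
#   reverse('simulation.chart', kwargs={'simulation_id': 2, 'municipality_code': 5001})
# giving paths like '/map/1/2/' and '/chart/2/5001/' (the exact prefix depends on
# where this urlconf is included; the ids are made-up example values).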
| mpl-2.0 | -4,590,224,854,248,846,000 | 46.93617 | 117 | 0.694629 | false |
JoKaWare/WTL-DUI | tools/grit/grit/format/policy_templates/writers/doc_writer_unittest.py | 1 | 19423 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.policy_templates.writers.doc_writer'''
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../../../..'))
import tempfile
import unittest
import StringIO
from xml.dom import minidom
from grit.format import rc
from grit.format.policy_templates.writers import writer_unittest_common
from grit.format.policy_templates.writers import doc_writer
from grit import grd_reader
from grit import util
from grit.tool import build
class MockMessageDictionary:
'''A mock dictionary passed to a writer as the dictionary of
localized messages.
'''
# Dictionary of messages.
msg_dict = {}
class DocWriterUnittest(writer_unittest_common.WriterUnittestCommon):
'''Unit tests for DocWriter.'''
def setUp(self):
# Create a writer for the tests.
self.writer = doc_writer.GetWriter(
config={
'app_name': 'Chrome',
'frame_name': 'Chrome Frame',
'os_name': 'Chrome OS',
'win_reg_mandatory_key_name': 'MockKey',
})
self.writer.messages = {
'doc_back_to_top': {'text': '_test_back_to_top'},
'doc_data_type': {'text': '_test_data_type'},
'doc_description': {'text': '_test_description'},
'doc_description_column_title': {
'text': '_test_description_column_title'
},
'doc_example_value': {'text': '_test_example_value'},
'doc_feature_dynamic_refresh': {'text': '_test_feature_dynamic_refresh'},
'doc_feature_can_be_recommended': {'text': '_test_feature_recommended'},
'doc_intro': {'text': '_test_intro'},
'doc_mac_linux_pref_name': {'text': '_test_mac_linux_pref_name'},
'doc_note': {'text': '_test_note'},
'doc_name_column_title': {'text': '_test_name_column_title'},
'doc_not_supported': {'text': '_test_not_supported'},
'doc_since_version': {'text': '_test_since_version'},
'doc_supported': {'text': '_test_supported'},
'doc_supported_features': {'text': '_test_supported_features'},
'doc_supported_on': {'text': '_test_supported_on'},
'doc_win_reg_loc': {'text': '_test_win_reg_loc'},
'doc_bla': {'text': '_test_bla'},
}
self.writer.Init()
# It is not worth testing the exact content of style attributes.
# Therefore we override them here with shorter texts.
for key in self.writer._STYLE.keys():
self.writer._STYLE[key] = 'style_%s;' % key
# Add some more style attributes for additional testing.
self.writer._STYLE['key1'] = 'style1;'
self.writer._STYLE['key2'] = 'style2;'
# Create a DOM document for the tests.
dom_impl = minidom.getDOMImplementation('')
self.doc = dom_impl.createDocument(None, 'root', None)
self.doc_root = self.doc.documentElement
def testSkeleton(self):
# Test if DocWriter creates the skeleton of the document correctly.
self.writer.BeginTemplate()
self.assertEquals(
self.writer._main_div.toxml(),
'<div>'
'<div>'
'<a name="top"/><br/>_test_intro<br/><br/><br/>'
'<table style="style_table;">'
'<thead><tr style="style_tr;">'
'<td style="style_td;style_td.left;style_thead td;">'
'_test_name_column_title'
'</td>'
'<td style="style_td;style_td.right;style_thead td;">'
'_test_description_column_title'
'</td>'
'</tr></thead>'
'<tbody/>'
'</table>'
'</div>'
'<div/>'
'</div>')
def testGetLocalizedMessage(self):
# Test if localized messages are retrieved correctly.
self.writer.messages = {
'doc_hello_world': {'text': 'hello, vilag!'}
}
self.assertEquals(
self.writer._GetLocalizedMessage('hello_world'),
'hello, vilag!')
def testMapListToString(self):
# Test function DocWriter.MapListToString()
self.assertEquals(
self.writer._MapListToString({'a1': 'a2', 'b1': 'b2'}, ['a1', 'b1']),
'a2, b2')
self.assertEquals(
self.writer._MapListToString({'a1': 'a2', 'b1': 'b2'}, []),
'')
result = self.writer._MapListToString(
{'a': '1', 'b': '2', 'c': '3', 'd': '4'}, ['b', 'd'])
expected_result = '2, 4'
self.assertEquals(
result,
expected_result)
def testAddStyledElement(self):
# Test function DocWriter.AddStyledElement()
# Test the case of zero style.
e1 = self.writer._AddStyledElement(
self.doc_root, 'z', [], {'a': 'b'}, 'text')
self.assertEquals(
e1.toxml(),
'<z a="b">text</z>')
# Test the case of one style.
e2 = self.writer._AddStyledElement(
self.doc_root, 'z', ['key1'], {'a': 'b'}, 'text')
self.assertEquals(
e2.toxml(),
'<z a="b" style="style1;">text</z>')
# Test the case of two styles.
e3 = self.writer._AddStyledElement(
self.doc_root, 'z', ['key1', 'key2'], {'a': 'b'}, 'text')
self.assertEquals(
e3.toxml(),
'<z a="b" style="style1;style2;">text</z>')
def testAddDescriptionIntEnum(self):
# Test if URLs are replaced and choices of 'int-enum' policies are listed
# correctly.
policy = {
'type': 'int-enum',
'items': [
{'value': 0, 'caption': 'Disable foo'},
{'value': 2, 'caption': 'Solve your problem'},
{'value': 5, 'caption': 'Enable bar'},
],
'desc': '''This policy disables foo, except in case of bar.
See http://policy-explanation.example.com for more details.
'''
}
self.writer._AddDescription(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'''<root>This policy disables foo, except in case of bar.
See <a href="http://policy-explanation.example.com">http://policy-explanation.example.com</a> for more details.
<ul><li>0 = Disable foo</li><li>2 = Solve your problem</li><li>5 = Enable bar</li></ul></root>''')
def testAddDescriptionStringEnum(self):
# Test if URLs are replaced and choices of 'string-enum' policies are listed
# correctly.
policy = {
'type': 'string-enum',
'items': [
{'value': "one", 'caption': 'Disable foo'},
{'value': "two", 'caption': 'Solve your problem'},
{'value': "three", 'caption': 'Enable bar'},
],
'desc': '''This policy disables foo, except in case of bar.
See http://policy-explanation.example.com for more details.
'''
}
self.writer._AddDescription(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'''<root>This policy disables foo, except in case of bar.
See <a href="http://policy-explanation.example.com">http://policy-explanation.example.com</a> for more details.
<ul><li>"one" = Disable foo</li><li>"two" = Solve your problem</li><li>"three" = Enable bar</li></ul></root>''')
def testAddFeatures(self):
# Test if the list of features of a policy is handled correctly.
policy = {
'features': {
'spaceship_docking': False,
'dynamic_refresh': True,
'can_be_recommended': True,
}
}
self.writer._FEATURE_MAP = {
'can_be_recommended': 'Can Be Recommended',
'dynamic_refresh': 'Dynamic Refresh',
'spaceship_docking': 'Spaceship Docking',
}
self.writer._AddFeatures(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root>'
'Can Be Recommended: _test_supported, '
'Dynamic Refresh: _test_supported, '
'Spaceship Docking: _test_not_supported'
'</root>')
def testAddListExample(self):
policy = {
'name': 'PolicyName',
'example_value': ['Foo', 'Bar']
}
self.writer._AddListExample(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root>'
'<dl style="style_dd dl;">'
'<dt>Windows:</dt>'
'<dd style="style_.monospace;style_.pre;">'
'MockKey\\PolicyName\\1 = "Foo"\n'
'MockKey\\PolicyName\\2 = "Bar"'
'</dd>'
'<dt>Linux:</dt>'
'<dd style="style_.monospace;">'
'["Foo", "Bar"]'
'</dd>'
'<dt>Mac:</dt>'
'<dd style="style_.monospace;style_.pre;">'
'<array>\n'
' <string>Foo</string>\n'
' <string>Bar</string>\n'
'</array>'
'</dd>'
'</dl>'
'</root>')
def testBoolExample(self):
# Test representation of boolean example values.
policy = {
'name': 'PolicyName',
'type': 'main',
'example_value': True
}
e1 = self.writer.AddElement(self.doc_root, 'e1')
self.writer._AddExample(e1, policy)
self.assertEquals(
e1.toxml(),
'<e1>0x00000001 (Windows), true (Linux), <true /> (Mac)</e1>')
policy = {
'name': 'PolicyName',
'type': 'main',
'example_value': False
}
e2 = self.writer.AddElement(self.doc_root, 'e2')
self.writer._AddExample(e2, policy)
self.assertEquals(
e2.toxml(),
'<e2>0x00000000 (Windows), false (Linux), <false /> (Mac)</e2>')
def testIntEnumExample(self):
# Test representation of 'int-enum' example values.
policy = {
'name': 'PolicyName',
'type': 'int-enum',
'example_value': 16
}
self.writer._AddExample(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root>0x00000010 (Windows), 16 (Linux/Mac)</root>')
def testStringEnumExample(self):
# Test representation of 'string-enum' example values.
policy = {
'name': 'PolicyName',
'type': 'string-enum',
'example_value': "wacky"
}
self.writer._AddExample(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root>"wacky"</root>')
def testStringExample(self):
# Test representation of 'string' example values.
policy = {
'name': 'PolicyName',
'type': 'string',
'example_value': 'awesome-example'
}
self.writer._AddExample(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root>"awesome-example"</root>')
def testIntExample(self):
# Test representation of 'int' example values.
policy = {
'name': 'PolicyName',
'type': 'int',
'example_value': 26
}
self.writer._AddExample(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root>0x0000001a (Windows), 26 (Linux/Mac)</root>')
def testAddPolicyAttribute(self):
# Test creating a policy attribute term-definition pair.
self.writer._AddPolicyAttribute(
self.doc_root, 'bla', 'hello, world', ['key1'])
self.assertEquals(
self.doc_root.toxml(),
'<root>'
'<dt style="style_dt;">_test_bla</dt>'
'<dd style="style1;">hello, world</dd>'
'</root>')
def testAddPolicyDetails(self):
# Test if the definition list (<dl>) of policy details is created correctly.
policy = {
'type': 'main',
'name': 'TestPolicyName',
'caption': 'TestPolicyCaption',
'desc': 'TestPolicyDesc',
'supported_on': [{
'product': 'chrome',
'platforms': ['win'],
'since_version': '8',
'until_version': '',
}],
'features': {'dynamic_refresh': False},
'example_value': False
}
self.writer.messages['doc_since_version'] = {'text': '...$6...'}
self.writer._AddPolicyDetails(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root><dl>'
'<dt style="style_dt;">_test_data_type</dt><dd>Boolean (REG_DWORD)</dd>'
'<dt style="style_dt;">_test_win_reg_loc</dt>'
'<dd style="style_.monospace;">MockKey\TestPolicyName</dd>'
'<dt style="style_dt;">_test_mac_linux_pref_name</dt>'
'<dd style="style_.monospace;">TestPolicyName</dd>'
'<dt style="style_dt;">_test_supported_on</dt>'
'<dd>'
'<ul style="style_ul;">'
'<li>Chrome (Windows) ...8...</li>'
'</ul>'
'</dd>'
'<dt style="style_dt;">_test_supported_features</dt>'
'<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
'<dt style="style_dt;">_test_description</dt><dd>TestPolicyDesc</dd>'
'<dt style="style_dt;">_test_example_value</dt>'
'<dd>0x00000000 (Windows), false (Linux), <false /> (Mac)</dd>'
'</dl></root>')
def testAddPolicyNote(self):
# TODO(jkummerow): The functionality tested by this test is currently not
# used for anything and will probably soon be removed.
# Test if nodes are correctly added to policies.
policy = {
'problem_href': 'http://www.example.com/5'
}
self.writer.messages['doc_note'] = {'text': '...$6...'}
self.writer._AddPolicyNote(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root><div style="style_div.note;">...'
'<a href="http://www.example.com/5">http://www.example.com/5</a>'
'...</div></root>')
def testAddPolicyRow(self):
# Test if policies are correctly added to the summary table.
policy = {
'name': 'PolicyName',
'caption': 'PolicyCaption',
'type': 'string',
}
self.writer._indent_level = 3
self.writer._AddPolicyRow(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root><tr style="style_tr;">'
'<td style="style_td;style_td.left;padding-left: 49px;">'
'<a href="#PolicyName">PolicyName</a>'
'</td>'
'<td style="style_td;style_td.right;">PolicyCaption</td>'
'</tr></root>')
self.setUp()
policy = {
'name': 'PolicyName',
'caption': 'PolicyCaption',
'type': 'group',
}
self.writer._indent_level = 2
self.writer._AddPolicyRow(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root><tr style="style_tr;">'
'<td colspan="2" style="style_td;style_td.left;padding-left: 35px;">'
'<a href="#PolicyName">PolicyCaption</a>'
'</td>'
'</tr></root>')
def testAddPolicySection(self):
# Test if policy details are correctly added to the document.
policy = {
'name': 'PolicyName',
'caption': 'PolicyCaption',
'desc': 'PolicyDesc',
'type': 'string',
'supported_on': [{
'product': 'chrome',
'platforms': ['win'],
'since_version': '7',
'until_version': '',
}],
'features': {'dynamic_refresh': False},
'example_value': False
}
self.writer.messages['doc_since_version'] = {'text': '..$6..'}
self.writer._AddPolicySection(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root>'
'<div style="margin-left: 0px">'
'<h3><a name="PolicyName"/>PolicyName</h3>'
'<span>PolicyCaption</span>'
'<dl>'
'<dt style="style_dt;">_test_data_type</dt>'
'<dd>String (REG_SZ)</dd>'
'<dt style="style_dt;">_test_win_reg_loc</dt>'
'<dd style="style_.monospace;">MockKey\\PolicyName</dd>'
'<dt style="style_dt;">_test_mac_linux_pref_name</dt>'
'<dd style="style_.monospace;">PolicyName</dd>'
'<dt style="style_dt;">_test_supported_on</dt>'
'<dd>'
'<ul style="style_ul;">'
'<li>Chrome (Windows) ..7..</li>'
'</ul>'
'</dd>'
'<dt style="style_dt;">_test_supported_features</dt>'
'<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
'<dt style="style_dt;">_test_description</dt>'
'<dd>PolicyDesc</dd>'
'<dt style="style_dt;">_test_example_value</dt>'
'<dd>"False"</dd>'
'</dl>'
'<a href="#top">_test_back_to_top</a>'
'</div>'
'</root>')
# Test for groups.
self.setUp()
policy['type'] = 'group'
self.writer._AddPolicySection(self.doc_root, policy)
self.assertEquals(
self.doc_root.toxml(),
'<root>'
'<div style="margin-left: 0px">'
'<h2><a name="PolicyName"/>PolicyCaption</h2>'
'<div style="style_div.group_desc;">PolicyDesc</div>'
'<a href="#top">_test_back_to_top</a>'
'</div>'
'</root>')
def testAddDictionaryExample(self):
policy = {
'name': 'PolicyName',
'caption': 'PolicyCaption',
'desc': 'PolicyDesc',
'type': 'dict',
'supported_on': [{
'product': 'chrome',
'platforms': ['win'],
'since_version': '7',
'until_version': '',
}],
'features': {'dynamic_refresh': False},
'example_value': {
"ProxyMode": "direct",
"List": ["1", "2", "3"],
"True": True,
"False": False,
"Integer": 123,
"DictList": [ {
"A": 1,
"B": 2,
}, {
"C": 3,
"D": 4,
},
],
},
}
self.writer._AddDictionaryExample(self.doc_root, policy)
value = str(policy['example_value'])
self.assertEquals(
self.doc_root.toxml(),
'<root>'
'<dl style="style_dd dl;">'
'<dt>Windows:</dt>'
'<dd style="style_.monospace;style_.pre;">MockKey\PolicyName = '
'"' + value + '"'
'</dd>'
'<dt>Linux:</dt>'
'<dd style="style_.monospace;">PolicyName: ' + value + '</dd>'
'<dt>Mac:</dt>'
'<dd style="style_.monospace;style_.pre;">'
'<key>PolicyName</key>\n'
'<dict>\n'
' <key>DictList</key>\n'
' <array>\n'
' <dict>\n'
' <key>A</key>\n'
' <integer>1</integer>\n'
' <key>B</key>\n'
' <integer>2</integer>\n'
' </dict>\n'
' <dict>\n'
' <key>C</key>\n'
' <integer>3</integer>\n'
' <key>D</key>\n'
' <integer>4</integer>\n'
' </dict>\n'
' </array>\n'
' <key>False</key>\n'
' <false/>\n'
' <key>Integer</key>\n'
' <integer>123</integer>\n'
' <key>List</key>\n'
' <array>\n'
' <string>1</string>\n'
' <string>2</string>\n'
' <string>3</string>\n'
' </array>\n'
' <key>ProxyMode</key>\n'
' <string>direct</string>\n'
' <key>True</key>\n'
' <true/>\n'
'</dict>'
'</dd>'
'</dl>'
'</root>')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -1,785,424,312,809,519,400 | 33.560498 | 142 | 0.547907 | false |
ebernhardson/l2r | code/feature_base.py | 1 | 13541 | import numpy as np
import pandas as pd
import os
import functools
import config
from utils import np_utils, table_utils
# Base class for measuring correlation/similarity/distance
# between the search query and page information
class BaseEstimator(object):
def __init__(self, obs_corpus, target_corpus, aggregation_mode, id_list=None):
self.obs_corpus = obs_corpus
self.N = len(obs_corpus)
# for standalone features, range so zip works right
self.target_corpus = range(self.N) if target_corpus is None else target_corpus
# id_list is used for group based relevance/distance detectors (where?)
self.id_list = range(self.N) if id_list is None else id_list
# aggregation for multi-value input fields such as hit_heading and hit_category
self.aggregation_mode, self.aggregator = self._check_aggregation_mode(aggregation_mode)
def _check_aggregation_mode(self, aggregation_mode):
valid_aggregation_modes = ["", "size", "mean", "std", "max", "min", "median"]
if isinstance(aggregation_mode, str):
aggregation_mode = [aggregation_mode]
if isinstance(aggregation_mode, list):
for m in aggregation_mode:
assert m.lower() in valid_aggregation_modes, "Wrong aggregation mode: %s" % (m)
aggregation_mode = [m.lower() for m in aggregation_mode]
aggregator = [None if m == "" else getattr(np, m) for m in aggregation_mode]
return aggregation_mode, aggregator
def transform(self):
# generate scores
score = list(map(self.transform_one, self.obs_corpus, self.target_corpus, self.id_list))
# aggregation
if isinstance(score[0], list):
# single aggregation
res = np.zeros((self.N, len(self.aggregator)), dtype=float)
for m, aggregator in enumerate(self.aggregator):
for i in range(self.N):
try:
s = aggregator(score[i])
except:
s = config.MISSING_VALUE_NUMERIC
res[i,m] = s
else:
res = np.asarray(score)
return res
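# Illustrative sketch (not part of the original module): a minimal concrete
# estimator showing the interface BaseEstimator expects from subclasses, namely
# __name__() returning the feature name and transform_one(obs, target, id)
# returning a score. The class name and the word-overlap metric are assumptions
# made only for demonstration.
class ExampleWordOverlap(BaseEstimator):
    def __name__(self):
        return "ExampleWordOverlap"

    def transform_one(self, obs, target, id):
        # Number of distinct terms shared by the query and the target text.
        return len(set(obs.lower().split()) & set(target.lower().split()))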
class BaseMultiEstimatorWrapper(object):
"""
Base class for wrapping an estimator to support multi-obs or multi-target
values such as redirect.title and heading
"""
def __init__(self, generator):
self.generator = generator
def __call__(self, *args, **kwargs):
est = self.generator(*args, **kwargs)
assert est.aggregation_mode != [""]
# Evil hax
name = est.__name__()
def __name__():
return ["%s_%s" % (name, x) for x in est.aggregation_mode]
est.__name__ = __name__
# It would be nice if we could deduplicate here as well, but
# it requires pulling the full dataset into memory. A quick
# test with outgoing links took >10GB before being canceled
est.transform_one = self.gen_transform_one(est.transform_one)
return est
class MultiObjEstimatorWrapper(BaseMultiEstimatorWrapper):
def gen_transform_one(self, transform_one):
def replacement(obs, target, id):
assert isinstance(obs, tuple)
return [transform_one(x, target, id) for x in obs]
return replacement
class MultiTargetEstimatorWrapper(BaseMultiEstimatorWrapper):
def gen_transform_one(self, transform_one):
def replacement(obs, target, id):
assert isinstance(target, tuple)
return [transform_one(obs, x, id) for x in target]
return replacement
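# Usage sketch (illustrative assumption): the wrappers above are applied to an
# estimator class before it is handed to a feature wrapper, so that every value
# of a multi-valued field is scored and then aggregated, e.g.
#   generator = MultiTargetEstimatorWrapper(ExampleWordOverlap)
#   estimator = generator(obs_corpus, target_corpus, ["mean", "max"])
# which yields one "mean" and one "max" feature per observation, named
# ExampleWordOverlap_mean and ExampleWordOverlap_max.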
def make_transformer(dfAll, field):
if field in config.ES_DOC_FIELDS:
transformer = functools.partial(ShelveLookupTransformer, config.ES_PAGE_DOCS_SHELVE, field)
field = 'hit_page_id'
elif field[-8:] == '_termvec':
if field[:-8] in config.ES_TERM_FIELDS:
es_field = field[:-8]
fname = config.ES_PAGE_TERM_VEC_SHELVE
field = 'hit_page_id'
elif field[:6] == 'query_':
es_field = field[6:-8]
fname = config.ES_QUERY_TERM_VEC_SHELVE
field = 'query'
elif field[:11] == 'norm_query_':
es_field = field[11:-8]
fname = config.ES_QUERY_TERM_VEC_SHELVE
field = 'norm_query'
else:
es_field = None
if es_field in config.ES_TERM_FIELDS:
transformer = functools.partial(ShelveLookupTransformer, fname, es_field)
else:
transformer = None
elif not field in dfAll.columns:
transformer = None
else:
transformer = NoopTransformer
return transformer, field
def make_deduplicator(deduplicate, dfAll, obs_field, target_field):
if not deduplicate:
return NoopDeduplicator(dfAll, obs_field, target_field)
elif target_field is None:
return SingleFieldDeduplicator(dfAll, obs_field)
else:
return DualFieldDeduplicator(dfAll, obs_field, target_field)
# Wrapper for generating standalone features, e.g.
# count of words in a search query
class StandaloneFeatureWrapper(object):
def __init__(self, generator, dfAll, obs_fields, param_list, feat_dir, logger, deduplicate=False):
self.generator = generator
self.dfAll = dfAll
self.obs_fields = obs_fields
self.param_list = param_list
self.feat_dir = feat_dir
self.logger = logger
self.make_deduplicator = functools.partial(make_deduplicator, deduplicate, dfAll)
def go(self):
y_train = self.dfAll["relevance"].values
for obs_field in self.obs_fields:
obs_transformer, obs_field_transformed = make_transformer(self.dfAll, obs_field)
if obs_transformer is None:
self.logger.info("Skip %s" % (obs_field))
continue
deduplicator = self.make_deduplicator(obs_field_transformed, None)
obs_corpus, _ = deduplicator.deduplicate()
obs_trans = obs_transformer(obs_corpus)
estimator = self.generator(obs_trans, None, *self.param_list)
x = deduplicator.reduplicate(obs_corpus, None, estimator.transform())
if isinstance(estimator.__name__(), list):
for i, feat_name in enumerate(estimator.__name__()):
self.save_feature(feat_name, obs_field, 1, x[:,i], y_train)
else:
dim = np_utils._dim(x)
self.save_feature(estimator.__name__(), obs_field, dim, x, y_train)
def save_feature(self, feat_name, obs_field, dim, x, y):
fname = "%s_%s_%dD" % (feat_name, obs_field, dim)
table_utils._write(os.path.join(self.feat_dir, fname+config.FEAT_FILE_SUFFIX), x)
if dim == 1:
corr = np_utils._corr(x, y)
self.logger.info("%s (%dD): corr=%.6f" % (fname, dim, corr))
# wrapper for generating pairwise feature, e.g.,
# intersect count of words between query and page title
class PairwiseFeatureWrapper(object):
def __init__(self, generator, dfAll, obs_fields, target_fields, param_list, feat_dir, logger, deduplicate=False):
self.generator = generator
self.dfAll = dfAll
self.obs_fields = obs_fields
self.target_fields = target_fields
self.param_list = param_list
self.feat_dir = feat_dir
self.logger = logger
self.make_deduplicator = functools.partial(make_deduplicator, deduplicate, dfAll)
def go(self):
y_train = self.dfAll['relevance'].values
for obs_field in self.obs_fields:
obs_transformer, obs_field_transformed = make_transformer(self.dfAll, obs_field)
if obs_transformer is None:
self.logger.info("Skip %s" % (obs_field))
continue
for target_field in self.target_fields:
target_transformer, target_field_transformed = make_transformer(self.dfAll, target_field)
if target_transformer is None:
self.logger.info("Skip %s" % (target_field))
continue
deduplicator = self.make_deduplicator(obs_field_transformed, target_field_transformed)
obs_corpus, target_corpus = deduplicator.deduplicate()
obs_trans = obs_transformer(obs_corpus)
target_trans = target_transformer(target_corpus)
estimator = self.generator(obs_trans, target_trans, *self.param_list)
x = deduplicator.reduplicate(obs_corpus, target_corpus, estimator.transform())
if isinstance(estimator.__name__(), list):
for i, feat_name in enumerate(estimator.__name__()):
self.save_feature(feat_name, obs_field, target_field, 1, x[:,i], y_train)
else:
dim = np_utils._dim(x)
self.save_feature(estimator.__name__(), obs_field, target_field, dim, x, y_train)
# Release memory between iterations. Not sure if necessary yet,
# but noticed some strange memory usage so trying this out
del obs_corpus
del obs_trans
del target_corpus
del target_trans
del x
def save_feature(self, feat_name, obs_field, target_field, dim, x, y):
fname = "%s_%s_x_%s_%dD" % (feat_name, obs_field, target_field, dim)
table_utils._write(os.path.join(self.feat_dir, fname + config.FEAT_FILE_SUFFIX), x)
if dim == 1:
corr = np_utils._corr(x, y)
self.logger.info("%s (%dD): corr=%.6f" % (fname, dim, corr))
class NoopTransformer(object):
def __init__(self, corpus):
self.corpus = corpus
def __len__(self):
return len(self.corpus)
def __iter__(self):
return iter(self.corpus)
def __getitem__(self, i):
return self.corpus[i]
# Could be more careful .. but we will only open at
# most 3 (currently) in read only so whatever...
open_shelves = {}
class ShelveLookupTransformer(object):
def __init__(self, filename, field, corpus):
self.filename = filename
self.field = field
self.corpus = corpus
if not filename in open_shelves:
open_shelves[filename] = table_utils._open_shelve_read(self.filename)
self.data = open_shelves[filename]
def __len__(self):
return len(self.corpus)
def __iter__(self):
for key in self.corpus:
if isinstance(key, unicode):
val = self.data[key.encode('utf8')]
else:
val = self.data[str(key)]
yield val if self.field is None else val[self.field]
def __getitem__(self, i):
key = self.corpus[i]
if isinstance(key, unicode):
val = self.data[key.encode('utf8')]
else:
val = self.data[str(key)]
return val if self.field is None else val[self.field]
class NoopDeduplicator(object):
"""
Fills the deduplicator interface, but does nothing for
estimators that don't want deduplication
"""
def __init__(self, dfAll, obs_field, target_field):
self.dfAll = dfAll
self.obs_field = obs_field
self.target_field = target_field
def deduplicate(self):
obs_corpus = self.dfAll[self.obs_field].values
target_corpus = None if self.target_field is None else self.dfAll[self.target_field].values
return obs_corpus, target_corpus
def reduplicate(self, obs_corpus, target_corpus, x):
return x
class SingleFieldDeduplicator(object):
def __init__(self, dfAll, obs_field):
self.dfAll = dfAll
self.obs_field = obs_field
def deduplicate(self):
obs_corpus = self.dfAll[self.obs_field].drop_duplicates().values
return obs_corpus, None
def reduplicate(self, obs_corpus, target_corpus, x):
# re-duplicate the values
x_df = pd.DataFrame(zip(obs_corpus, x), columns=['src', 'est']).set_index(['src'])
# We need obs_field in a list to ensure we get back a DataFrame and not a Series
x_redup = self.dfAll[[self.obs_field]].join(x_df, on=[self.obs_field], how='left')['est'].values
# This feels like a hack, but we have ended up with an ndarray of ndarray on
# aggregations and need to fix it
if type(x[0]) == np.ndarray:
x_redup = np.vstack(x_redup)
return x_redup
class DualFieldDeduplicator(object):
def __init__(self, dfAll, obs_field, target_field):
self.dfAll = dfAll
self.obs_field = obs_field
self.target_field = target_field
def deduplicate(self):
combined_corpus = self.dfAll[[self.obs_field, self.target_field]].drop_duplicates().values
obs_corpus = combined_corpus[:,0]
target_corpus = combined_corpus[:,1]
return obs_corpus, target_corpus
def reduplicate(self, obs_corpus, target_corpus, x):
x_df = pd.DataFrame(zip(obs_corpus, target_corpus, x), columns=['src1', 'src2', 'est']) \
.set_index(['src1', 'src2'])
x_redup = self.dfAll[[self.obs_field, self.target_field]] \
.join(x_df, on=[self.obs_field, self.target_field], how='left')['est'].values
# This feels like a hack, but we have ended up with an ndarray of ndarray on
# aggregations and need to fix it
if type(x[0]) == np.ndarray:
x_redup = np.vstack(x_redup)
return x_redup
| mit | -4,614,662,668,924,669,000 | 39.786145 | 117 | 0.605494 | false |
whitepyro/debian_server_setup | sickbeard/providers/torrentleech.py | 1 | 10817 | # Author: Idan Gutman
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import traceback
import datetime
import urlparse
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.common import Overview
from sickbeard.exceptions import ex
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
from sickbeard.bs4_parser import BS4Parser
from lib.unidecode import unidecode
from sickbeard.helpers import sanitizeSceneName
class TorrentLeechProvider(generic.TorrentProvider):
urls = {'base_url': 'https://torrentleech.org/',
'login': 'https://torrentleech.org/user/account/login/',
'detail': 'https://torrentleech.org/torrent/%s',
'search': 'https://torrentleech.org/torrents/browse/index/query/%s/categories/%s',
'download': 'https://torrentleech.org%s',
'index': 'https://torrentleech.org/torrents/browse/index/categories/%s',
}
def __init__(self):
generic.TorrentProvider.__init__(self, "TorrentLeech")
self.supportsBacklog = True
self.enabled = False
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.cache = TorrentLeechCache(self)
self.url = self.urls['base_url']
self.categories = "2,26,27,32"
def isEnabled(self):
return self.enabled
def imageName(self):
return 'torrentleech.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _doLogin(self):
login_params = {'username': self.username,
'password': self.password,
'remember_me': 'on',
'login': 'submit',
}
self.session = requests.Session()
try:
response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
return False
if re.search('Invalid Username/password', response.text) \
or re.search('<title>Login :: TorrentLeech.org</title>', response.text) \
or response.status_code == 401:
logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
return False
return True
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName SXX
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
if self.show.air_by_date:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self._doLogin():
return []
for mode in search_params.keys():
for search_string in search_params[mode]:
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
if mode == 'RSS':
searchURL = self.urls['index'] % self.categories
else:
searchURL = self.urls['search'] % (search_string, self.categories)
logger.log(u"Search string: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
try:
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
torrent_table = html.find('table', attrs={'id': 'torrenttable'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.DEBUG)
continue
for result in torrent_table.find_all('tr')[1:]:
try:
link = result.find('td', attrs={'class': 'name'}).find('a')
url = result.find('td', attrs={'class': 'quickdownload'}).find('a')
title = link.string
download_url = self.urls['download'] % url['href']
id = int(link['href'].replace('/torrent/', ''))
seeders = int(result.find('td', attrs={'class': 'seeders'}).string)
leechers = int(result.find('td', attrs={'class': 'leechers'}).string)
except (AttributeError, TypeError):
continue
#Filter unseeded torrent
if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
continue
if not title or not download_url:
continue
item = title, download_url, id, seeders, leechers
logger.log(u"Found result: " + title + "(" + download_url + ")", logger.DEBUG)
items[mode].append(item)
except Exception, e:
logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url, id, seeders, leechers = item
if title:
title = u'' + title
title = title.replace(' ', '.')
if url:
url = str(url).replace('&', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
class TorrentLeechCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll TorrentLeech every 20 minutes max
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['']}
return self.provider._doSearch(search_params)
provider = TorrentLeechProvider()
| gpl-3.0 | -3,897,464,253,921,688,000 | 37.222615 | 118 | 0.555699 | false |
ClaudioNahmad/Servicio-Social | Parametros/CosmoMC/CosmoMC-master/batch2/outputs/FAP_fsigma8.py | 1 | 1561 |
import planckStyle as s
from paramgrid import batchjob
from pylab import *
from getdist.densities import Density2D
roots = ['base_' + s.defdata + '_lensing']
g = s.getSinglePlotter(ratio=1)
pars = g.get_param_array(roots[0], ['FAP057', 'fsigma8z057'])
def RSDdensity(FAPbar, f8bar, covfile):
incov = loadtxt(covfile)
invcov = inv(inv(incov)[1:, 1:])
FAPv = np.arange(0.56, 0.78, 0.003)
f8v = np.arange(0.28, 0.63, 0.003)
FAP, f8 = np.meshgrid(FAPv, f8v)
like = (FAP - FAPbar) ** 2 * invcov[0, 0] + 2 * (FAP - FAPbar) * (f8 - f8bar) * invcov[0, 1] + (f8 - f8bar) ** 2 * invcov[1, 1]
density = Density2D(FAPv, f8v, exp(-like / 2))
density.contours = exp(-np.array([1.509, 2.4477]) ** 2 / 2)
return density
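# Editorial note, not in the original script: the data files hold an inverse
# covariance for a three-element data vector, so inv(inv(incov)[1:, 1:]) first
# converts it to a covariance, drops the first entry (marginalising over it) and
# re-inverts, leaving the 2x2 inverse covariance in (F_AP, f*sigma_8). The levels
# exp(-1.509**2/2) and exp(-2.4477**2/2) are the usual two-parameter 68% and 95%
# confidence contours.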
FAPbar = 0.6725
f8bar = 0.4412
density = RSDdensity(FAPbar, f8bar, batchjob.getCodeRootPath() + 'data/sdss_DR11CMASS_RSD_bao_invcov_Samushia.txt')
g.add_2d_contours(roots[0], 'FAP057', 'fsigma8z057', filled=True, density=density)
# CS = contourf(FAP, f8, like, origin='lower', levels=[2.279, 5.991], colors='r')
FAPbar = .683
f8bar = 0.422
density = RSDdensity(FAPbar, f8bar, batchjob.getCodeRootPath() + 'data/sdss_DR11CMASS_RSD_bao_invcov_Beutler.txt')
g.add_2d_contours(roots[0], 'FAP057', 'fsigma8z057', filled=False, density=density, ls=':', alpha=0.5)
g.add_2d_contours(roots[0], 'FAP057', 'fsigma8z057', filled=True, plotno=3)
g.add_legend(['BOSS CMASS (Samushia et al.)', 'BOSS CMASS (Beutler et al.)', s.defplanck + '+lensing'], legend_loc='upper left')
g.setAxes(params=pars)
g.export()
| gpl-3.0 | -9,192,729,011,655,010,000 | 27.907407 | 131 | 0.660474 | false |
atlefren/beercalc | app/api.py | 1 | 3653 | from flask import g
from app import app, db
from app.models import Malt, Hop, Yeast, Brew
import flask.ext.restless
from flask.ext.restless import ProcessingException
manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)
# TODO: simplify this...
def verify_is_number(dict, value, errors):
if value in dict:
if dict[value] == "":
dict[value] = None
else:
try:
float(dict[value])
except Exception:
errors.append({"field": value, "message": "Must be number"})
def verify_is_set(dict, value, errors):
if not dict[value] or dict[value] == "":
errors.append({"field": value, "message": "Must be set"})
def malt_put_preprocessor(instid, data):
malt_verify(data)
return data
def malt_post_preprocessor(data):
malt_verify(data)
return data
def malt_verify(data):
errors = []
verify_is_set(data, "name", errors)
verify_is_number(data, "ppg", errors)
verify_is_number(data, "color", errors)
if errors:
raise ProcessingException(
message=errors,
status_code=400
)
# Create API endpoints, which will be available at /api/<tablename> by
# default. Allowed HTTP methods can be specified as well.
manager.create_api(
Malt,
methods=['GET', 'POST', 'PUT', "DELETE"],
preprocessors={
'PATCH_SINGLE': [malt_put_preprocessor],
'POST': [malt_post_preprocessor],
},
)
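# Illustrative example (assumption, not part of the project): with the manager
# above, Flask-Restless serves the model at /api/malt, so a client could send
#   POST /api/malt  {"name": "Pale Ale", "ppg": "37", "color": "3"}
# and a failed check in malt_verify() comes back as HTTP 400 carrying the
# [{"field": ..., "message": ...}] list raised in the ProcessingException.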
def hop_put_preprocessor(instid, data):
hop_verify(data)
return data
def hop_post_preprocessor(data):
hop_verify(data)
return data
def hop_verify(data):
errors = []
verify_is_set(data, "name", errors)
verify_is_number(data, "alpha_acid", errors)
if errors:
raise ProcessingException(
message=errors,
status_code=400
)
# Create API endpoints, which will be available at /api/<tablename> by
# default. Allowed HTTP methods can be specified as well.
manager.create_api(
Hop,
methods=['GET', 'POST', 'PUT', "DELETE"],
preprocessors={
'PATCH_SINGLE': [hop_put_preprocessor],
'POST': [hop_post_preprocessor],
},
)
def yeast_put_preprocessor(instid, data):
yeast_verify(data)
return data
def yeast_post_preprocessor(data):
yeast_verify(data)
return data
def yeast_verify(data):
errors = []
verify_is_set(data, "name", errors)
verify_is_number(data, "attenuation", errors)
if errors:
raise ProcessingException(
message=errors,
status_code=400
)
# Create API endpoints, which will be available at /api/<tablename> by
# default. Allowed HTTP methods can be specified as well.
manager.create_api(
Yeast,
methods=['GET', 'POST', 'PUT', "DELETE"],
preprocessors={
'PATCH_SINGLE': [yeast_put_preprocessor],
'POST': [yeast_post_preprocessor],
},
)
def brew_put_preprocessor(instid, data):
brew = Brew.query.get(instid)
print brew.user_id
if not g.user.is_authenticated() or brew.user_id != g.user.id:
raise ProcessingException(
message='Not Authorized',
status_code=401
)
return data
def brew_post_preprocessor(data):
if not g.user.is_authenticated():
raise ProcessingException(
message='Not Authorized',
status_code=401
)
data["user_id"] = g.user.id
return data
manager.create_api(
Brew,
methods=['GET', 'POST', 'PUT'],
preprocessors={
'POST': [brew_post_preprocessor],
'PATCH_SINGLE': [brew_put_preprocessor],
},
)
| mit | -1,872,889,281,347,377,700 | 22.416667 | 76 | 0.617301 | false |
garrykevin-ep/Skillet | coding/views.py | 1 | 5064 | from django.shortcuts import render
from django.http import HttpResponse ,HttpResponseRedirect
from django.urls import reverse
from .models import *
from login.models import UserProfile
from quiz.models import Question
from django.core.files import File
# Create your views here.
def first_question():
list = Question.objects.all()
list = list[0]
return list
def last_question():
list = Question.objects.all()
len_lst = len(list)
list = list[len_lst-1]
return list
def find_wrong_line(correct_file_list,user_file_list):
#print correct_file_list
if len(correct_file_list) > len(user_file_list):
return -2
for i in range(0,len(correct_file_list)):
user_line = user_file_list[i].rstrip('\n')
correct_line = correct_file_list[i].rstrip('\n')
#print 'userline '+user_line
#print 'correct_line '+correct_line
if correct_line != user_line:
# print correct_line+" expexted this "+ " but got "+user_line
return i
return -1
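# Worked example (illustrative): if lines_per_answer is 2 and find_wrong_line()
# reports a mismatch at index 5, calculate_wrong() below places it in output
# block 5 / 2 = 2, i.e. test case number 2 + 1 = 3 (test cases are 1-indexed),
# and slices the matching lines out of the testcase file and both output files
# to store on the UserSubmission.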
def calculate_wrong(request,question,code,wrong_line,correct_file_list,user_file_list):
lines_per_testcase = code.lines_per_testcase
lines_per_answer = code.lines_per_answer
testcase_file = code.testcase_file
# number of the failed test case; +1 converts the 0-indexed output block to a 1-indexed test case
ith_test_case_failed = (wrong_line/lines_per_answer)+1
# file open
testcase_file.open('r')
testcase_file_list = testcase_file.readlines()
testcase_file.close()
# file close
testcase_start = (lines_per_testcase*(ith_test_case_failed-1))+1
testcase_end = testcase_start + lines_per_testcase
failed_testcase = str()
for x in testcase_file_list[testcase_start:testcase_end]:
failed_testcase += x
output_start = lines_per_answer*(ith_test_case_failed-1)
output_end = output_start+lines_per_answer
user_output = str()
for x in user_file_list[output_start:output_end]:
user_output +=x
expected_output = str()
for x in correct_file_list[output_start:output_end]:
expected_output +=x
rmin = request.POST['min']
rsec = request.POST['sec']
submit = UserSubmission.objects.create(User=request.user,question=question,failed_testcase=failed_testcase,expected_output=expected_output,user_output=user_output,ith_test_case_failed=ith_test_case_failed,rmin=rmin,rsec=rsec)
submit.save()
# will hit only on post
def answer_coding(request,pk,question):
code = CodeQuestionExtend.objects.get(question=question)
correct_file = code.answer_file
# file open
correct_file.open(mode='r')
correct_file_list = correct_file.readlines()
correct_file.close()
# file close
user_file_list = request.FILES['userAnswer'].readlines()
rmin = request.POST['min']
rsec = request.POST['sec']
wrong_line = find_wrong_line(correct_file_list,user_file_list)
if wrong_line == -2:
# dic['message'] = "some testcases are missing"
submit = UserSubmission.objects.create(question=question,User=request.user,ith_test_case_failed=-2,rmin=rmin,rsec=rsec)
submit.save()
elif wrong_line != -1:
calculate_wrong(request,question,code,wrong_line,correct_file_list,user_file_list)
else:
status = CodeUserStatus.objects.get(question=question,User=request.user)
status.question_status = "Accepted"
status.rmin = rmin
status.save()
submit = UserSubmission.objects.create(User=request.user,question=question,ith_test_case_failed=-1,rmin=rmin,rsec=rsec)
submit.save()
return coding_display(request,pk,question)
def coding_display(request,pk,question):
if request.method == 'GET':
current_status = CodeUserStatus.objects.get(User=request.user,question=question)
user = UserProfile.objects.get(user =request.user)
code = CodeQuestionExtend.objects.get(question=question)
testcase = CodeTestCase.objects.filter(question=question)
user_submission = UserSubmission.objects.filter(User=request.user,question=question).order_by('-pk')
is_not_first_question = False
islast_question = False
if question != first_question():
is_not_first_question = True
if question == last_question():
islast_question = True
dic ={
'question' : question,
'code' :code,
'testcase' : testcase,
'current_status' : current_status,
'last_question' : islast_question,
'is_not_first_question' : is_not_first_question,
'user' : user,
'user_submission' : user_submission,
}
return render(request,'coding/index.html',dic)
else:
return HttpResponseRedirect(reverse('quiz:disp',args = (question.id,)))
def submission(request,pk,question):
user_submission = UserSubmission.objects.get(pk=pk,question=question)
if user_submission.User == request.user:
failed_testcase = str(user_submission.failed_testcase)
failed_testcase = failed_testcase.strip("[']")
failed_testcase = failed_testcase.split(',')
print failed_testcase
dic = {
'ith_test_case_failed' : user_submission.ith_test_case_failed,
'wrong_testcase' : failed_testcase,
'expected_output' : user_submission.expected_output,
'user_output' : user_submission.user_output
}
return render(request,'coding/submission.html',dic)
else:
return HttpResponse("You dont own this submission")
| bsd-3-clause | 3,437,592,960,928,893,000 | 31.670968 | 228 | 0.731635 | false |
tsl143/zamboni | mkt/developers/models.py | 1 | 18040 | import json
import posixpath
import string
import uuid
from copy import copy
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
import commonware.log
import jinja2
from tower import ugettext as _
import mkt
from lib.crypto import generate_key
from lib.pay_server import client
from mkt.access.models import Group
from mkt.constants.payments import ACCESS_SIMULATE
from mkt.constants.payments import PROVIDER_BANGO, PROVIDER_CHOICES
from mkt.ratings.models import Review
from mkt.site.models import ManagerBase, ModelBase
from mkt.tags.models import Tag
from mkt.users.models import UserForeignKey, UserProfile
from mkt.versions.models import Version
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
log = commonware.log.getLogger('z.devhub')
class CantCancel(Exception):
pass
class SolitudeSeller(ModelBase):
# TODO: When Solitude allows for it, this should be updated to be 1:1 with
# users.
user = UserForeignKey()
uuid = models.CharField(max_length=255, unique=True)
resource_uri = models.CharField(max_length=255)
class Meta:
db_table = 'payments_seller'
@classmethod
def create(cls, user):
uuid_ = str(uuid.uuid4())
res = client.api.generic.seller.post(data={'uuid': uuid_})
uri = res['resource_uri']
obj = cls.objects.create(user=user, uuid=uuid_, resource_uri=uri)
log.info('[User:%s] Created Solitude seller (uuid:%s)' %
(user, uuid_))
return obj
class PaymentAccount(ModelBase):
user = UserForeignKey()
name = models.CharField(max_length=64)
agreed_tos = models.BooleanField(default=False)
solitude_seller = models.ForeignKey(SolitudeSeller)
# These two fields can go away when we're not 1:1 with SolitudeSellers.
seller_uri = models.CharField(max_length=255, unique=True)
uri = models.CharField(max_length=255, unique=True)
# A soft-delete so we can talk to Solitude asynchronously.
inactive = models.BooleanField(default=False)
# The id for this account from the provider.
account_id = models.CharField(max_length=255)
# Each account will be for a particular provider.
provider = models.IntegerField(choices=PROVIDER_CHOICES,
default=PROVIDER_BANGO)
shared = models.BooleanField(default=False)
class Meta:
db_table = 'payment_accounts'
unique_together = ('user', 'uri')
def cancel(self, disable_refs=False):
"""Cancels the payment account.
If `disable_refs` is set, existing apps that use this payment account
will be set to STATUS_NULL.
"""
account_refs = AddonPaymentAccount.objects.filter(account_uri=self.uri)
if self.shared and account_refs:
# With sharing a payment account comes great responsibility. It
# would be really mean to create a payment account, share it
# and have lots of apps use it. Then one day you remove it and
# make a whole pile of apps in the marketplace get removed from
# the store, or have in-app payments fail.
#
# For the moment I'm just stopping this completely, if this ever
# happens, we'll have to go through a deprecation phase.
# - let all the apps that use it know
# - when they have all stopped sharing it
# - re-run this
log.error('Cannot cancel a shared payment account that has '
'apps using it.')
raise CantCancel('You cannot cancel a shared payment account.')
self.update(inactive=True)
log.info('Soft-deleted payment account (uri: %s)' % self.uri)
for acc_ref in account_refs:
if (disable_refs and
not acc_ref.addon.has_multiple_payment_accounts()):
log.info('Changing app status to NULL for app: {0}'
'because of payment account deletion'.format(
acc_ref.addon_id))
acc_ref.addon.update(status=mkt.STATUS_NULL)
log.info('Deleting AddonPaymentAccount for app: {0} because of '
'payment account deletion'.format(acc_ref.addon_id))
acc_ref.delete()
def get_provider(self):
"""Returns an instance of the payment provider for this account."""
# TODO: fix circular import. Providers imports models which imports
# forms which imports models.
from mkt.developers.providers import get_provider
return get_provider(id=self.provider)
def __unicode__(self):
date = self.created.strftime('%m/%y')
if not self.shared:
return u'%s - %s' % (date, self.name)
# L10n: {0} is the name of the account.
return _(u'Donate to {0}'.format(self.name))
def get_agreement_url(self):
return reverse('mkt.developers.provider.agreement', args=[self.pk])
class AddonPaymentAccount(ModelBase):
addon = models.ForeignKey(
'webapps.Webapp', related_name='app_payment_accounts')
payment_account = models.ForeignKey(PaymentAccount)
account_uri = models.CharField(max_length=255)
product_uri = models.CharField(max_length=255, unique=True)
class Meta:
db_table = 'addon_payment_account'
@property
def user(self):
return self.payment_account.user
class UserInappKey(ModelBase):
solitude_seller = models.ForeignKey(SolitudeSeller)
seller_product_pk = models.IntegerField(unique=True)
def secret(self):
return self._product().get()['secret']
def public_id(self):
return self._product().get()['public_id']
def reset(self):
self._product().patch(data={'secret': generate_key(48)})
@classmethod
def create(cls, user, public_id=None, secret=None, access_type=None):
if public_id is None:
public_id = str(uuid.uuid4())
if secret is None:
secret = generate_key(48)
if access_type is None:
access_type = ACCESS_SIMULATE
sel = SolitudeSeller.create(user)
prod = client.api.generic.product.post(data={
'seller': sel.resource_uri, 'secret': secret,
'external_id': str(uuid.uuid4()), 'public_id': public_id,
'access': access_type,
})
log.info(u'User %s created an in-app payments dev key product=%s '
u'with %s' % (unicode(user), prod['resource_pk'], sel))
return cls.objects.create(solitude_seller=sel,
seller_product_pk=prod['resource_pk'])
def _product(self):
return client.api.generic.product(self.seller_product_pk)
class Meta:
db_table = 'user_inapp_keys'
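# Usage sketch (illustrative, not from the original module): a view handing a
# developer their sandbox credentials might do
#   key = UserInappKey.create(request.user)
#   public_id, secret = key.public_id(), key.secret()
#   key.reset()  # rotates the secret stored in Solitude
# Each of these calls proxies to the Solitude generic product API via `client`.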
class PreloadTestPlan(ModelBase):
addon = models.ForeignKey('webapps.Webapp')
last_submission = models.DateTimeField(auto_now_add=True)
filename = models.CharField(max_length=60)
status = models.PositiveSmallIntegerField(default=mkt.STATUS_PUBLIC)
class Meta:
db_table = 'preload_test_plans'
ordering = ['-last_submission']
@property
def preload_test_plan_url(self):
host = (settings.PRIVATE_MIRROR_URL if self.addon.is_disabled
else settings.LOCAL_MIRROR_URL)
return posixpath.join(host, str(self.addon.id), self.filename)
# When an app is deleted we need to remove the preload test plan.
def preload_cleanup(*args, **kwargs):
instance = kwargs.get('instance')
PreloadTestPlan.objects.filter(addon=instance).delete()
models.signals.post_delete.connect(preload_cleanup, sender=Webapp,
dispatch_uid='webapps_preload_cleanup')
class AppLog(ModelBase):
"""
This table is for indexing the activity log by app.
"""
addon = models.ForeignKey('webapps.Webapp', db_constraint=False)
activity_log = models.ForeignKey('ActivityLog')
class Meta:
db_table = 'log_activity_app'
ordering = ('-created',)
class CommentLog(ModelBase):
"""
This table is for indexing the activity log by comment.
"""
activity_log = models.ForeignKey('ActivityLog')
comments = models.TextField()
class Meta:
db_table = 'log_activity_comment'
ordering = ('-created',)
class VersionLog(ModelBase):
"""
This table is for indexing the activity log by version.
"""
activity_log = models.ForeignKey('ActivityLog')
version = models.ForeignKey(Version)
class Meta:
db_table = 'log_activity_version'
ordering = ('-created',)
class UserLog(ModelBase):
"""
This table is for indexing the activity log by user.
Note: This includes activity performed unto the user.
"""
activity_log = models.ForeignKey('ActivityLog')
user = models.ForeignKey(UserProfile)
class Meta:
db_table = 'log_activity_user'
ordering = ('-created',)
class GroupLog(ModelBase):
"""
This table is for indexing the activity log by access group.
"""
activity_log = models.ForeignKey('ActivityLog')
group = models.ForeignKey(Group)
class Meta:
db_table = 'log_activity_group'
ordering = ('-created',)
class ActivityLogManager(ManagerBase):
def for_apps(self, apps):
vals = (AppLog.objects.filter(addon__in=apps)
.values_list('activity_log', flat=True))
if vals:
return self.filter(pk__in=list(vals))
else:
return self.none()
def for_version(self, version):
vals = (VersionLog.objects.filter(version=version)
.values_list('activity_log', flat=True))
return self.filter(pk__in=list(vals))
def for_group(self, group):
return self.filter(grouplog__group=group)
def for_user(self, user):
vals = (UserLog.objects.filter(user=user)
.values_list('activity_log', flat=True))
return self.filter(pk__in=list(vals))
def for_developer(self):
return self.exclude(action__in=mkt.LOG_ADMINS + mkt.LOG_HIDE_DEVELOPER)
def admin_events(self):
return self.filter(action__in=mkt.LOG_ADMINS)
def editor_events(self):
return self.filter(action__in=mkt.LOG_EDITORS)
def review_queue(self, webapp=False):
qs = self._by_type(webapp)
return (qs.filter(action__in=mkt.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID))
def total_reviews(self, webapp=False):
"""Return the top users, and their # of reviews."""
qs = self._by_type(webapp)
return (qs.values('user', 'user__display_name', 'user__email')
.filter(action__in=mkt.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count'))
def monthly_reviews(self, webapp=False):
"""Return the top users for the month, and their # of reviews."""
qs = self._by_type(webapp)
now = datetime.now()
created_date = datetime(now.year, now.month, 1)
return (qs.values('user', 'user__display_name', 'user__email')
.filter(created__gte=created_date,
action__in=mkt.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count'))
def user_position(self, values_qs, user):
try:
return next(i for (i, d) in enumerate(list(values_qs))
if d.get('user') == user.id) + 1
except StopIteration:
return None
def total_reviews_user_position(self, user, webapp=False):
return self.user_position(self.total_reviews(webapp), user)
def monthly_reviews_user_position(self, user, webapp=False):
return self.user_position(self.monthly_reviews(webapp), user)
def _by_type(self, webapp=False):
qs = super(ActivityLogManager, self).get_queryset()
return qs.extra(
tables=['log_activity_app'],
where=['log_activity_app.activity_log_id=log_activity.id'])
class SafeFormatter(string.Formatter):
"""A replacement for str.format that escapes interpolated values."""
def get_field(self, *args, **kw):
# obj is the value getting interpolated into the string.
obj, used_key = super(SafeFormatter, self).get_field(*args, **kw)
return jinja2.escape(obj), used_key
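# Illustrative note (not part of the original source): SafeFormatter escapes the
# interpolated values rather than the format string itself, e.g.
#   SafeFormatter().format(u'<a>{0}</a>', '<script>')  ->  u'<a>&lt;script&gt;</a>'
# so user-supplied arguments cannot inject markup into rendered log entries.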
class ActivityLog(ModelBase):
TYPES = sorted([(value.id, key) for key, value in mkt.LOG.items()])
user = models.ForeignKey('users.UserProfile', null=True)
action = models.SmallIntegerField(choices=TYPES, db_index=True)
_arguments = models.TextField(blank=True, db_column='arguments')
_details = models.TextField(blank=True, db_column='details')
objects = ActivityLogManager()
formatter = SafeFormatter()
class Meta:
db_table = 'log_activity'
ordering = ('-created',)
def f(self, *args, **kw):
"""Calls SafeFormatter.format and returns a Markup string."""
# SafeFormatter escapes everything so this is safe.
return jinja2.Markup(self.formatter.format(*args, **kw))
@property
def arguments(self):
try:
# d is a structure:
# ``d = [{'addons.addon':12}, {'addons.addon':1}, ... ]``
d = json.loads(self._arguments)
        except Exception:
log.debug('unserializing data from addon_log failed: %s' % self.id)
return None
objs = []
for item in d:
# item has only one element.
model_name, pk = item.items()[0]
if model_name in ('str', 'int', 'null'):
objs.append(pk)
else:
(app_label, model_name) = model_name.split('.')
model = apps.get_model(app_label, model_name)
# Cope with soft deleted models.
if hasattr(model, 'with_deleted'):
objs.extend(model.with_deleted.filter(pk=pk))
else:
objs.extend(model.objects.filter(pk=pk))
return objs
@arguments.setter
def arguments(self, args=[]):
"""
Takes an object or a tuple of objects and serializes them and stores it
in the db as a json string.
"""
if args is None:
args = []
if not isinstance(args, (list, tuple)):
args = (args,)
serialize_me = []
for arg in args:
if isinstance(arg, basestring):
serialize_me.append({'str': arg})
elif isinstance(arg, (int, long)):
serialize_me.append({'int': arg})
elif isinstance(arg, tuple):
# Instead of passing an addon instance you can pass a tuple:
# (Webapp, 3) for Webapp with pk=3
serialize_me.append(dict(((unicode(arg[0]._meta), arg[1]),)))
elif arg is not None:
serialize_me.append(dict(((unicode(arg._meta), arg.pk),)))
self._arguments = json.dumps(serialize_me)
@property
def details(self):
if self._details:
return json.loads(self._details)
@details.setter
def details(self, data):
self._details = json.dumps(data)
@property
def log(self):
return mkt.LOG_BY_ID[self.action]
def to_string(self, type_=None):
log_type = mkt.LOG_BY_ID[self.action]
if type_ and hasattr(log_type, '%s_format' % type_):
format = getattr(log_type, '%s_format' % type_)
else:
format = log_type.format
# We need to copy arguments so we can remove elements from it
# while we loop over self.arguments.
arguments = copy(self.arguments)
addon = None
review = None
version = None
collection = None
tag = None
group = None
website = None
for arg in self.arguments:
if isinstance(arg, Webapp) and not addon:
addon = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.name)
arguments.remove(arg)
if isinstance(arg, Review) and not review:
review = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), _('Review'))
arguments.remove(arg)
if isinstance(arg, Version) and not version:
text = _('Version {0}')
version = self.f(text, arg.version)
arguments.remove(arg)
if isinstance(arg, Tag) and not tag:
if arg.can_reverse():
tag = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.tag_text)
else:
tag = self.f('{0}', arg.tag_text)
if isinstance(arg, Group) and not group:
group = arg.name
arguments.remove(arg)
if isinstance(arg, Website) and not website:
website = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.name)
arguments.remove(arg)
try:
kw = dict(addon=addon, review=review, version=version, group=group,
collection=collection, tag=tag,
user=self.user.display_name)
return self.f(format, *arguments, **kw)
except (AttributeError, KeyError, IndexError):
log.warning('%d contains garbage data' % (self.id or 0))
return 'Something magical happened.'
def __unicode__(self):
return self.to_string()
def __html__(self):
return self
| bsd-3-clause | -7,578,592,785,815,707,000 | 33.893617 | 79 | 0.600554 | false |
zjj/trac_hack | trac/mimeview/pygments.py | 1 | 8368 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Matthew Good <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# Author: Matthew Good <[email protected]>
from datetime import datetime
import os
from pkg_resources import resource_filename
import re
from trac.core import *
from trac.config import ListOption, Option
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, Mimeview
from trac.prefs import IPreferencePanelProvider
from trac.util import get_pkginfo
from trac.util.datefmt import http_date, localtz
from trac.util.translation import _
from trac.web import IRequestHandler, HTTPNotFound
from trac.web.chrome import add_notice, add_stylesheet
from genshi import QName, Stream
from genshi.core import Attrs, START, END, TEXT
# Kludge to workaround the lack of absolute imports in Python version prior to
# 2.5
pygments = __import__('pygments', {}, {}, ['lexers', 'styles', 'formatters'])
get_all_lexers = pygments.lexers.get_all_lexers
get_lexer_by_name = pygments.lexers.get_lexer_by_name
HtmlFormatter = pygments.formatters.html.HtmlFormatter
get_all_styles = pygments.styles.get_all_styles
get_style_by_name = pygments.styles.get_style_by_name
__all__ = ['PygmentsRenderer']
class PygmentsRenderer(Component):
"""HTML renderer for syntax highlighting based on Pygments."""
implements(ISystemInfoProvider, IHTMLPreviewRenderer,
IPreferencePanelProvider, IRequestHandler)
default_style = Option('mimeviewer', 'pygments_default_style', 'trac',
"""The default style to use for Pygments syntax highlighting.""")
pygments_modes = ListOption('mimeviewer', 'pygments_modes',
'', doc=
"""List of additional MIME types known by Pygments.
For each, a tuple `mimetype:mode:quality` has to be
specified, where `mimetype` is the MIME type,
`mode` is the corresponding Pygments mode to be used
for the conversion and `quality` is the quality ratio
associated to this conversion. That can also be used
to override the default quality ratio used by the
Pygments render.""")
expand_tabs = True
returns_source = True
QUALITY_RATIO = 7
EXAMPLE = """<!DOCTYPE html>
<html lang="en">
<head>
<title>Hello, world!</title>
<script>
jQuery(document).ready(function($) {
$("h1").fadeIn("slow");
});
</script>
</head>
<body>
<h1>Hello, world!</h1>
</body>
</html>"""
def __init__(self):
self._types = None
# ISystemInfoProvider methods
def get_system_info(self):
version = get_pkginfo(pygments).get('version')
# if installed from source, fallback to the hardcoded version info
if not version and hasattr(pygments, '__version__'):
version = pygments.__version__
yield 'Pygments', version
# IHTMLPreviewRenderer methods
def get_quality_ratio(self, mimetype):
# Extend default MIME type to mode mappings with configured ones
if self._types is None:
self._init_types()
try:
return self._types[mimetype][1]
except KeyError:
return 0
def render(self, context, mimetype, content, filename=None, rev=None):
req = context.req
if self._types is None:
self._init_types()
add_stylesheet(req, '/pygments/%s.css' %
req.session.get('pygments_style', self.default_style))
try:
if len(content) > 0:
mimetype = mimetype.split(';', 1)[0]
language = self._types[mimetype][0]
return self._generate(language, content)
except (KeyError, ValueError):
raise Exception("No Pygments lexer found for mime-type '%s'."
% mimetype)
# IPreferencePanelProvider methods
def get_preference_panels(self, req):
yield ('pygments', _('Syntax Highlighting'))
def render_preference_panel(self, req, panel):
styles = list(get_all_styles())
if req.method == 'POST':
style = req.args.get('style')
if style and style in styles:
req.session['pygments_style'] = style
add_notice(req, _('Your preferences have been saved.'))
req.redirect(req.href.prefs(panel or None))
output = self._generate('html', self.EXAMPLE)
return 'prefs_pygments.html', {
'output': output,
'selection': req.session.get('pygments_style', self.default_style),
'styles': styles
}
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/pygments/(\w+)\.css', req.path_info)
if match:
req.args['style'] = match.group(1)
return True
def process_request(self, req):
style = req.args['style']
try:
style_cls = get_style_by_name(style)
except ValueError, e:
raise HTTPNotFound(e)
parts = style_cls.__module__.split('.')
filename = resource_filename('.'.join(parts[:-1]), parts[-1] + '.py')
mtime = datetime.fromtimestamp(os.path.getmtime(filename), localtz)
last_modified = http_date(mtime)
if last_modified == req.get_header('If-Modified-Since'):
req.send_response(304)
req.end_headers()
return
formatter = HtmlFormatter(style=style_cls)
content = u'\n\n'.join([
formatter.get_style_defs('div.code pre'),
formatter.get_style_defs('table.code td')
]).encode('utf-8')
req.send_response(200)
req.send_header('Content-Type', 'text/css; charset=utf-8')
req.send_header('Last-Modified', last_modified)
req.send_header('Content-Length', len(content))
req.write(content)
# Internal methods
def _init_types(self):
self._types = {}
for lexname, aliases, _, mimetypes in get_all_lexers():
name = aliases and aliases[0] or lexname
for mimetype in mimetypes:
self._types[mimetype] = (name, self.QUALITY_RATIO)
# Pygments currently doesn't know application/javascript
if 'application/javascript' not in self._types:
js_entry = self._types.get('text/javascript')
if js_entry:
self._types['application/javascript'] = js_entry
self._types.update(
Mimeview(self.env).configured_modes_mapping('pygments')
)
def _generate(self, language, content):
lexer = get_lexer_by_name(language, stripnl=False)
return GenshiHtmlFormatter().generate(lexer.get_tokens(content))
class GenshiHtmlFormatter(HtmlFormatter):
"""A Pygments formatter subclass that generates a Python stream instead
of writing markup as strings to an output file.
"""
def _chunk(self, tokens):
"""Groups tokens with the same CSS class in the token stream
and yields them one by one, along with the CSS class, with the
values chunked together."""
last_class = None
text = []
for ttype, value in tokens:
c = self._get_css_class(ttype)
if c == 'n':
c = ''
if c == last_class:
text.append(value)
continue
# If no value, leave the old <span> open.
if value:
yield last_class, u''.join(text)
text = [value]
last_class = c
if text:
yield last_class, u''.join(text)
def generate(self, tokens):
pos = (None, -1, -1)
span = QName('span')
class_ = QName('class')
def _generate():
for c, text in self._chunk(tokens):
if c:
attrs = Attrs([(class_, c)])
yield START, (span, attrs), pos
yield TEXT, text, pos
yield END, span, pos
else:
yield TEXT, text, pos
return Stream(_generate())
| bsd-3-clause | 283,857,220,382,943,170 | 32.741935 | 79 | 0.60325 | false |
apache/bloodhound | bloodhound_search/bhsearch/tests/__init__.py | 2 | 1578 | # -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest
from bhsearch.tests import (
api, index_with_whoosh, query_parser, query_suggestion,
search_resources, security, web_ui, whoosh_backend
)
def suite():
test_suite = unittest.TestSuite()
test_suite.addTest(api.suite())
test_suite.addTest(index_with_whoosh.suite())
test_suite.addTest(query_parser.suite())
test_suite.addTest(query_suggestion.suite())
test_suite.addTest(search_resources.suite())
test_suite.addTest(web_ui.suite())
test_suite.addTest(whoosh_backend.suite())
test_suite.addTest(security.suite())
return test_suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
else:
test_suite = suite()
| apache-2.0 | -4,713,938,990,775,815,000 | 33.304348 | 63 | 0.723701 | false |
Ezhil-Language-Foundation/open-tamil | examples/kural_mathirai.py | 1 | 2245 | #!/usr/bin/env python3
# This Python file uses the following encoding: utf-8
from kural import Thirukkural
from tamil.utf8 import get_letters, get_tamil_words, total_maaththirai
from collections import Counter, OrderedDict
from pprint import pprint
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from scipy.optimize import curve_fit
# Define model function to be used to fit to the data above:
def gauss(x, *p):
A, mu, sigma = p
return A * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
def main():
eq = Counter()
eqd = {}
kural = Thirukkural()
for kural_no in range(1330):
kural_words = get_tamil_words(get_letters(kural.get_kural_no(kural_no + 1).ta))
mathirai = sum([total_maaththirai(word) for word in kural_words])
if eq[mathirai] == 0:
eqd[mathirai] = [kural_no + 1]
else:
eqd[mathirai].append(kural_no + 1)
eq[mathirai] += 1
eq_sorted = OrderedDict(sorted(eq.items(), key=lambda x: x))
pprint(eq_sorted)
pprint(eq_sorted.values())
pprint(eqd)
print("total = ", sum(eq.values()))
plt.scatter(eq_sorted.keys(), eq_sorted.values())
plt.ylabel(u"குறட்பாக்கள் எண்ணிக்கை", {"fontname": "Catamaran"})
plt.xlabel(u"மாத்திரை அளவு", {"fontname": "Catamaran"}) # Arial Unicode MS'})
# p0 is the initial guess for the fitting coefficients (A, mu and sigma above)
p0 = [75.0, 20.0, 5.0]
coeff, var_matrix = curve_fit(
gauss, list(eq_sorted.keys()), list(eq_sorted.values()), p0=p0
)
# Get the fitted curve
hist_fit = gauss(list(eq_sorted.keys()), *coeff)
plt.plot(
eq_sorted.keys(),
hist_fit,
label="Gaussian Fitted data (mean=%g, std=%g)" % (coeff[1], coeff[2]),
)
plt.title(
r"குறள் மாத்திரை வரிசை (Gauss \mu=%g, \sigma=%g)" % (coeff[1], coeff[2]),
{"fontname": "Catamaran"},
)
# Finally, lets get the fitting parameters, i.e. the mean and standard deviation:
print("Fitted mean = ", coeff[1])
print("Fitted standard deviation = ", coeff[2])
plt.show()
if __name__ == "__main__":
main()
| mit | -3,246,167,036,818,263,000 | 31.469697 | 87 | 0.608026 | false |
benjamindeleener/scad | scripts/msct_smooth.py | 1 | 30043 | #!/usr/bin/env python
#########################################################################################
#
# Module containing fitting functions
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Julien Touati
# Created: 2014-10-08
#
# About the license: see the file LICENSE.TXT
#########################################################################################
from scipy.interpolate import splrep, splev
import sct_utils as sct
#=======================================================================================================================
# Over pad the input file, smooth and return the centerline
#=======================================================================================================================
def smooth(fname, padding):
sct.run('isct_c3d '+fname+' -pad '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox 0 -o tmp.centerline_pad.nii.gz')
#=======================================================================================================================
# Spline 2D using splrep & splev
#=======================================================================================================================
def spline_2D(z_centerline, x_centerline):
from numpy import mean, std, sqrt
m = mean(x_centerline)
sigma = std(x_centerline)
print (m - sqrt(2*m))*(sigma**2), (m + sqrt(2*m))*(sigma**2)
smoothing_param = (((m + sqrt(2*m))*(sigma**2))+((m - sqrt(2*m))*(sigma**2)))/2
sct.printv('\nSmoothing results with spline...')
tck = splrep(z_centerline, x_centerline, s = smoothing_param)
x_centerline_fit = splev(z_centerline, tck)
return x_centerline_fit
#=======================================================================================================================
# Polynomial fit
#=======================================================================================================================
def polynomial_fit(x,y,degree):
import numpy as np
coeffs = np.polyfit(x, y, degree)
poly = np.poly1d(coeffs)
y_fit = np.polyval(poly, x)
return y_fit, poly
#=======================================================================================================================
# Polynomial derivative
#=======================================================================================================================
def polynomial_deriv(x,poly):
from numpy import polyder, polyval
poly_deriv = polyder(poly, m = 1)
y_fit_deriv = polyval(poly_deriv, x)
return y_fit_deriv, poly_deriv
#=======================================================================================================================
# Get norm
#=======================================================================================================================
def norm(x, y, z, p1, p2, p3):
from math import sqrt
s = 0
for i in xrange (len(x)-1):
        s += sqrt((p1*(x[i+1]-x[i]))**2+(p2*(y[i+1]-y[i]))**2+(p3*(z[i+1]-z[i]))**2)
print "centerline size: ", s
return s
#=======================================================================================================================
# Evaluate derivative of data points
#=======================================================================================================================
def evaluate_derivative_2D(x,y):
from numpy import array, append
y_deriv = array([(y[i+1]-y[i])/(x[i+1]-x[i]) for i in range(0, len(x)-1)])
y_deriv = append(y_deriv,(y[-1] - y[-2])/(x[-1] - x[-2]))
return y_deriv
#=======================================================================================================================
# Evaluate derivative of data points in 3D
#=======================================================================================================================
def evaluate_derivative_3D(x, y, z, px, py, pz):
from numpy import array, sqrt, insert, append
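    # Scale each coordinate by its pixel dimension, then estimate the unit tangent with
    # central differences normalized by the local chord length; one-sided differences
    # are used at the two endpoints of the curve.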
x = [x_elem*px for x_elem in x]
y = [y_elem*py for y_elem in y]
z = [z_elem*pz for z_elem in z]
x_deriv = array([(x[i+1]-x[i-1])/sqrt((x[i+1]-x[i-1])**2+(y[i+1]-y[i-1])**2+(z[i+1]-z[i-1])**2) for i in range(1,len(x)-1)])
y_deriv = array([(y[i+1]-y[i-1])/sqrt((x[i+1]-x[i-1])**2+(y[i+1]-y[i-1])**2+(z[i+1]-z[i-1])**2) for i in range(1,len(y)-1)])
z_deriv = array([(z[i+1]-z[i-1])/sqrt((x[i+1]-x[i-1])**2+(y[i+1]-y[i-1])**2+(z[i+1]-z[i-1])**2) for i in range(1,len(z)-1)])
x_deriv = insert(x_deriv, 0, (x[1]-x[0])/sqrt((x[1]-x[0])**2+(y[1]-y[0])**2+(z[1]-z[0])**2))
x_deriv = append(x_deriv, (x[-1]-x[-2])/sqrt((x[-1]-x[-2])**2+(y[-1]-y[-2])**2+(z[-1]-z[-2])**2))
#print len(x_deriv)
y_deriv = insert(y_deriv, 0, (y[1]-y[0])/sqrt((x[1]-x[0])**2+(y[1]-y[0])**2+(z[1]-z[0])**2))
y_deriv = append(y_deriv, (y[-1]-y[-2])/sqrt((x[-1]-x[-2])**2+(y[-1]-y[-2])**2+(z[-1]-z[-2])**2))
z_deriv = insert(z_deriv, 0, (z[1]-z[0])/sqrt((x[1]-x[0])**2+(y[1]-y[0])**2+(z[1]-z[0])**2))
z_deriv = append(z_deriv, (z[-1]-z[-2])/sqrt((x[-1]-x[-2])**2+(y[-1]-y[-2])**2+(z[-1]-z[-2])**2))
return x_deriv, y_deriv, z_deriv
#=======================================================================================================================
# Non parametric regression
#=======================================================================================================================
def non_parametric(x,y,f = 0.25,iter = 3):
"""lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
https://gist.github.com/agramfort/850437 """
from math import ceil
from scipy import linalg
from numpy import sort, abs, zeros, ones, array, sum, median, clip
n = len(x)
r = int(ceil(f*n))
h = [sort(abs(x - x[i]))[r] for i in range(n)]
w = clip(abs((x[:,None] - x[None,:]) / h), 0.0, 1.0)
w = (1 - w**3)**3
yest = zeros(n)
delta = ones(n)
for iteration in range(iter):
for i in range(n):
weights = delta * w[:,i]
b = array([sum(weights*y), sum(weights*y*x)])
A = array([[sum(weights), sum(weights*x)],
[sum(weights*x), sum(weights*x*x)]])
beta = linalg.solve(A, b)
yest[i] = beta[0] + beta[1]*x[i]
residuals = y - yest
s = median(abs(residuals))
delta = clip(residuals / (6.0 * s), -1, 1)
delta = (1 - delta**2)**2
return yest
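# Example (illustrative only, not part of the original module): smoothing a noisy
# 1D profile with the robust lowess fit above; the values of f and iter are arbitrary.
#   from numpy import linspace, sin
#   from numpy.random import randn
#   z = linspace(0, 10, 200)
#   x_noisy = sin(z) + 0.1 * randn(200)
#   x_smooth = non_parametric(z, x_noisy, f=0.25, iter=3)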
#=======================================================================================================================
# Optimize the f parameter of the non-parametric regression by minimizing the mean squared error
#=======================================================================================================================
def opt_f(x, y, z):
from numpy import max, mean, linalg
print 'optimizing f parameter in non-parametric...'
f_list = [0.1, 0.15, 0.20, 0.22, 0.25, 0.3, 0.35, 0.40, 0.45, 0.5]
msx_min = 2
msy_min = 2
f_opt_y = 5
f_opt_x = 5
for f in f_list:
try:
x_fit = non_parametric(z, x, f)
y_fit = non_parametric(z, y, f)
msex = mean_squared_error(x, x_fit)
msey = mean_squared_error(y, y_fit)
if msex < msx_min:
msx_min = msex
f_opt_x = f
if msey < msy_min:
msy_min = msey
f_opt_y = f
            # evaluate_derivative_3D expects pixel dimensions; assume unit spacing here
            x_fit_d, y_fit_d, z_d = evaluate_derivative_3D(x_fit, y_fit, z, 1, 1, 1)
            x_fit_dd, y_fit_dd, z_dd = evaluate_derivative_3D(x_fit_d, y_fit_d, z_d, 1, 1, 1)
amp_xd = max(abs(x_fit_dd))
amp_yd = max(abs(y_fit_dd))
mean_xd = mean(x_fit_dd)
mean_yd = mean(y_fit_dd)
            mean_total = mean_xd + mean_yd  # renamed so numpy's mean is not shadowed on later iterations
# ax = plt.subplot(1,2,1)
# plt.plot(z, x_fit, 'b-', label='centerline')
# plt.plot(z, x_fit_d, 'r-', label='deriv')
# plt.plot(z, x_fit_dd, 'y-', label='derivsec')
# plt.xlabel('x')
# plt.ylabel('z')
# ax = plt.subplot(1,2,2)
# plt.plot(z, y_fit, 'b-', label='centerline')
# plt.plot(z, y_fit_d, 'r-', label='deriv')
# plt.plot(z, y_fit_dd, 'r-', label='fit')
# plt.xlabel('y')
# plt.ylabel('z')
# handles, labels = ax.get_legend_handles_labels()
# ax.legend(handles, labels)
# plt.show()
print 'AMP', amp_xd, amp_yd
            print 'MEAN', mean_xd, mean_yd, mean_total
except linalg.linalg.LinAlgError:
print 'LinAlgError raised'
print msx_min, f_opt_x
print msy_min, f_opt_y
return f_opt_x, f_opt_y
#=======================================================================================================================
# Univariate Spline fitting
#=======================================================================================================================
def Univariate_Spline(x, y, w=None, bbox=[None, None], k=3, s=None) :
from scipy.interpolate import UnivariateSpline
s = UnivariateSpline(x, y, w, bbox, k, s)
ys = s(x)
return ys
#=======================================================================================================================
# 3D B-Spline function, sct_nurbs
#=======================================================================================================================
#def b_spline_nurbs(x, y, z, control_points=0, degree=3,point_number=3000):
def b_spline_nurbs(x, y, z, fname_centerline=None, degree=3, point_number=3000, nbControl=-1, verbose=1):
from math import log
from msct_nurbs import NURBS
"""x.reverse()
y.reverse()
z.reverse()"""
sct.printv('\nFitting centerline using B-spline approximation...', verbose)
data = [[x[n], y[n], z[n]] for n in range(len(x))]
# if control_points == 0:
# nurbs = NURBS(degree, point_number, data) # BE very careful with the spline order that you choose : if order is too high ( > 4 or 5) you need to set a higher number of Control Points (cf sct_nurbs ). For the third argument (number of points), give at least len(z_centerline)+500 or higher
# else:
# print 'In b_spline_nurbs we get control_point = ', control_points
# nurbs = NURBS(degree, point_number, data, False, control_points)
if nbControl == -1:
centerlineSize = getSize(x, y, z, fname_centerline)
nbControl = 30*log(centerlineSize, 10) - 42
nbControl = round(nbControl)
nurbs = NURBS(degree, point_number, data, False, nbControl, verbose)
P = nurbs.getCourbe3D()
x_fit=P[0]
y_fit=P[1]
z_fit=P[2]
Q = nurbs.getCourbe3D_deriv()
x_deriv=Q[0]
y_deriv=Q[1]
z_deriv=Q[2]
"""x_fit = x_fit[::-1]
y_fit = x_fit[::-1]
z_fit = x_fit[::-1]
x_deriv = x_fit[::-1]
y_deriv = x_fit[::-1]
z_deriv = x_fit[::-1]"""
PC = nurbs.getControle()
PC_x = [p[0] for p in PC]
PC_y = [p[1] for p in PC]
PC_z = [p[2] for p in PC]
if verbose == 2:
import matplotlib.pyplot as plt
plt.figure(1)
#ax = plt.subplot(211)
plt.subplot(211)
plt.plot(z, x, 'r.')
plt.plot(z_fit, x_fit)
plt.plot(PC_z,PC_x,'go')
plt.title("X")
#ax.set_aspect('equal')
plt.xlabel('z')
plt.ylabel('x')
#ay = plt.subplot(212)
plt.subplot(212)
plt.plot(z, y, 'r.')
plt.plot(z_fit, y_fit)
plt.plot(PC_z,PC_y,'go')
plt.title("Y")
#ay.set_aspect('equal')
plt.xlabel('z')
plt.ylabel('y')
plt.show()
return x_fit, y_fit, z_fit, x_deriv, y_deriv, z_deriv
#=======================================================================================================================
# 3D B-Spline function using ITK
#=======================================================================================================================
def b_spline_nurbs_itk(fname_centerline, numberOfLevels=10):
print '\nFitting centerline using B-spline approximation (using ITK)...'
import sct_utils as sct
status, output = sct.run("isct_bsplineapproximator -i "+fname_centerline+" -o tmp.centerline.txt -l "+str(numberOfLevels))
if (status != 0):
print "WARNING: \n"+output
f = open('tmp.centerline.txt', 'r')
x_fit = []
y_fit = []
z_fit = []
x_deriv = []
y_deriv = []
z_deriv = []
for line in f:
center = line.split(' ')
x_fit.append(float(center[0]))
y_fit.append(float(center[1]))
z_fit.append(float(center[2]))
x_deriv.append(float(center[3]))
y_deriv.append(float(center[4]))
z_deriv.append(float(center[5]))
return x_fit, y_fit, z_fit, x_deriv, y_deriv, z_deriv
#=======================================================================================================================
# get size
#=======================================================================================================================
def getSize(x, y, z, file_name=None):
from commands import getstatusoutput
from math import sqrt
# get pixdim
if file_name is not None:
cmd1 = 'fslval '+file_name+' pixdim1'
status, output = getstatusoutput(cmd1)
p1 = float(output)
cmd2 = 'fslval '+file_name+' pixdim2'
status, output = getstatusoutput(cmd2)
p2 = float(output)
cmd3 = 'fslval '+file_name+' pixdim3'
status, output = getstatusoutput(cmd3)
p3 = float(output)
else:
p1, p2, p3 = 1.0, 1.0, 1.0
# Centerline size
s = 0
for i in xrange (len(x)-1):
        s += sqrt((p1*(x[i+1]-x[i]))**2+(p2*(y[i+1]-y[i]))**2+(p3*(z[i+1]-z[i]))**2)
#print "centerline size: ", s
return s
#=======================================================================================================================
# functions to get centerline size
#=======================================================================================================================
def getPxDimensions(file_name):
from commands import getstatusoutput
cmd1 = 'fslval '+file_name+' pixdim1'
status, output = getstatusoutput(cmd1)
p1 = float(output)
cmd2 = 'fslval '+file_name+' pixdim2'
status, output = getstatusoutput(cmd2)
p2 = float(output)
cmd3 = 'fslval '+file_name+' pixdim3'
status, output = getstatusoutput(cmd3)
p3 = float(output)
return p1, p2, p3
#=======================================================================================================================
# 3D B-Spline function, python function
#=======================================================================================================================
def b_spline_python(x, y, z, s = 0, k = 3, nest = -1):
"""see http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splprep.html for full input information"""
from scipy.interpolate import splprep, splev
tckp, u = splprep([x,y,z], s = s, k = k, nest = nest)
xnew, ynew, znew = splev(u, tckp)
return xnew, ynew, znew
#=======================================================================================================================
# lowpass filter
#=======================================================================================================================
def lowpass(y):
"""Signal smoothing by low pass filtering.
    This method applies a Butterworth low-pass filter of order 5 to the signal. It filters out the
    higher frequencies that are responsible for abrupt changes, thus smoothing the signal. Output edges are different
    from input edges.
input:
y: input signal (type: list)
output:
y_smooth : filtered signal (type: ndarray)
"""
from scipy.fftpack import fftfreq, fft
from scipy.signal import filtfilt, iirfilter
from numpy import abs, amax
frequency = fftfreq(len(y))
spectrum = abs(fft(y, n=None, axis=-1, overwrite_x=False))
Wn = amax(frequency)/10
N = 5 # Order of the filter
b, a = iirfilter(N, Wn, rp=None, rs=None, btype='low', analog=False, ftype='butter', output='ba')
y_smooth = filtfilt(b, a, y, axis=-1, padtype=None)
return y_smooth
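# Example (illustrative only, not part of the original module): lowpass derives its
# cutoff from the signal's own spectrum (amax(frequency)/10), so it takes no tuning
# parameters:
#   x_smooth = lowpass(x_centerline)
# where x_centerline is a 1D sequence of coordinates sampled along z.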
#=======================================================================================================================
# moving_average
#=======================================================================================================================
def moving_average(y, n=3):
from numpy import cumsum
y_smooth = cumsum(y, dtype=float)
y_smooth[n:] = y_smooth[n:] - y_smooth[:-n]
return y_smooth[n - 1:] / n
#=======================================================================================================================
# mean_squared_error
#=======================================================================================================================
def mean_squared_error(x, x_fit):
mse = 0
    if len(x_fit) == len(x) and len(x) != 0:
n = len(x)
for i in range(0, len(x)):
mse += (x[i]-x_fit[i])*(x[i]-x_fit[i])
mse = float(mse)
mse *= (1/float(n))
return mse
else:
print "cannot calculate the mean squared error, check if the argument have the same length. \n"
#=======================================================================================================================
# windowing
#=======================================================================================================================
def smoothing_window(x, window_len=11, window='hanning', verbose = 0, robust=0):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (of the window size) at both ends so that transient parts are minimized
    at the beginning and end of the output signal.
input:
x: the input signal (type: array)
window_len: the dimension of the smoothing window (in number of points); should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
y: the smoothed signal (type: array)
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smoothing_window(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
"""
from numpy import append, insert, ones, convolve, hanning # IMPORTANT: here, we only import hanning. For more windows, add here.
from math import ceil, floor
import sct_utils as sct
# outlier detection
if robust:
mask = outliers_detection(x)
x = outliers_completion(mask)
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
# if x.size < window_len:
# raise ValueError, "Input vector needs to be bigger than window size."
if window_len < 3:
sct.printv('Window size is too small. No smoothing was applied.', verbose=verbose, type='warning')
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window can only be the following: 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
## Checking the window's size
nb_points = x.shape[0]
#The number of points of the curve must be superior to int(window_length/(2.0*pz))
if window_len > int(nb_points):
window_len = int(nb_points)
sct.printv("WARNING: The smoothing window is larger than the number of points. New value: "+str(window_len), verbose=verbose, type='warning')
# make window_len as odd integer (x = x+1 if x is even)
window_len_int = ceil((floor(window_len) + 1)/2)*2 - 1
# s = r_[x[window_len_int-1:0:-1], x, x[-1:-window_len_int:-1]]
# Creation of the window
if window == 'flat': #moving average
w = ones(window_len, 'd')
else:
w = eval(window+'(window_len_int)')
##Implementation of an extended curve to apply the smoothing on in order to avoid edge effects
# Extend the curve before smoothing
x_extended = x
size_curve = x.shape[0]
size_padding = int(round((window_len_int-1)/2.0))
for i in range(size_padding):
x_extended = append(x_extended, 2*x[-1] - x[-2-i])
x_extended = insert(x_extended, 0, 2*x[0] - x[i+1])
# Convolution of the window with the extended signal
y = convolve(x_extended, w/w.sum(), mode='valid')
# Display smoothing
if verbose == 2:
import matplotlib.pyplot as plt
from copy import copy
z_extended = [i for i in range(x_extended.shape[0])]
# Create x_display to visualize concording results
x_display = copy(x_extended)
for i in range(size_padding - 1):
x_display[i] = 0
x_display[-i-1] = 0
plt.figure()
pltx_ext, = plt.plot(z_extended, x_extended, 'go')
pltx, = plt.plot(z_extended[size_padding:size_padding + size_curve], x_display[size_padding:size_padding + size_curve], 'bo')
pltx_fit, = plt.plot(z_extended[size_padding:size_padding + size_curve], y, 'r', linewidth=2)
plt.title("Type of window: %s Window_length= %d mm" % (window, window_len))
plt.xlabel('z')
plt.ylabel('x')
plt.legend([pltx_ext, pltx, pltx_fit], ['Extended', 'Normal', 'Smoothed'])
plt.show()
return y
def outliers_detection(data, type='median', factor=2, return_filtered_signal='no', verbose=0):
"""Detect outliers within a signal.
This method is based on the comparison of the distance between points of the signal and the mean of the signal.
There are two types of detection process.
    -'std' process compares the distance between the mean of the signal
    and the points of the signal with the std. If the distance is greater than factor * std then the point is considered
    an outlier
    -'median' process first detects extreme outliers using the Median Absolute Deviation (MAD) and then calculates the
    std with the filtered signal (i.e. the signal without the extreme outliers). It then uses the same process as the
    'std' process, comparing the distance between the mean and the points to the std. The idea behind this is that the
    calculation of the std is biased by the presence of the outliers; removing extreme ones beforehand improves
    the accuracy of the algorithm. (http://eurekastatistics.com/using-the-median-absolute-deviation-to-find-outliers)
input:
data: the input signal (type: array)
type: the type of algorithm process ('median' or 'std') (type: string)
    factor: the sensitivity of outlier detection (if infinite, no outlier will be found) (type: int or float)
    return_filtered_signal: option to ask for the 'filtered signal' (i.e. the signal of smaller shape that presents
    no outliers) ('yes' or 'no')
verbose: display parameter; specify 'verbose = 2' if display is desired (type: int)
output:
    mask: a mask of the same shape as the input signal that takes the same values for non-outlier points and 'nan' values for
    outliers (type: array)
    filtered [optional]: signal of smaller shape that presents no outliers (type: array)
TODO: other outlier detection algorithms could be implemented
"""
from numpy import mean, median, std, isnan, asarray
from copy import copy
if type == 'std':
u = mean(data)
s = std(data)
index_1 = data > (u + factor * s)
index_2 = (u - factor * s) > data
filtered = [e for e in data if (u - factor * s < e < u + factor * s)]
mask = copy(data)
mask[index_1] = None
mask[index_2] = None
if type == 'median':
# Detect extrem outliers using median
d = abs(data - median(data))
mdev = 1.4826 * median(d)
s = d/mdev if mdev else 0.
mean_s = mean(s)
        index_1 = s > 5 * mean_s
mask_1 = copy(data)
mask_1[index_1] = None
filtered_1 = [e for i,e in enumerate(data.tolist()) if not isnan(mask_1[i])]
# Recalculate std using filtered variable and detect outliers with threshold factor * std
u = mean(filtered_1)
std_1 = std(filtered_1)
filtered = [e for e in data if (u - factor * std_1 < e < u + factor * std_1)]
index_1_2 = data > (u + factor * std_1)
index_2_2 = (u - factor * std_1) > data
mask = copy(data)
mask[index_1_2] = None
mask[index_2_2] = None
if verbose==2:
import matplotlib.pyplot as plt
plt.figure(1)
plt.subplot(211)
plt.plot(data, 'bo')
axes = plt.gca()
y_lim = axes.get_ylim()
plt.title("Before outliers deletion")
plt.subplot(212)
plt.plot(mask, 'bo')
plt.ylim(y_lim)
plt.title("After outliers deletion")
plt.show()
    if return_filtered_signal == 'yes':
filtered = asarray(filtered)
return filtered, mask
else:
return mask
def outliers_completion(mask, verbose=0):
"""Replace outliers within a signal.
    This method is based on the replacement of outliers using linear interpolation with the closest non-outlier points, by
    recurrence. We browse through the signal from left to right, replacing each outlier by the average of the two
    closest non-outlier points. Once an outlier is replaced, it is no longer considered an outlier and may be used for
    the calculation of the replacement value of the next outlier (recurrence process).
    To be used after outliers_detection.
input:
mask: the input signal (type: array) that takes 'nan' values at the position of the outlier to be retrieved
verbose: display parameters; specify 'verbose = 2' if display is desired (type: int)
output:
signal_completed: the signal of input that has been completed (type: array)
example:
mask_x = outliers_detection(x, type='median', factor=factor, return_filtered_signal='no', verbose=0)
x_no_outliers = outliers_completion(mask_x, verbose=0)
    N.B.: this outlier replacement technique is not a good statistical solution. Our motivation for replacing outliers is
    that we need to treat data of the same shape, but by doing so we are also distorting the signal.
"""
from numpy import nan_to_num, nonzero, transpose, append, insert, isnan
# Complete mask that as nan values by linear interpolation of the closest points
#Define signal completed
signal_completed = nan_to_num(mask)
# take index of all non nan points
X_signal_completed = nonzero(signal_completed)
X_signal_completed = transpose(X_signal_completed)
    #initialization: we set the extreme values to avoid edge effects
if len(X_signal_completed) != 0:
signal_completed[0] = signal_completed[X_signal_completed[0]]
signal_completed[-1] = signal_completed[X_signal_completed[-1]]
#Add two rows to the vector X_signal_completed:
# one before as signal_completed[0] is now diff from 0
# one after as signal_completed[-1] is now diff from 0
X_signal_completed = append(X_signal_completed, len(signal_completed)-1)
X_signal_completed = insert(X_signal_completed, 0, 0)
#linear interpolation
#count_zeros=0
for i in range(1,len(signal_completed)-1):
if signal_completed[i]==0:
#signal_completed[i] = ((X_signal_completed[i-count_zeros]-i) * signal_completed[X_signal_completed[i-1-count_zeros]] + (i-X_signal_completed[i-1-count_zeros]) * signal_completed[X_signal_completed[i-count_zeros]])/float(X_signal_completed[i-count_zeros]-X_signal_completed[i-1-count_zeros]) # linear interpolation ponderate by distance with closest non zero points
#signal_completed[i] = 0.25 * (signal_completed[X_signal_completed[i-1-count_zeros]] + signal_completed[X_signal_completed[i-count_zeros]] + signal_completed[X_signal_completed[i-2-count_zeros]] + signal_completed[X_signal_completed[i-count_zeros+1]]) # linear interpolation with closest non zero points (2 points on each side)
signal_completed[i] = 0.5 * (signal_completed[X_signal_completed[i-1]] + signal_completed[X_signal_completed[i]]) # linear interpolation with closest non zero points
#redefine X_signal_completed
X_signal_completed = nonzero(signal_completed)
X_signal_completed = transpose(X_signal_completed)
#count_zeros += 1
if verbose==2:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(2,1,1)
plt.plot(mask, 'bo')
plt.title("Before outliers completion")
axes = plt.gca()
y_lim = axes.get_ylim()
plt.subplot(2,1,2)
plt.plot(signal_completed, 'bo')
plt.title("After outliers completion")
plt.ylim(y_lim)
plt.show()
return signal_completed | mit | 1,639,657,995,958,790,400 | 40.612188 | 377 | 0.509337 | false |
watchdogpolska/feder | feder/cases/filters.py | 1 | 2954 | import django_filters
from dal import autocomplete
from django import forms
from django.utils.translation import ugettext_lazy as _
from teryt_tree.dal_ext.filters import VoivodeshipFilter, CountyFilter, CommunityFilter
from .models import Case
from feder.main.mixins import DisabledWhenFilterSetMixin
from feder.teryt.filters import (
DisabledWhenVoivodeshipFilter,
DisabledWhenCountyFilter,
DisabledWhenCommunityFilter,
)
from feder.cases_tags.models import Tag
from feder.monitorings.models import Monitoring
class CaseFilter(DisabledWhenFilterSetMixin, django_filters.FilterSet):
created = django_filters.DateRangeFilter(label=_("Creation date"))
voivodeship = DisabledWhenVoivodeshipFilter()
county = DisabledWhenCountyFilter()
community = DisabledWhenCommunityFilter()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filters["name"].lookup_expr = "icontains"
self.filters["name"].label = _("Name")
self.filters["monitoring"].field.widget = autocomplete.ModelSelect2(
url="monitorings:autocomplete"
)
self.filters["institution"].field.widget = autocomplete.ModelSelect2(
url="institutions:autocomplete"
)
class Meta:
model = Case
fields = [
"name",
"monitoring",
"institution",
"created",
"confirmation_received",
"response_received",
]
class CaseReportFilter(django_filters.FilterSet):
monitoring = django_filters.ModelChoiceFilter(queryset=Monitoring.objects.all())
name = django_filters.CharFilter(
label=_("Institution name"),
field_name="institution__name",
lookup_expr="icontains",
)
voivodeship = VoivodeshipFilter(
widget=autocomplete.ModelSelect2(url="teryt:voivodeship-autocomplete")
)
county = CountyFilter(
widget=autocomplete.ModelSelect2(
url="teryt:county-autocomplete", forward=["voivodeship"]
)
)
community = CommunityFilter(
widget=autocomplete.ModelSelect2(
url="teryt:community-autocomplete", forward=["county"]
)
)
tags = django_filters.ModelMultipleChoiceFilter(
label=_("Tags"), field_name="tags", widget=forms.CheckboxSelectMultiple
)
def __init__(self, data=None, queryset=None, *, request=None, prefix=None):
super().__init__(data, queryset, request=request, prefix=prefix)
case = queryset.first()
self.filters["tags"].queryset = (
Tag.objects.for_monitoring(case.monitoring) if case else Tag.objects.none()
)
class Meta:
model = Case
fields = [
"monitoring",
"name",
"voivodeship",
"county",
"community",
"tags",
"confirmation_received",
"response_received",
]
| mit | -3,434,448,243,723,590,000 | 32.191011 | 87 | 0.641842 | false |
NoneGG/aredis | tests/client/test_scripting.py | 1 | 4923 | from __future__ import with_statement
import pytest
from aredis.exceptions import (NoScriptError,
ResponseError)
from aredis.utils import b
multiply_script = """
local value = redis.call('GET', KEYS[1])
value = tonumber(value)
return value * ARGV[1]"""
msgpack_hello_script = """
local message = cmsgpack.unpack(ARGV[1])
local name = message['name']
return "hello " .. name
"""
msgpack_hello_script_broken = """
local message = cmsgpack.unpack(ARGV[1])
local names = message['name']
return "hello " .. name
"""
class TestScripting:
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_eval(self, r):
await r.flushdb()
await r.set('a', 2)
# 2 * 3 == 6
assert await r.eval(multiply_script, 1, 'a', 3) == 6
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_evalsha(self, r):
await r.set('a', 2)
sha = await r.script_load(multiply_script)
# 2 * 3 == 6
assert await r.evalsha(sha, 1, 'a', 3) == 6
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_evalsha_script_not_loaded(self, r):
await r.set('a', 2)
sha = await r.script_load(multiply_script)
# remove the script from Redis's cache
await r.script_flush()
with pytest.raises(NoScriptError):
await r.evalsha(sha, 1, 'a', 3)
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_script_loading(self, r):
# get the sha, then clear the cache
sha = await r.script_load(multiply_script)
await r.script_flush()
assert await r.script_exists(sha) == [False]
await r.script_load(multiply_script)
assert await r.script_exists(sha) == [True]
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_script_object(self, r):
await r.script_flush()
await r.set('a', 2)
multiply = r.register_script(multiply_script)
precalculated_sha = multiply.sha
assert precalculated_sha
assert await r.script_exists(multiply.sha) == [False]
# Test second evalsha block (after NoScriptError)
assert await multiply.execute(keys=['a'], args=[3]) == 6
# At this point, the script should be loaded
assert await r.script_exists(multiply.sha) == [True]
# Test that the precalculated sha matches the one from redis
assert multiply.sha == precalculated_sha
# Test first evalsha block
assert await multiply.execute(keys=['a'], args=[3]) == 6
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_script_object_in_pipeline(self, r):
await r.script_flush()
multiply = r.register_script(multiply_script)
precalculated_sha = multiply.sha
assert precalculated_sha
pipe = await r.pipeline()
await pipe.set('a', 2)
await pipe.get('a')
await multiply.execute(keys=['a'], args=[3], client=pipe)
assert await r.script_exists(multiply.sha) == [False]
# [SET worked, GET 'a', result of multiple script]
assert await pipe.execute() == [True, b('2'), 6]
# The script should have been loaded by pipe.execute()
assert await r.script_exists(multiply.sha) == [True]
# The precalculated sha should have been the correct one
assert multiply.sha == precalculated_sha
# purge the script from redis's cache and re-run the pipeline
# the multiply script should be reloaded by pipe.execute()
await r.script_flush()
pipe = await r.pipeline()
await pipe.set('a', 2)
await pipe.get('a')
await multiply.execute(keys=['a'], args=[3], client=pipe)
assert await r.script_exists(multiply.sha) == [False]
# [SET worked, GET 'a', result of multiple script]
assert await pipe.execute() == [True, b('2'), 6]
assert await r.script_exists(multiply.sha) == [True]
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_eval_msgpack_pipeline_error_in_lua(self, r):
msgpack_hello = r.register_script(msgpack_hello_script)
assert msgpack_hello.sha
pipe = await r.pipeline()
# avoiding a dependency to msgpack, this is the output of
# msgpack.dumps({"name": "joe"})
msgpack_message_1 = b'\x81\xa4name\xa3Joe'
await msgpack_hello.execute(args=[msgpack_message_1], client=pipe)
assert await r.script_exists(msgpack_hello.sha) == [False]
assert (await pipe.execute())[0] == b'hello Joe'
assert await r.script_exists(msgpack_hello.sha) == [True]
msgpack_hello_broken = r.register_script(msgpack_hello_script_broken)
await msgpack_hello_broken.execute(args=[msgpack_message_1], client=pipe)
with pytest.raises(ResponseError) as excinfo:
await pipe.execute()
assert excinfo.type == ResponseError
| mit | 5,325,884,767,146,094,000 | 37.162791 | 81 | 0.629494 | false |
MalloyDelacroix/DownloaderForReddit | DownloaderForReddit/extractors/vidble_extractor.py | 1 | 3356 | """
Downloader for Reddit takes a list of reddit users and subreddits and downloads content posted to reddit either by the
users or on the subreddits.
Copyright (C) 2017, Kyle Hickey
This file is part of the Downloader for Reddit.
Downloader for Reddit is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Downloader for Reddit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Downloader for Reddit. If not, see <http://www.gnu.org/licenses/>.
"""
from bs4 import BeautifulSoup
from .base_extractor import BaseExtractor
from ..core.errors import Error
from ..core import const
class VidbleExtractor(BaseExtractor):
url_key = ['vidble']
def __init__(self, post, **kwargs):
"""
A subclass of the BaseExtractor class. This class interacts exclusively with the Vidble website via
BeautifulSoup4.
"""
super().__init__(post, **kwargs)
self.vidble_base = "https://vidble.com"
def extract_content(self):
try:
if '/album/' in self.url:
self.extract_album()
else:
# We can convert show and explore links to single links by removing the show/explore from the url
self.url = self.url.replace('/show/', '/').replace('/explore/', '/')
if self.url.lower().endswith(const.ALL_EXT):
self.extract_direct_link()
else:
self.extract_single()
except Exception:
message = 'Failed to locate content'
self.handle_failed_extract(error=Error.FAILED_TO_LOCATE, message=message, extractor_error_message=message)
def get_imgs(self):
soup = BeautifulSoup(self.get_text(self.url), 'html.parser')
return soup.find_all('img')
def extract_single(self):
domain, vidble_id = self.url.rsplit('/', 1)
# There should only be one image
img = self.get_imgs()[0]
# We only need to get the filename from the image
link = img.get('src')
if link is not None:
base, extension = link.rsplit('.', 1)
file_name = "{}.{}".format(vidble_id, extension)
url = self.vidble_base + '/' + file_name
self.make_content(url, extension, media_id=vidble_id)
def extract_album(self):
# We will use the undocumented API specified here:
# https://www.reddit.com/r/Enhancement/comments/29nik6/feature_request_inline_image_expandos_for_vidible/cinha50/
json = self.get_json(self.url + "?json=1")
pics = json['pics']
count = 1
for raw_pic in pics:
domain, file_name = raw_pic.rsplit('/', 1)
file_name = file_name.replace('_med', '')
base, extension = file_name.rsplit('.', 1)
url = "https:{}/{}".format(domain, file_name)
self.make_content(url, extension, count=count, media_id=base)
count += 1
| gpl-3.0 | -5,193,640,190,564,752,000 | 37.574713 | 121 | 0.632896 | false |
mozilla/socorro | webapp-django/crashstats/crashstats/tests/test_utils.py | 1 | 17515 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import copy
import json
import os
import os.path
import pytest
import requests
import requests_mock
from django.conf import settings
from django.http import HttpResponse
from django.utils.encoding import smart_text
from crashstats.crashstats import utils
def test_enhance_frame():
vcs_mappings = {
"hg": {
"hg.m.org": (
"http://hg.m.org/%(repo)s/annotate/%(revision)s/%(file)s#l%(line)s"
)
}
}
# Test with a file that uses a vcs_mapping.
# Also test function sanitizing.
actual = {
"frame": 0,
"module": "bad.dll",
"function": "Func(A * a,B b)",
"file": "hg:hg.m.org/repo/name:dname/fname:rev",
"line": 576,
}
utils.enhance_frame(actual, vcs_mappings)
expected = {
"function": "Func(A* a, B b)",
"short_signature": "Func",
"line": 576,
"source_link": "http://hg.m.org/repo/name/annotate/rev/dname/fname#l576",
"file": "dname/fname",
"frame": 0,
"signature": "Func(A* a, B b)",
"module": "bad.dll",
}
assert actual == expected
# Now with a file that has VCS info but isn't in vcs_mappings.
actual = {
"frame": 0,
"module": "bad.dll",
"function": "Func",
"file": "git:git.m.org/repo/name:dname/fname:rev",
"line": 576,
}
utils.enhance_frame(actual, vcs_mappings)
expected = {
"function": "Func",
"short_signature": "Func",
"line": 576,
"file": "fname",
"frame": 0,
"signature": "Func",
"module": "bad.dll",
}
assert actual == expected
# Test with no VCS info at all.
actual = {
"frame": 0,
"module": "bad.dll",
"function": "Func",
"file": "/foo/bar/file.c",
"line": 576,
}
utils.enhance_frame(actual, vcs_mappings)
expected = {
"function": "Func",
"short_signature": "Func",
"line": 576,
"file": "/foo/bar/file.c",
"frame": 0,
"signature": "Func",
"module": "bad.dll",
}
assert actual == expected
# Test with no source info at all.
actual = {"frame": 0, "module": "bad.dll", "function": "Func"}
utils.enhance_frame(actual, vcs_mappings)
expected = {
"function": "Func",
"short_signature": "Func",
"frame": 0,
"signature": "Func",
"module": "bad.dll",
}
assert actual == expected
# Test with no function info.
actual = {"frame": 0, "module": "bad.dll", "module_offset": "0x123"}
utils.enhance_frame(actual, vcs_mappings)
expected = {
"short_signature": "bad.dll@0x123",
"frame": 0,
"signature": "bad.dll@0x123",
"module": "bad.dll",
"module_offset": "0x123",
}
assert actual == expected
# Test with no module info.
actual = {"frame": 0, "offset": "0x1234"}
utils.enhance_frame(actual, vcs_mappings)
expected = {
"short_signature": "@0x1234",
"frame": 0,
"signature": "@0x1234",
"offset": "0x1234",
}
assert actual == expected
def test_enhance_frame_s3_generated_sources():
"""Test a specific case when the frame references a S3 vcs
and the file contains a really long sha string"""
original_frame = {
"file": (
"s3:gecko-generated-sources:36d62ce2ec2925f4a13e44fe534b246c23b"
"4b3d5407884d3bbfc9b0d9aebe4929985935ae582704c06e994ece0d1e7652"
"8ff1edf4543e400d0aaa8f7251b15ca/ipc/ipdl/PCompositorBridgeChild.cpp:"
),
"frame": 22,
"function": (
"mozilla::layers::PCompositorBridgeChild::OnMessageReceived(IP"
"C::Message const&)"
),
"function_offset": "0xd9d",
"line": 1495,
"module": "XUL",
"module_offset": "0x7c50bd",
"normalized": "mozilla::layers::PCompositorBridgeChild::OnMessageReceived",
"offset": "0x108b7b0bd",
"short_signature": "mozilla::layers::PCompositorBridgeChild::OnMessageReceived",
"signature": (
"mozilla::layers::PCompositorBridgeChild::OnMessageReceived(IP"
"C::Message const&)"
),
"trust": "cfi",
}
# Remember, enhance_frame() mutates the dict.
frame = copy.copy(original_frame)
utils.enhance_frame(frame, {})
    # Because it can't find a mapping in 'vcs_mappings' for the frame's
    # 'file', the default behavior is to extract just the file's basename.
    assert frame["file"] == "PCompositorBridgeChild.cpp"
# Try again, now with 's3' in vcs_mappings.
frame = copy.copy(original_frame)
utils.enhance_frame(
frame,
{
"s3": {
"gecko-generated-sources": ("https://example.com/%(file)s#L-%(line)s")
}
},
)
# There's a new key in the frame now. This is what's used in the
# <a href> in the HTML.
assert frame["source_link"]
expected = (
"https://example.com/36d62ce2ec2925f4a13e44fe534b246c23b4b3d540788"
"4d3bbfc9b0d9aebe4929985935ae582704c06e994ece0d1e76528ff1edf4543e4"
"00d0aaa8f7251b15ca/ipc/ipdl/PCompositorBridgeChild.cpp#L-1495"
)
assert frame["source_link"] == expected
    # And the link's text is the frame's 'file' but without the 128 char
# sha.
assert frame["file"] == "ipc/ipdl/PCompositorBridgeChild.cpp"
def test_enhance_json_dump():
vcs_mappings = {
"hg": {
"hg.m.org": (
"http://hg.m.org/%(repo)s/annotate/%(revision)s/%(file)s#l%(line)s"
)
}
}
actual = {
"threads": [
{
"frames": [
{
"frame": 0,
"module": "bad.dll",
"function": "Func",
"file": "hg:hg.m.org/repo/name:dname/fname:rev",
"line": 576,
},
{
"frame": 1,
"module": "another.dll",
"function": "Func2",
"file": "hg:hg.m.org/repo/name:dname/fname:rev",
"line": 576,
},
]
},
{
"frames": [
{
"frame": 0,
"module": "bad.dll",
"function": "Func",
"file": "hg:hg.m.org/repo/name:dname/fname:rev",
"line": 576,
},
{
"frame": 1,
"module": "another.dll",
"function": "Func2",
"file": "hg:hg.m.org/repo/name:dname/fname:rev",
"line": 576,
},
]
},
]
}
utils.enhance_json_dump(actual, vcs_mappings)
expected = {
"threads": [
{
"thread": 0,
"frames": [
{
"frame": 0,
"function": "Func",
"short_signature": "Func",
"line": 576,
"source_link": (
"http://hg.m.org/repo/name/annotate/rev/dname/fname#l576"
),
"file": "dname/fname",
"signature": "Func",
"module": "bad.dll",
},
{
"frame": 1,
"module": "another.dll",
"function": "Func2",
"signature": "Func2",
"short_signature": "Func2",
"source_link": (
"http://hg.m.org/repo/name/annotate/rev/dname/fname#l576"
),
"file": "dname/fname",
"line": 576,
},
],
},
{
"thread": 1,
"frames": [
{
"frame": 0,
"function": "Func",
"short_signature": "Func",
"line": 576,
"source_link": (
"http://hg.m.org/repo/name/annotate/rev/dname/fname#l576"
),
"file": "dname/fname",
"signature": "Func",
"module": "bad.dll",
},
{
"frame": 1,
"module": "another.dll",
"function": "Func2",
"signature": "Func2",
"short_signature": "Func2",
"source_link": (
"http://hg.m.org/repo/name/annotate/rev/dname/fname#l576"
),
"file": "dname/fname",
"line": 576,
},
],
},
]
}
assert actual == expected
def test_find_crash_id():
# A good string, no prefix
input_str = "1234abcd-ef56-7890-ab12-abcdef130802"
crash_id = utils.find_crash_id(input_str)
assert crash_id == input_str
# A good string, with prefix
input_str = "bp-1234abcd-ef56-7890-ab12-abcdef130802"
crash_id = utils.find_crash_id(input_str)
assert crash_id == "1234abcd-ef56-7890-ab12-abcdef130802"
# A good looking string but not a real day
input_str = "1234abcd-ef56-7890-ab12-abcdef130230" # Feb 30th 2013
assert not utils.find_crash_id(input_str)
input_str = "bp-1234abcd-ef56-7890-ab12-abcdef130230"
assert not utils.find_crash_id(input_str)
# A bad string, one character missing
input_str = "bp-1234abcd-ef56-7890-ab12-abcdef12345"
assert not utils.find_crash_id(input_str)
# A bad string, one character not allowed
input_str = "bp-1234abcd-ef56-7890-ab12-abcdef12345g"
assert not utils.find_crash_id(input_str)
# Close but doesn't end with 6 digits
input_str = "f48e9617-652a-11dd-a35a-001a4bd43ed6"
assert not utils.find_crash_id(input_str)
# A random string that does not match
input_str = "somerandomstringthatdoesnotmatch"
assert not utils.find_crash_id(input_str)
def test_json_view_basic(rf):
request = rf.get("/")
def func(request):
return {"one": "One"}
func = utils.json_view(func)
response = func(request)
assert isinstance(response, HttpResponse)
assert json.loads(response.content) == {"one": "One"}
assert response.status_code == 200
def test_json_view_indented(rf):
request = rf.get("/?pretty=print")
def func(request):
return {"one": "One"}
func = utils.json_view(func)
response = func(request)
assert isinstance(response, HttpResponse)
assert json.dumps({"one": "One"}, indent=2) == smart_text(response.content)
assert response.status_code == 200
def test_json_view_already_httpresponse(rf):
request = rf.get("/")
def func(request):
return HttpResponse("something")
func = utils.json_view(func)
response = func(request)
assert isinstance(response, HttpResponse)
assert smart_text(response.content) == "something"
assert response.status_code == 200
def test_json_view_custom_status(rf):
request = rf.get("/")
def func(request):
return {"one": "One"}, 403
func = utils.json_view(func)
response = func(request)
assert isinstance(response, HttpResponse)
assert json.loads(response.content) == {"one": "One"}
assert response.status_code == 403
class TestRenderException:
def test_basic(self):
html = utils.render_exception("hi!")
assert html == "<ul><li>hi!</li></ul>"
def test_escaped(self):
html = utils.render_exception("<hi>")
assert html == "<ul><li><hi></li></ul>"
def test_to_string(self):
try:
raise NameError("<hack>")
except NameError as exc:
html = utils.render_exception(exc)
assert html == "<ul><li><hack></li></ul>"
class TestUtils:
def test_SignatureStats(self):
signature = {
"count": 2,
"term": "EMPTY: no crashing thread identified; ERROR_NO_MINIDUMP_HEADER",
"facets": {
"histogram_uptime": [{"count": 2, "term": 0}],
"startup_crash": [{"count": 2, "term": "F"}],
"cardinality_install_time": {"value": 1},
"is_garbage_collecting": [],
"process_type": [],
"platform": [{"count": 2, "term": ""}],
"hang_type": [{"count": 2, "term": 0}],
},
}
platforms = [
{"short_name": "win", "name": "Windows"},
{"short_name": "mac", "name": "Mac OS X"},
{"short_name": "lin", "name": "Linux"},
{"short_name": "unknown", "name": "Unknown"},
]
signature_stats = utils.SignatureStats(
signature=signature,
rank=1,
num_total_crashes=2,
platforms=platforms,
previous_signature=None,
)
assert signature_stats.rank == 1
assert (
signature_stats.signature_term
== "EMPTY: no crashing thread identified; ERROR_NO_MINIDUMP_HEADER"
)
assert signature_stats.percent_of_total_crashes == 100.0
assert signature_stats.num_crashes == 2
assert signature_stats.num_crashes_per_platform == {
"mac_count": 0,
"lin_count": 0,
"win_count": 0,
}
assert signature_stats.num_crashes_in_garbage_collection == 0
assert signature_stats.num_installs == 1
assert signature_stats.num_crashes == 2
assert signature_stats.num_startup_crashes == 0
assert signature_stats.is_startup_crash == 0
assert signature_stats.is_potential_startup_crash == 0
assert signature_stats.is_startup_window_crash is True
assert signature_stats.is_hang_crash is False
assert signature_stats.is_plugin_crash is False
def get_product_details_files():
product_details_path = os.path.join(settings.SOCORRO_ROOT, "product_details")
return [
os.path.join(product_details_path, fn)
for fn in os.listdir(product_details_path)
if fn.endswith(".json")
]
@pytest.mark.parametrize("fn", get_product_details_files())
def test_product_details_files(fn):
"""Validate product_details/ JSON files."""
fn_basename = os.path.basename(fn)
try:
with open(fn, "r") as fp:
data = json.load(fp)
    except json.decoder.JSONDecodeError as exc:
raise Exception("%s: invalid JSON: %s" % (fn_basename, exc))
if "featured_versions" not in data:
raise Exception('%s: missing "featured_versions" key' % fn_basename)
if not isinstance(data["featured_versions"], list):
raise Exception(
'%s: "featured_versions" value is not a list of strings' % fn_basename
)
for item in data["featured_versions"]:
if not isinstance(item, str):
raise Exception("%s: value %r is not a str" % (fn_basename, item))
class Test_get_manually_maintained_featured_versions:
    # Note that these tests are mocked and contrived; if the URL changes or
    # something along those lines, these tests can still pass even though the
    # code might not work in stage/prod.
def test_exists(self):
with requests_mock.Mocker() as req_mock:
url = "%s/Firefox.json" % settings.PRODUCT_DETAILS_BASE_URL
req_mock.get(url, json={"featured_versions": ["68.0a1", "67.0b", "66.0"]})
featured = utils.get_manually_maintained_featured_versions("Firefox")
assert featured == ["68.0a1", "67.0b", "66.0"]
def test_invalid_file(self):
"""Not JSON or no featured_versions returns None."""
# Not JSON
with requests_mock.Mocker() as req_mock:
url = "%s/Firefox.json" % settings.PRODUCT_DETAILS_BASE_URL
req_mock.get(url, text="this is not json")
featured = utils.get_manually_maintained_featured_versions("Firefox")
assert featured is None
# No featured_versions
with requests_mock.Mocker() as req_mock:
url = "%s/Firefox.json" % settings.PRODUCT_DETAILS_BASE_URL
req_mock.get(url, json={})
featured = utils.get_manually_maintained_featured_versions("Firefox")
assert featured is None
def test_github_down(self):
"""Connection error returns None."""
with requests_mock.Mocker() as req_mock:
url = "%s/Firefox.json" % settings.PRODUCT_DETAILS_BASE_URL
req_mock.get(url, exc=requests.exceptions.ConnectionError)
featured = utils.get_manually_maintained_featured_versions("Firefox")
assert featured is None
| mpl-2.0 | -288,611,671,728,011,800 | 32.172348 | 88 | 0.522809 | false |
kahinton/Dark-Energy-UI-and-MC | Monte_Carlo.py | 1 | 4843 | from __future__ import division
import multiprocessing as mp
import os
import sys
from random import uniform
try:
import numpy as np
except ImportError:
print('This file requires the numpy package to run properly. Please see the readme for instructions on how to install this package.')
try:
from sympy import symbols, simplify, diff
except ImportError:
print('This file requires the sympy package to run properly. Please see the readme for instructions on how to install this package.')
from monte_pack import Universe, e1, e2, ParFile, ResFile, GroFile
###################################################################################
# Welcome to DE_mod_gen. This file allows you to create and test your own models
# of DE in the framework of a multi-field effective field theory. If you are
# interested in the functions being used they are contained in the universe.py
# file.
# Below all the information needed to run the Monte Carlo generator can be entered.
# Each value of lambda is calculated to minimize the number of failed models.
# Please note that growth data will only be correct for models in which growth
# is unchanged from the standard GR solutions.
####################################################################################
# Please name the model you wish to run. This name will also be used in the UI.
name = 'Test_ws'
# Set the number of models to test. Note that only models which produce valid
# results will be presented in the UI, so the final number may be less than this
# amount
number = 1000
# The line below defines symbols needed for the function g, DO NOT CHANGE IT
Yi,ci,yi,xi,u,Lami = symbols('Yi ci yi xi u Lami')
# Define the function g. This is essentially the Lagrangian of the field
# divided by the kinetic energy of the field. For Quintessence this would be
# 1-(ci/Yi), for Ghost Condensates the function is -1+(ci*Yi). To define your
# own please refer to the readme file or to Tsujikawa and Ohashi.
g = 1-(ci/Yi)
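# (Illustrative only, not used below: for a Ghost Condensate model you would
# instead set g = -1+(ci*Yi), as noted in the comment above.)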
# Enter the total number of fields used in each model
fields = 20
# Enter the ranges for each parameter used in your model. The ranges given in the
# UI are the ranges supplied here. The x parameters are analogous to the kinetic
# energy of the fields,the y parameters are analogous to the potential energy,
# the c parameters are the coefficients of the assumed exponential potentials, and
# lastly the u parameter defines the initial density of radiation in the universe.
# Note that for the Ghost Condensate model you must make the string GC = "True"
# In this case the values of x2 and y2 are defined by x1 and y1 to make sure that
# instabilities do not arise
x1min,x1max = .001 , 0.3
y1min,y1max = .001 , 0.3
x2min,x2max = 10**(-14.0) , 10**(-12.0)
y2min,y2max = 10**(-14.0) , 10**(-12.0)
c1min,c1max = .001, 1.0
c2min,c2max = .001, .002
umin,umax = 0.7,0.9
GC = "False"
# If you like you can change the number of processes to split the models between, but
# by default this will just be the number of available processors on your computer
pros = mp.cpu_count()
##################################################################################
##################################################################################
# Do not change anything beyond this point!!!!!
##################################################################################
##################################################################################
g2 = simplify(diff(g, Yi))
g3 = simplify(diff(g2, Yi))
Aterm = simplify((g+(5*Yi*g2)+(2*(Yi**2)*g3))**-1.0)
loc = os.path.dirname(sys.argv[0])
ParFile(loc,name, str(g), (str(g2)), (str(g3)), (str(Aterm)), x1min, x1max,x2min, x2max, y1min, y1max,y2min, y2max,c1min, c1max,c2min, c2max, umin, umax, fields)
Tests = np.array([])
if GC != "True":
for num in xrange(number):
Q = Universe(uniform(x1min,x1max),uniform(x2min,x2max),uniform(y1min,y1max),uniform(y2min,y2max),uniform(c1min,c1max),uniform(c2min,c2max),uniform(umin,umax),fields,str(g),str(g2),str(g3),str(Aterm),e1,e2)
Tests = np.append(Tests, Q)
else:
for num in xrange(number):
x1 = uniform(x1min,x1max)
x2 = uniform(x2min,x2max)
Q = Universe(x1,x2,uniform(.5,1.0)*x1,uniform(.5,1.0)*x2,uniform(c1min,c1max),uniform(c2min,c2max),uniform(umin,umax),fields,str(g),str(g2),str(g3),str(Aterm),e1,e2)
Tests = np.append(Tests, Q)
def Runner(N):
N.Run(0)
return N
Range = np.arange(0,number,1)
if __name__=='__main__':
mp.freeze_support()
pool = mp.Pool()
Tests = pool.map(Runner,Tests)
pool.close()
pool.join()
Data = mp.Process(target = ResFile, args = (loc, name, fields, Tests))
Grow = mp.Process(target = GroFile, args = (loc, name, fields, Tests))
Data.start()
Data.join()
Grow.start()
Grow.join() | mit | -913,478,135,802,579,600 | 39.366667 | 214 | 0.636176 | false |
leapcode/bitmask-dev | src/leap/bitmask/keymanager/wrapper.py | 1 | 5570 | # -*- coding: utf-8 -*-
# wrapper.py
# Copyright (C) 2016 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GPG wrapper for temporary keyrings
"""
import os
import platform
import shutil
import tempfile
from gnupg import GPG
from twisted.logger import Logger
from leap.common.check import leap_assert
try:
from gnupg.gnupg import GPGUtilities
GNUPG_NG = True
except ImportError:
GNUPG_NG = False
class TempGPGWrapper(object):
"""
A context manager that wraps a temporary GPG keyring which only contains
the keys given at object creation.
"""
log = Logger()
def __init__(self, keys=None, gpgbinary=None):
"""
Create an empty temporary keyring and import any given C{keys} into
it.
:param keys: OpenPGP key, or list of.
:type keys: OpenPGPKey or list of OpenPGPKeys
:param gpgbinary: Name for GnuPG binary executable.
:type gpgbinary: C{str}
"""
self._gpg = None
self._gpgbinary = gpgbinary
if not keys:
keys = list()
if not isinstance(keys, list):
keys = [keys]
self._keys = keys
def __enter__(self):
"""
Build and return a GPG keyring containing the keys given on
object creation.
:return: A GPG instance containing the keys given on object creation.
:rtype: gnupg.GPG
"""
self._build_keyring()
return self._gpg
def __exit__(self, exc_type, exc_value, traceback):
"""
Ensure the gpg is properly destroyed.
"""
# TODO handle exceptions and log here
self._destroy_keyring()
def _build_keyring(self):
"""
Create a GPG keyring containing the keys given on object creation.
:return: A GPG instance containing the keys given on object creation.
:rtype: gnupg.GPG
"""
privkeys = [key for key in self._keys if key and key.private is True]
publkeys = [key for key in self._keys if key and key.private is False]
# here we filter out public keys that have a correspondent
# private key in the list because the private key_data by
# itself is enough to also have the public key in the keyring,
# and we want to count the keys afterwards.
privfps = map(lambda privkey: privkey.fingerprint, privkeys)
publkeys = filter(
lambda pubkey: pubkey.fingerprint not in privfps, publkeys)
listkeys = lambda: self._gpg.list_keys()
listsecretkeys = lambda: self._gpg.list_keys(secret=True)
try:
self._gpg = GPG(binary=self._gpgbinary,
homedir=tempfile.mkdtemp())
except TypeError:
# compat-mode with python-gnupg until windows
# support is fixed in gnupg-ng
self._gpg = GPG(gpgbinary=self._gpgbinary,
gnupghome=tempfile.mkdtemp(),
options=[])
leap_assert(len(listkeys()) is 0, 'Keyring not empty.')
# import keys into the keyring:
# concatenating ascii-armored keys, which is correctly
# understood by GPG.
self._gpg.import_keys("".join(
[x.key_data for x in publkeys + privkeys]))
# assert the number of keys in the keyring
leap_assert(
len(listkeys()) == len(publkeys) + len(privkeys),
'Wrong number of public keys in keyring: %d, should be %d)' %
(len(listkeys()), len(publkeys) + len(privkeys)))
leap_assert(
len(listsecretkeys()) == len(privkeys),
'Wrong number of private keys in keyring: %d, should be %d)' %
(len(listsecretkeys()), len(privkeys)))
def _destroy_keyring(self):
"""
Securely erase the keyring.
"""
# TODO: implement some kind of wiping of data or a more
# secure way that
# does not write to disk.
try:
for secret in [True, False]:
for key in self._gpg.list_keys(secret=secret):
self._gpg.delete_keys(
key['fingerprint'],
secret=secret)
leap_assert(len(self._gpg.list_keys()) is 0, 'Keyring not empty!')
except:
raise
finally:
try:
homedir = self._gpg.homedir
except AttributeError:
homedir = self._gpg.gnupghome
leap_assert(homedir != os.path.expanduser('~/.gnupg'),
"watch out! Tried to remove default gnupg home!")
# TODO some windows debug ....
homedir = os.path.normpath(homedir).replace("\\", "/")
homedir = str(homedir.replace("c:/", "c://"))
if platform.system() == "Windows":
self.log.error("BUG! Not erasing folder in Windows")
return
shutil.rmtree(homedir)
| gpl-3.0 | 3,536,690,499,557,021,000 | 33.171779 | 78 | 0.591921 | false |
tobi-wan-kenobi/bumblebee-status | bumblebee_status/modules/contrib/deadbeef.py | 1 | 5484 | # pylint: disable=C0111,R0903
"""Displays the current song being played in DeaDBeeF and provides
some media control bindings.
Left click toggles pause, scroll up skips the current song, scroll
down returns to the previous song.
Parameters:
* deadbeef.format: Format string (defaults to '{artist} - {title}')
Available values are: {artist}, {title}, {album}, {length},
{trackno}, {year}, {comment},
{copyright}, {time}
This is deprecated, but much simpler.
* deadbeef.tf_format: A foobar2000 title formatting-style format string.
These can be much more sophisticated than the standard
format strings. This is off by default, but specifying
any tf_format will enable it. If both deadbeef.format
and deadbeef.tf_format are specified, deadbeef.tf_format
takes priority.
* deadbeef.tf_format_if_stopped: Controls whether or not the tf_format format
string should be displayed even if no song is paused or
playing. This could be useful if you want to implement
your own stop strings with the built in logic. Any non-
null value will enable this (by default the module will
hide itself when the player is stopped).
* deadbeef.previous: Change binding for previous song (default is left click)
* deadbeef.next: Change binding for next song (default is right click)
* deadbeef.pause: Change binding for toggling pause (default is middle click)
Available options for deadbeef.previous, deadbeef.next and deadbeef.pause are:
LEFT_CLICK, RIGHT_CLICK, MIDDLE_CLICK, SCROLL_UP, SCROLL_DOWN
contributed by `joshbarrass <https://github.com/joshbarrass>`_ - many thanks!
"""
import sys
import subprocess
import logging
import core.module
import core.widget
import core.input
import core.decorators
import util.cli
import util.format
class Module(core.module.Module):
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.deadbeef))
buttons = {
"LEFT_CLICK": core.input.LEFT_MOUSE,
"RIGHT_CLICK": core.input.RIGHT_MOUSE,
"MIDDLE_CLICK": core.input.MIDDLE_MOUSE,
"SCROLL_UP": core.input.WHEEL_UP,
"SCROLL_DOWN": core.input.WHEEL_DOWN,
}
self._song = ""
self._format = self.parameter("format", "{artist} - {title}")
self._tf_format = self.parameter("tf_format", "")
self._show_tf_when_stopped = util.format.asbool(
self.parameter("tf_format_if_stopped", False)
)
prev_button = self.parameter("previous", "LEFT_CLICK")
next_button = self.parameter("next", "RIGHT_CLICK")
pause_button = self.parameter("pause", "MIDDLE_CLICK")
self.now_playing = "deadbeef --nowplaying %a;%t;%b;%l;%n;%y;%c;%r;%e"
self.now_playing_tf = "deadbeef --nowplaying-tf "
cmd = "deadbeef "
core.input.register(self, button=buttons[prev_button], cmd=cmd + "--prev")
core.input.register(self, button=buttons[next_button], cmd=cmd + "--next")
core.input.register(
self, button=buttons[pause_button], cmd=cmd + "--play-pause"
)
# modify the tf_format if we don't want it to show on stop
# this adds conditions to the query itself, rather than
# polling to see if deadbeef is running
# doing this reduces the number of calls we have to make
if self._tf_format and not self._show_tf_when_stopped:
self._tf_format = "$if($or(%isplaying%,%ispaused%),{query})".format(
query=self._tf_format
)
@core.decorators.scrollable
def deadbeef(self, widget):
return self.string_song
def hidden(self):
return self.string_song == ""
def update(self):
widgets = self.widgets()
try:
if self._tf_format == "": # no tf format set, use the old style
return self.update_standard(widgets)
return self.update_tf(widgets)
except Exception as e:
logging.exception(e)
self._song = "error"
def update_tf(self, widgets):
## ensure that deadbeef is actually running
## easiest way to do this is to check --nowplaying for
## the string 'nothing'
if util.cli.execute(self.now_playing) == "nothing":
self._song = ""
return
## perform the actual query -- these can be much more sophisticated
data = util.cli.execute(self.now_playing_tf + '"'+self._tf_format+'"')
self._song = data
def update_standard(self, widgets):
data = util.cli.execute(self.now_playing)
if data == "nothing":
self._song = ""
else:
data = data.split(";")
self._song = self._format.format(
artist=data[0],
title=data[1],
album=data[2],
length=data[3],
trackno=data[4],
year=data[5],
comment=data[6],
copyright=data[7],
time=data[8],
)
@property
def string_song(self):
"""\
Returns the current song as a string, either as a unicode() (Python <
3) or a regular str() (Python >= 3)
"""
if sys.version_info.major < 3:
return unicode(self._song)
return str(self._song)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | -3,343,251,353,740,123,000 | 36.306122 | 85 | 0.61178 | false |
danilovazb/sawef | Models/Crawler.py | 1 | 3965 | import threading
import time
import argparse
import requests
import json
import re,sys
from bs4 import BeautifulSoup
class crawler(object):
def request_page(self,url):
regex = re.compile('((https?|ftp)://|www\.)[^\s/$.?#].[^\s]*')
t_or_f = regex.match(url) is None
if t_or_f is True:
pass
else:
try:
user_agent = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; hu-HU; rv:1.7.8) Gecko/20050511 Firefox/1.0.4'}
response = requests.get(url,headers=user_agent,timeout=10.000)
html = response.text
return html
except:
pass
def parser_email(self,html):
regex = re.compile(("([a-z0-9!#$%&'*+\/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+\/=?^_`{|}~-]+)*(@|\sat\s)(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?(\.|\sdot\s))+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)"))
try:
emails = re.findall(regex, html)
for count_email in xrange(len(emails)):
if emails[count_email][0] in self.list_email:
pass
else:
self.list_email.append(emails[count_email][0])
except:
pass
def filtro_http(self,url,link):
if url in link or 'http://' in link or 'https://' in link:
return link
else:
url = "%s%s" % (url, link)
def consulta_link(self,html,url):
soup = BeautifulSoup(html)
for link in soup.find_all('a', href=True):
if link['href'] in self.lista_links or '#' == link['href'] or '/' == link['href']:
pass
else:
try:
link_completo = self.filtro_http(url,link['href'])
html = self.request_page(link_completo)
self.parser_email(html)
self.lista_links.append(link_completo)
except:
pass
def __init__(self,url):
self.url = url
self.list_email = []
self.lista_links = []
def crawl(self):
#try:
url = self.url
ultimo_char = url[len(url)-1]
if '/' == ultimo_char:
pass
else:
url = "%s/" % url
html = self.request_page(url)
self.parser_email(html)
self.consulta_link(html,url)
############
        ## Enable this for a FULL SCAN - WARNING - it takes an extremely long time
############
# for i in xrange(len(self.lista_links)):
# #print lista_links[i]
# html = self.request_page(self.lista_links[i])
# if "www.facebook.com" in self.lista_links[i]:
# pass
# else:
# self.consulta_link(html,self.lista_links[i])
print "Emails: \n"
for email in xrange(len(self.list_email)):
print "[+] %s" % self.list_email[email]
print "\nTwitter:"
for twitter in xrange(len(self.lista_links)):
r_twitter = re.compile('(https?):\/\/(www\.|)twitter\.com\/(#!\/)?[a-zA-Z0-9_]+')
t_or_f = r_twitter.match(self.lista_links[twitter]) is None
if t_or_f is True:
pass
else:
print "[+] %s" % self.lista_links[twitter]
print "\nLinkedin:"
for linkedin in xrange(len(self.lista_links)):
r_linkedin = re.compile('(https?):\/\/(?:www|br|)?(\.?)linkedin\.com\/(in\/|company\/)?[a-zA-Z0-9_]+')
t_or_f = r_linkedin.match(self.lista_links[linkedin]) is None
if t_or_f is True:
pass
else:
print "[+] %s" % self.lista_links[linkedin]
print "\nGoogle Plus:"
for plus in xrange(len(self.lista_links)):
r_plus = re.compile('(https?):\/\/plus\.google\.com\/[+a-zA-Z0-9_]+')
t_or_f = r_plus.match(self.lista_links[plus]) is None
if t_or_f is True:
pass
else:
print "[+] %s" % self.lista_links[plus]
print "\nFacebook:"
for facebook in xrange(len(self.lista_links)):
r_facebook = re.compile('(https?):\/\/(www\.|)facebook\.com\/(pages\/|)[-/+a-zA-Z0-9_]+')
t_or_f = r_facebook.match(self.lista_links[facebook]) is None
if t_or_f is True:
pass
else:
print "[+] %s" % self.lista_links[facebook]
print "\nYoutube:"
for youtube in xrange(len(self.lista_links)):
r_youtube = re.compile('(https?):\/\/(www\.|)youtube\.com\/(user\/)?[a-zA-Z0-9_]+')
t_or_f = r_youtube.match(self.lista_links[youtube]) is None
if t_or_f is True:
pass
else:
print "[+] %s" % self.lista_links[youtube]
# except:
# pass
| gpl-3.0 | 6,717,556,753,020,677,000 | 29.976563 | 182 | 0.587894 | false |
deanishe/alfred-searchio | bin/gen_ddg.py | 1 | 1417 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2016 Dean Jackson <[email protected]>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2017-02-05
#
"""Generate Duck Duck Go engine JSON."""
from __future__ import print_function, absolute_import
from collections import namedtuple
import csv
import json
from common import datapath, mkdata, mkvariant
path = datapath('ddg-variants.tsv')
SEARCH_URL = 'https://duckduckgo.com/?kp=-1&kz=-1&kl={kl}&q={{query}}'
SUGGEST_URL = 'https://duckduckgo.com/ac/?kp=-1&kz=-1&kl={kl}&q={{query}}'
Variant = namedtuple('Variant', 'id name')
def variants():
"""DDG variants from `ddg-variants.tsv`.
Yields:
Variant: DDG variant
"""
with open(path) as fp:
for line in csv.reader(fp, delimiter='\t'):
yield Variant(*[s.decode('utf-8') for s in line])
def main():
"""Print DDG engine JSON to STDOUT."""
data = mkdata(u'Duck Duck Go', u'Alternative search engine',
jsonpath='$[*].phrase',)
for v in variants():
s = mkvariant(v.id.lower(), v.name,
u'Duck Duck Go {}'.format(v.name),
SEARCH_URL.format(kl=v.id),
SUGGEST_URL.format(kl=v.id),
)
data['variants'].append(s)
print(json.dumps(data, sort_keys=True, indent=2))
if __name__ == '__main__':
main()
| mit | -7,735,466,253,962,262,000 | 24.303571 | 74 | 0.59139 | false |
Tset-Noitamotua/_learnpython | LearnPythonTheHardWay/ex39_hashmap.py | 1 | 1543 | # -*- coding: utf-8 -*-
def new(num_buckets=256):
"""Initializes a Map with the given number of buckets."""
aMap = []
for i in range(0, num_buckets):
aMap.append([])
return aMap
def hash_key(aMap, key):
"""Given a key this will create a number and then convert it to
an index for the aMap's buckets."""
return hash(key) % len(aMap)
def get_bucket(aMap, key):
"""Giben a key, find the bucket where it would go."""
bucket_id = hash_key(aMap, key)
return aMap[bucket_id]
def get_slot(aMap, key, default=None):
"""Return the index, key and the value of a slot found in a bucket."""
bucket = get_bucket(aMap, key)
for i, kv in enumerate(bucket):
k, v = kv
if key == k:
return i, k, v
return -1, key, default
def get(aMap, key, default=None):
"""Gets the value in a bucket for the given key, or the default."""
i, k, v = get_slot(aMap, key, default=default)
return v
def set(aMap, key, value):
"""Sets the key to the value, replacing any existing value."""
bucket = get_bucket(aMap, key)
i, k, v = get_slot(aMap, key)
if v:
bucket[i] = (key, value)
else:
bucket.append((key, value))
def delete(aMap, key):
"""Deteltes the given key from the map."""
bucket = get_bucket(aMap, key)
for i in xrange(len(bucket)):
k, v = bucket[i]
if key == k:
del bucket[i]
break
def list(aMap):
"""Prints out what's in the Map."""
for bucket in aMap:
if bucket:
for k, v in bucket:
print k, v
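# Minimal usage sketch (illustrative, not part of the original exercise):
#   states = new()
#   set(states, 'Oregon', 'OR')
#   print get(states, 'Oregon')   # -> OR
#   delete(states, 'Oregon')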
| gpl-2.0 | -1,449,765,843,287,387,400 | 16.133333 | 71 | 0.601557 | false |
anurag03/integration_tests | cfme/tests/networks/test_sdn_topology.py | 1 | 2657 | import pytest
import random
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.utils.wait import wait_for
from cfme.utils.log import logger
pytestmark = [pytest.mark.provider([EC2Provider, AzureProvider, GCEProvider, OpenStackProvider],
scope='module')]
@pytest.fixture(scope='module')
def elements_collection(setup_provider_modscope, appliance, provider):
elements_collection_ = appliance.collections.network_topology_elements
wait_for(elements_collection_.all, timeout=10)
yield elements_collection_
provider.delete_if_exists(cancel=False)
provider.wait_for_delete()
def test_topology_search(request, elements_collection):
"""Testing search functionality in Topology view.
Metadata:
test_flag: sdn
"""
elements = elements_collection.all()
logger.info(str(elements))
element_to_search = random.choice(elements)
search_term = element_to_search.name[:len(element_to_search.name) / 2]
elements_collection.search(search_term)
request.addfinalizer(elements_collection.clear_search)
for element in elements:
logger.info(str(element))
if search_term in element.name:
assert not element.is_opaqued, (
                'Element should not be opaqued. Search: "{}", found: "{}"'.format(
search_term, element.name)
)
else:
assert element.is_opaqued, (
'Element should be opaqued. search: "{}", found: "{}"'.format(
search_term, element.name)
)
def test_topology_toggle_display(elements_collection):
"""Testing display functionality in Topology view.
Metadata:
test_flag: sdn
"""
vis_terms = {True: 'Visible', False: 'Hidden'}
for state in (True, False):
for legend in elements_collection.legends:
if state:
elements_collection.disable_legend(legend)
else:
elements_collection.enable_legend(legend)
for element in elements_collection.all():
assert (
element.type != ''.join(legend.split()).rstrip('s') or
element.is_displayed != state
), (
'Element is {} but should be {} since "{}" display is currently {}'.format(
vis_terms[not state], vis_terms[state], legend,
{True: 'on', False: 'off'}[state])
)
| gpl-2.0 | 9,006,612,870,615,107,000 | 36.422535 | 96 | 0.620625 | false |
tmc/django-cumulus | cumulus/storage.py | 1 | 3970 | """
Custom storage system for Mosso Cloud Files within Django.
"""
import re
from django.conf import settings
from django.core.files import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured
try:
import cloudfiles
from cloudfiles.errors import NoSuchObject
except ImportError:
raise ImproperlyConfigured, "Could not load cloudfiles dependency. See http://www.mosso.com/cloudfiles.jsp."
try:
CUMULUS_USERNAME = settings.CUMULUS_USERNAME
CUMULUS_API_KEY = settings.CUMULUS_API_KEY
CUMULUS_CONTAINER = settings.CUMULUS_CONTAINER
except AttributeError:
raise ImproperlyConfigured, "CUMULUS_USERNAME, CUMULUS_API_KEY, and CUMULUS_CONTAINER must be supplied in settings.py."
CUMULUS_TTL = getattr(settings, 'CUMULUS_TTL', 600)
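# Illustrative settings (placeholder values; only the first three are required,
# CUMULUS_TTL falls back to 600 as above):
#   CUMULUS_USERNAME = 'example-user'
#   CUMULUS_API_KEY = 'example-api-key'
#   CUMULUS_CONTAINER = 'media'
#   CUMULUS_TTL = 600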
class CloudFileStorage(Storage):
"""
Custom storage for Mosso Cloud Files.
"""
def __init__(self):
"""
Here we set up the connection and select the user-supplied container.
If the container isn't public (available on Limelight CDN), we make
it a publicly available container.
"""
self.connection = cloudfiles.get_connection(CUMULUS_USERNAME,
CUMULUS_API_KEY)
self.container = self.connection.get_container(CUMULUS_CONTAINER)
if not self.container.is_public():
self.container.make_public()
def _get_cloud_obj(self, name):
"""
Helper function to get retrieve the requested Cloud Files Object.
"""
return self.container.get_object(name)
def _open(self, name, mode='rb'):
"""
Not sure if this is the proper way to execute this. Would love input.
"""
return File(self._get_cloud_obj(name).read())
def _save(self, name, content):
"""
Here we're opening the content object and saving it to the Cloud Files
service. We have to set the content_type so it's delivered properly
when requested via public URI.
"""
content.open()
if hasattr(content, 'chunks'):
content_str = content.chunks()
else:
content_str = content.read()
cloud_obj = self.container.create_object(name)
# try to pull a content type off of the File object
if hasattr(content, 'content_type'):
cloud_obj.content_type = content.content_type
# it's possible that it's an ImageFieldFile which won't have a direct
# 'content_type' attr. It would live on it's file attr though.
if hasattr(content, 'file') and hasattr(content.file, 'content_type'):
cloud_obj.content_type = content.file.content_type
cloud_obj.send(content_str)
content.close()
return name
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
self.container.delete_object(name)
def exists(self, name):
"""
        Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
try:
self._get_cloud_obj(name)
return True
except NoSuchObject:
return False
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
return ([], self.container.list_objects(path=path))
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
return self._get_cloud_obj(name).size()
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a web browser.
"""
return self._get_cloud_obj(name).public_uri()
| bsd-3-clause | 6,804,919,831,210,843,000 | 33.824561 | 123 | 0.63073 | false |
bdh1011/wau | venv/lib/python2.7/site-packages/notebook/services/sessions/tests/test_sessions_api.py | 1 | 4018 | """Test the sessions web service API."""
import errno
import io
import os
import json
import requests
import shutil
import time
pjoin = os.path.join
from notebook.utils import url_path_join
from notebook.tests.launchnotebook import NotebookTestBase, assert_http_error
from nbformat.v4 import new_notebook
from nbformat import write
class SessionAPI(object):
"""Wrapper for notebook API calls."""
def __init__(self, base_url):
self.base_url = base_url
def _req(self, verb, path, body=None):
response = requests.request(verb,
url_path_join(self.base_url, 'api/sessions', path), data=body)
if 400 <= response.status_code < 600:
try:
response.reason = response.json()['message']
except:
pass
response.raise_for_status()
return response
def list(self):
return self._req('GET', '')
def get(self, id):
return self._req('GET', id)
def create(self, path, kernel_name='python'):
body = json.dumps({'notebook': {'path':path},
'kernel': {'name': kernel_name}})
return self._req('POST', '', body)
def modify(self, id, path):
body = json.dumps({'notebook': {'path':path}})
return self._req('PATCH', id, body)
def delete(self, id):
return self._req('DELETE', id)
class SessionAPITest(NotebookTestBase):
"""Test the sessions web service API"""
def setUp(self):
nbdir = self.notebook_dir.name
try:
os.mkdir(pjoin(nbdir, 'foo'))
except OSError as e:
# Deleting the folder in an earlier test may have failed
if e.errno != errno.EEXIST:
raise
with io.open(pjoin(nbdir, 'foo', 'nb1.ipynb'), 'w',
encoding='utf-8') as f:
nb = new_notebook()
write(nb, f, version=4)
self.sess_api = SessionAPI(self.base_url())
def tearDown(self):
for session in self.sess_api.list().json():
self.sess_api.delete(session['id'])
# This is necessary in some situations on Windows: without it, it
# fails to delete the directory because something is still using it. I
# think there is a brief period after the kernel terminates where
# Windows still treats its working directory as in use. On my Windows
# VM, 0.01s is not long enough, but 0.1s appears to work reliably.
# -- TK, 15 December 2014
time.sleep(0.1)
shutil.rmtree(pjoin(self.notebook_dir.name, 'foo'),
ignore_errors=True)
def test_create(self):
sessions = self.sess_api.list().json()
self.assertEqual(len(sessions), 0)
resp = self.sess_api.create('foo/nb1.ipynb')
self.assertEqual(resp.status_code, 201)
newsession = resp.json()
self.assertIn('id', newsession)
self.assertEqual(newsession['notebook']['path'], 'foo/nb1.ipynb')
self.assertEqual(resp.headers['Location'], '/api/sessions/{0}'.format(newsession['id']))
sessions = self.sess_api.list().json()
self.assertEqual(sessions, [newsession])
# Retrieve it
sid = newsession['id']
got = self.sess_api.get(sid).json()
self.assertEqual(got, newsession)
def test_delete(self):
newsession = self.sess_api.create('foo/nb1.ipynb').json()
sid = newsession['id']
resp = self.sess_api.delete(sid)
self.assertEqual(resp.status_code, 204)
sessions = self.sess_api.list().json()
self.assertEqual(sessions, [])
with assert_http_error(404):
self.sess_api.get(sid)
def test_modify(self):
newsession = self.sess_api.create('foo/nb1.ipynb').json()
sid = newsession['id']
changed = self.sess_api.modify(sid, 'nb2.ipynb').json()
self.assertEqual(changed['id'], sid)
self.assertEqual(changed['notebook']['path'], 'nb2.ipynb')
| mit | 6,653,004,737,363,431,000 | 31.403226 | 96 | 0.594326 | false |
asymworks/python-divelog | src/divelog/gui/settings.py | 1 | 3211 | # =============================================================================
#
# Copyright (C) 2011 Asymworks, LLC. All Rights Reserved.
# www.pydivelog.com / [email protected]
#
# This file is part of the Python Dive Logbook (pyDiveLog)
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# =============================================================================
from PySide.QtCore import QSettings
def read_setting(name, default=None):
'''
Read a Name/Value Setting
Reads a name/value setting from the Qt settings store. If the value does
not exist, the default parameter is returned. Note that QSettings stores
all settings as strings, so the caller is responsible for casting the
returned value into the proper Python type.
'''
s = QSettings()
s.beginGroup('Settings')
v = s.value(name)
s.endGroup()
if v is None:
return default
return v
def write_setting(name, value):
'''
Write a Name/Value Setting
Writes a name/value setting to the Qt settings store. Note that QSettings
stores all settings as strings, so the passed value will be converted to a
string using the python str() function prior to being stored.
'''
s = QSettings()
s.beginGroup('Settings')
s.setValue(name, value)
s.endGroup()
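# Illustrative use of the helpers above (the key name 'units/depth' is just an
# example; QSettings stores and returns values as strings):
#   write_setting('units/depth', 'meters')
#   read_setting('units/depth', default='meters')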
# Unit Conversion Table
_units = {
'depth': {
# NB: Depth is given in msw/fsw (pressure), _not_ meters/feet (length)
'meters': {
'to_native': lambda x: x,
'from_native': lambda x: x,
'abbr': u'msw'
},
'feet': {
'to_native': lambda x: x*0.30705,
'from_native': lambda x: x/0.30705,
'abbr': u'fsw'
},
},
'temperature': {
'celsius': {
'to_native': lambda x: x,
'from_native': lambda x: x,
'abbr': u'\u00b0C'
},
'farenheit': {
'to_native': lambda x: 5.0/9.0*(x-32),
'from_native': lambda x: x*9.0/5.0+32,
'abbr': u'\u00b0F'
},
},
}
def quantities():
'Return all Unit Quantities'
return _units.keys()
def units(qty):
'Return all Units for a Quantity'
if not qty in _units:
return []
return _units[qty].keys()
def abbr(qty, name):
'Return the abbreviation for the given Unit'
if not qty in _units or not name in _units[qty]:
return None
return _units[qty][name]['abbr']
def conversion(qty, name):
'Return a tuple with the to_native and from_native conversion functions'
if not qty in _units or not name in _units[qty]:
return (None, None)
q = _units[qty][name]
return (q['to_native'], q['from_native']) | gpl-2.0 | 3,340,102,028,908,228,000 | 30.490196 | 79 | 0.576145 | false |
Naeka/django-rest-framework-jsonapi | rest_framework_jsonapi/utils.py | 1 | 1686 | from __future__ import unicode_literals
from django.conf import settings
from django.utils.encoding import force_text
from rest_framework.compat import importlib
from rest_framework.serializers import ListSerializer, ManyRelatedField
from inflection import underscore, dasherize
def get_serializer(serializer):
if isinstance(serializer, ListSerializer):
return serializer.child
if isinstance(serializer, ManyRelatedField):
return serializer.child_relation
return serializer
def get_resource_type(model):
RESOURCE_TYPE_EXTRACTOR = getattr(
settings, "REST_FRAMEWORK", None).get("RESOURCE_TYPE_EXTRACTOR", None)
if RESOURCE_TYPE_EXTRACTOR:
try:
parts = RESOURCE_TYPE_EXTRACTOR.split(".")
module_path, class_name = ".".join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)(model)
except (ImportError, AttributeError) as e:
msg = ("Could not import '{}' for API setting "
"'RESOURCE_TYPE_EXTRACTOR'. {}: {}.".format(
RESOURCE_TYPE_EXTRACTOR, e.__class__.__name__, e))
raise ImportError(msg)
return force_text(dasherize(underscore(model._meta.object_name)).strip())
def import_serializer(path):
try:
parts = path.split(".")
module_path, class_name = ".".join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)()
except (ImportError, AttributeError):
raise ImportError("Could not import serializer '{}' from {}".format(
class_name, path))
| isc | 3,245,581,626,313,244,700 | 38.209302 | 78 | 0.654804 | false |
GoogleCloudPlatform/python-docs-samples | composer/dag_test_utils/internal_unit_testing.py | 1 | 1147 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for unit testing DAGs."""
# [START composer_dag_unit_testing]
from airflow import models
from airflow.utils.dag_cycle_tester import test_cycle
def assert_has_valid_dag(module):
"""Assert that a module contains a valid DAG."""
no_dag_found = True
for dag in vars(module).values():
if isinstance(dag, models.DAG):
no_dag_found = False
test_cycle(dag) # Throws if a task cycle is found.
if no_dag_found:
raise AssertionError('module does not contain a valid DAG')
# [END composer_dag_unit_testing]
| apache-2.0 | -1,164,161,452,601,557,500 | 32.735294 | 74 | 0.714908 | false |
OCA/stock-logistics-warehouse | stock_picking_completion_info/models/stock_picking.py | 1 | 2802 | # Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl)
from odoo import api, models, fields
class PickingType(models.Model):
_inherit = 'stock.picking.type'
display_completion_info = fields.Boolean(
help='Inform operator of a completed operation at processing and at'
' completion'
)
class StockPicking(models.Model):
_inherit = 'stock.picking'
completion_info = fields.Selection(
[
('no', 'No'),
(
'last_picking',
'Last picking: Completion of this operation allows next '
'operations to be processed.',
),
(
'next_picking_ready',
'Next operations are ready to be processed.',
),
(
'full_order_picking',
'Full order picking: You are processing a full order picking '
'that will allow next operation to be processed'
)
],
compute='_compute_completion_info',
)
@api.depends(
'picking_type_id.display_completion_info',
'move_lines.move_dest_ids.move_orig_ids.state',
)
def _compute_completion_info(self):
for picking in self:
if (
picking.state == 'draft'
or not picking.picking_type_id.display_completion_info
):
picking.completion_info = 'no'
continue
# Depending moves are all the origin moves linked to the
# destination pickings' moves
depending_moves = picking.move_lines.mapped(
'move_dest_ids.picking_id.move_lines.move_orig_ids'
)
# If all the depending moves are done or canceled then next picking
# is ready to be processed
if all(m.state in ('done', 'cancel') for m in depending_moves):
picking.completion_info = 'next_picking_ready'
continue
# If all the depending moves are the moves on the actual picking
# then it's a full order and next picking is ready to be processed
if depending_moves == picking.move_lines:
picking.completion_info = 'full_order_picking'
continue
# If there aren't any depending move from another picking that is
# not done, then actual picking is the last to process
other_depending_moves = (
depending_moves - picking.move_lines
).filtered(lambda m: m.state not in ('done', 'cancel'))
if not other_depending_moves:
picking.completion_info = 'last_picking'
continue
picking.completion_info = 'no'
| agpl-3.0 | -6,114,512,560,388,550,000 | 35.868421 | 79 | 0.55853 | false |
JIghtuse/cppcheck | tools/daca2-addons.py | 1 | 3753 | #!/usr/bin/python
#
# 1. Create a folder daca2-addons in your HOME folder
# 2. Put cppcheck-O2 in daca2-addons. It should be built with all optimisations.
# 3. Optional: Put a file called "suppressions.txt" in the daca2-addons folder.
# 4. Optional: tweak FTPSERVER and FTPPATH in this script below.
# 5. Run the daca2-addons script: python daca2-addons.py FOLDER
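# Example invocation (illustrative; the folder name and revision are placeholders):
#    python daca2-addons.py lib3 -j2 --rev=0123abc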
import subprocess
import sys
import glob
import os
import datetime
import time
from daca2_lib import download_and_unpack, getpackages, removeAll, removeLargeFiles
RESULTS_FILENAME = 'results.txt'
def dumpfiles(path):
ret = []
for g in glob.glob(path + '*'):
if os.path.islink(g):
continue
if os.path.isdir(g):
for df in dumpfiles(path + g + '/'):
ret.append(df)
elif os.path.isfile(g) and g[-5:] == '.dump':
ret.append(g)
return ret
def scanarchive(filepath, jobs):
removeAll(exceptions=[RESULTS_FILENAME])
download_and_unpack(filepath)
#
# List of skipped packages - which trigger known yet unresolved problems with cppcheck.
# The issues on trac (http://trac.cppcheck.net) are given for reference
# boost #3654 (?)
# flite #5975
# insight#5184
# valgrind #6151
# gcc-arm - no ticket. Reproducible timeout in daca2 though as of 1.73/early 2016.
#
if filename[:5] == 'flite' or filename[:5] == 'boost' or filename[:7] == 'insight' or filename[:8] == 'valgrind' or filename[:7] == 'gcc-arm':
results = open('results.txt', 'at')
results.write('fixme: skipped package to avoid hang\n')
results.close()
return
def keep_predicate(path):
return os.path.splitext(path)[1] in ['.txt']
removeLargeFiles('', keep_predicate)
print('cppcheck ' + filename)
p = subprocess.Popen(
['nice',
'../cppcheck-O2',
'--dump',
'-D__GCC__',
'--enable=style',
'--error-exitcode=0',
jobs,
'.'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
comm = p.communicate()
results = open('results.txt', 'at')
addons = sorted(glob.glob(os.path.expanduser('~/cppcheck/addons/*.py')))
for dumpfile in sorted(dumpfiles('')):
for addon in addons:
if addon.find('cppcheckdata.py') > 0:
continue
p2 = subprocess.Popen(['nice',
'python',
addon,
dumpfile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
comm = p2.communicate()
results.write(comm[1])
results.close()
FOLDER = None
JOBS = '-j1'
REV = None
for arg in sys.argv[1:]:
if arg[:6] == '--rev=':
REV = arg[6:]
elif arg[:2] == '-j':
JOBS = arg
else:
FOLDER = arg
if not FOLDER:
print('no folder given')
sys.exit(1)
archives = getpackages(FOLDER)
if len(archives) == 0:
print('failed to load packages')
sys.exit(1)
print('Sleep for 10 seconds..')
time.sleep(10)
workdir = os.path.expanduser('~/daca2/')
print('~/daca2/' + FOLDER)
if not os.path.isdir(workdir + FOLDER):
os.makedirs(workdir + FOLDER)
os.chdir(workdir + FOLDER)
try:
results = open('results.txt', 'wt')
results.write('STARTDATE ' + str(datetime.date.today()) + '\n')
if REV:
results.write('GIT-REVISION ' + REV + '\n')
results.write('\n')
results.close()
for archive in archives:
scanarchive(archive, JOBS)
results = open('results.txt', 'at')
results.write('DATE ' + str(datetime.date.today()) + '\n')
results.close()
except EOFError:
pass
removeAll(exceptions=[RESULTS_FILENAME])
| gpl-3.0 | 5,773,448,597,383,888,000 | 26 | 146 | 0.583533 | false |
myevan/flask_server | examples/flask_sqlaclhemy_sharding.py | 1 | 6928 | # -*- coding:utf8 -*-
import re
from flask import Flask
from flask_sqlalchemy import SQLAlchemy as BaseSQLAlchemy
from flask_sqlalchemy import _SignallingSession as BaseSignallingSession
from flask_sqlalchemy import orm, partial, get_state
from datetime import datetime
class _BindingKeyPattern(object):
def __init__(self, db, pattern):
self.db = db
self.raw_pattern = pattern
self.compiled_pattern = re.compile(pattern)
self._shard_keys = None
def __repr__(self):
return "%s<%s>" % (self.__class__.__name__, self.raw_pattern)
def match(self, key):
return self.compiled_pattern.match(key)
def get_shard_key(self, hash_num):
if self._shard_keys is None:
self._shard_keys = [key for key, value in self.db.app.config['SQLALCHEMY_BINDS'].iteritems() if self.compiled_pattern.match(key)]
self._shard_keys.sort()
return self._shard_keys[hash_num % len(self._shard_keys)]
class _BoundSection(object):
def __init__(self, db_session_cls, name):
self.db_session = db_session_cls()
self.name = name
def __enter__(self):
self.db_session.push_binding(self.name)
def __exit__(self, exc_type, exc_val, exc_tb):
self.db_session.pop_binding()
self.db_session.close()
class _SignallingSession(BaseSignallingSession):
def __init__(self, *args, **kwargs):
BaseSignallingSession.__init__(self, *args, **kwargs)
self._binding_keys = []
self._binding_key = None
def push_binding(self, key):
self._binding_keys.append(self._binding_key)
self._binding_key = key
def pop_binding(self):
self._binding_key = self._binding_keys.pop()
def get_bind(self, mapper, clause=None):
binding_key = self.__find_binding_key(mapper)
if binding_key is None:
return BaseSignallingSession.get_bind(self, mapper, clause)
else:
state = get_state(self.app)
return state.db.get_engine(self.app, bind=binding_key)
def __find_binding_key(self, mapper):
        if mapper is None: # no mapper given
return self._binding_key
else:
mapper_info = getattr(mapper.mapped_table, 'info', {})
mapped_binding_key = mapper_info.get('bind_key')
            if mapped_binding_key: # a mapped binding key exists
                if type(mapped_binding_key) is str: # static binding
return mapped_binding_key
                else: # dynamic binding
                    if mapped_binding_key.match(self._binding_key): # current binding
return self._binding_key
                    else: # fall back to previously pushed bindings
for pushed_binding_key in reversed(self._binding_keys):
if pushed_binding_key and mapped_binding_key.match(pushed_binding_key):
return pushed_binding_key
else:
raise Exception('NOT_FOUND_MAPPED_BINDING:%s CURRENT_BINDING:%s PUSHED_BINDINGS:%s' % (repr(mapped_binding_key), repr(self._binding_key), repr(self._binding_keys[1:])))
            else: # no mapped binding key, use the default binding
return self._binding_key
class SQLAlchemy(BaseSQLAlchemy):
def BindingKeyPattern(self, pattern):
return _BindingKeyPattern(self, pattern)
def binding(self, key):
return _BoundSection(self.session, key)
def create_scoped_session(self, options=None):
if options is None:
options = {}
scopefunc=options.pop('scopefunc', None)
return orm.scoped_session(
partial(_SignallingSession, self, **options), scopefunc=scopefunc
)
def get_binds(self, app=None):
retval = BaseSQLAlchemy.get_binds(self, app)
bind = None
engine = self.get_engine(app, bind)
tables = self.get_tables_for_bind(bind)
retval.update(dict((table, engine) for table in tables))
return retval
def get_tables_for_bind(self, bind=None):
result = []
for table in self.Model.metadata.tables.itervalues():
table_bind_key = table.info.get('bind_key')
if table_bind_key == bind:
result.append(table)
else:
if bind:
if type(table_bind_key) is _BindingKeyPattern and table_bind_key.match(bind):
result.append(table)
elif type(table_bind_key) is str and table_bind_key == bind:
result.append(table)
return result
app = Flask(__name__)
db = SQLAlchemy(app)
class Notice(db.Model):
__bind_key__ = 'global'
id = db.Column(db.Integer, primary_key=True)
msg = db.Column(db.String, nullable=False)
ctime = db.Column(db.DateTime, default=datetime.now(), nullable=False)
def __repr__(self):
return "%s<id=%d,msg='%s'>" % (self.__class__.__name__, self.id, self.msg)
class User(db.Model):
__bind_key__ = db.BindingKeyPattern('[^_]+_user_\d\d')
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(80), unique=True)
login_logs = db.relationship(lambda: LoginLog, backref='owner')
def __repr__(self):
return "%s<id=%d, nickname='%s'>" % (self.__class__.__name__, self.id, self.nickname)
@classmethod
def get_shard_key(cls, nickname):
return cls.__bind_key__.get_shard_key(hash(nickname))
class LoginLog(db.Model):
__bind_key__ = db.BindingKeyPattern('[^_]+_log')
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
ctime = db.Column(db.DateTime, default=datetime.now(), nullable=False)
if __name__ == '__main__':
app.config['SQLALCHEMY_ECHO'] = True
app.config['SQLALCHEMY_BINDS'] = {
'global': 'sqlite:///./global.db',
'master_user_01': 'sqlite:///./master_user_01.db',
'master_user_02': 'sqlite:///./master_user_02.db',
'slave_user': 'sqlite:///./slave_user.db',
'master_log': 'sqlite:///./master_log.db',
'slave_log': 'sqlite:///./slave_log.db',
}
db.drop_all()
db.create_all()
notice = Notice(msg='NOTICE1')
db.session.add(notice)
db.session.commit()
nickname = 'jaru'
with db.binding(User.get_shard_key(nickname)):
notice = Notice(msg='NOTICE2')
db.session.add(notice)
db.session.commit()
user = User(nickname=nickname)
db.session.add(user)
db.session.commit()
with db.binding('master_log'):
notice = Notice(msg='NOTICE3')
db.session.add(notice)
db.session.commit()
login_log = LoginLog(owner=user)
db.session.add(login_log)
db.session.commit()
| mit | -6,155,779,706,041,585,000 | 33.13 | 196 | 0.585116 | false |
Cadasta/cadasta-platform | cadasta/accounts/migrations/0003_add_change_pw.py | 1 | 1517 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-16 09:51
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_activate_users_20161014_0846'),
]
operations = [
migrations.AddField(
model_name='historicaluser',
name='change_pw',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='user',
name='change_pw',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='historicaluser',
name='username',
field=models.CharField(db_index=True, error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
| agpl-3.0 | 2,542,327,888,333,276,000 | 41.138889 | 319 | 0.634146 | false |
asnt/fablablux-schedule | fablab_schedule/api.py | 1 | 3294 | from __future__ import print_function
import argparse
import json
import random
import requests
from fablab_schedule import config
class ScheduleService:
base_route = "?rest_route=/open-access/v1"
endpoints = {
"status": "/",
"schedule": "/machine-schedule",
}
def __init__(self, base_url, username="", password=""):
if not base_url.endswith("/"):
base_url = base_url + "/"
self.base_url = base_url + ScheduleService.base_route
self.username = username
self.password = password
def url_for(self, service):
if service not in ScheduleService.endpoints:
raise ValueError("uknown service {}".format(service))
endpoint = ScheduleService.endpoints[service]
return self.base_url + endpoint
def status(self):
url = self.url_for("status")
r = requests.get(url)
print("get " + url)
print(r.status_code)
print(r.text)
def get(self):
url = self.url_for("schedule")
r = requests.get(url)
print("get " + url)
print(r.status_code)
try:
table = json.loads(r.json()).get("table", None)
print_table(table)
except json.decoder.JSONDecodeError as e:
print(e.__class__.__name__)
print(e)
print(r.text)
def post(self, table):
url = self.url_for("schedule")
json_data = dict(table=table)
credentials = dict(username=self.username, password=self.password)
r = requests.post(url, params=credentials, json=json_data)
print("post " + url)
print(r.status_code)
try:
data = json.loads(r.json()).get("data", None)
except json.decoder.JSONDecodeError as e:
print(e.__class__.__name__)
print(e)
print(r.text)
else:
if data is not None:
table = data.get("table", None)
print_table(table)
else:
print(r.text)
def print_table(table):
for row in table:
for booked in row:
symbol = "X" if booked else "-"
print(symbol, end=" ")
print()
def generate_random_table():
n_slots = 9
n_machines = 7
table = [[bool(round(random.random())) for __ in range(n_slots)]
for __ in range(n_machines)]
return table
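# Illustrative usage sketch (not part of the original module; the URL and
# credentials below are placeholders, not real values):
#
#     service = ScheduleService("http://fablab.example/wordpress/", "user", "secret")
#     service.post(generate_random_table())
#
# The post() call sends the credentials as query parameters and the table as
# JSON of the form {"table": [[true, false, ...], ...]}, matching the
# "machine-schedule" endpoint defined in ScheduleService.endpoints.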
def main():
description = "Communicate with the REST API of the FabLab wall schedule" \
" WordPress plugin"
parser = argparse.ArgumentParser(description=description)
parser.add_argument("command", choices=["get", "status", "post"])
parser.add_argument("-c", "--config", help="alternate config file")
args = parser.parse_args()
if args.config:
conf = config.from_file(args.config)
else:
conf = config.get()
url = conf["base_url"]
user = conf["username"]
password = conf["password"]
service = ScheduleService(url, user, password)
command = args.command
if command == "status":
service.status()
elif command == "get":
service.get()
elif command == "post":
table = generate_random_table()
service.post(table)
if __name__ == "__main__":
main()
| gpl-3.0 | -8,429,851,478,355,358,000 | 26.680672 | 79 | 0.562842 | false |
michaelimfeld/notipy-server | tests/unit/test_userregistration.py | 1 | 2922 | """
`notipyserver` - User-Notification-Framework server
Provides test cases for the notipyserver userregistration module.
:copyright: (c) by Michael Imfeld
:license: MIT, see LICENSE for details
"""
from mock import patch, MagicMock
from nose.tools import assert_equal, assert_raises
import telegram
from notipyserver.backends.telegram.userregistration import register
def test_register_user_no_username():
"""
Test register user if the user has no username
"""
bot = MagicMock()
bot.sendMessage = MagicMock()
update = MagicMock()
update.message.chat.type = "private"
update.message.chat.username = ""
update.message.chat_id = 1234
register(bot, update)
bot.sendMessage.assert_called_with(
chat_id=1234, text="Please setup a telegram username to use this bot.")
def test_register_user():
"""
Test register user
"""
bot = MagicMock()
bot.sendMessage = MagicMock()
update = MagicMock()
update.message.chat.type = "private"
update.message.chat.username = "foouser"
update.message.chat.first_name = "Foo"
update.message.chat_id = 1234
add_user = "notipyserver.backends.telegram.userregistration.add_user"
with patch(add_user) as mock:
mock.return_value = True
register(bot, update)
bot.sendMessage.assert_called_with(
chat_id=1234,
text="Hi Foo!\nYour registration was *successful* 🎉.",
parse_mode=telegram.ParseMode.MARKDOWN)
def test_register_user_already_reg():
"""
Test register user if the user is already registered
"""
bot = MagicMock()
bot.sendMessage = MagicMock()
update = MagicMock()
update.message.chat.type = "private"
update.message.chat.username = "foouser"
update.message.chat.first_name = "Foo"
update.message.chat_id = 1234
add_user = "notipyserver.backends.telegram.userregistration.add_user"
with patch(add_user) as mock:
mock.return_value = False
register(bot, update)
bot.sendMessage.assert_called_with(
chat_id=1234,
text="Already registered!",
parse_mode=telegram.ParseMode.MARKDOWN)
def test_register_group():
"""
Test register group
"""
bot = MagicMock()
bot.sendMessage = MagicMock()
update = MagicMock()
update.message.chat.type = "group"
update.message.chat.username = None
update.message.chat.first_name = None
update.message.chat_id = 1234
update.message.chat.title = "Test Group"
add_group = "notipyserver.backends.telegram.userregistration.add_group"
with patch(add_group) as mock:
mock.return_value = True
register(bot, update)
bot.sendMessage.assert_called_with(
chat_id=1234,
text="Hi Test Group!\nYour registration was *successful* 🎉.",
parse_mode=telegram.ParseMode.MARKDOWN)
| mit | 1,112,391,796,362,147,500 | 26.252336 | 79 | 0.662551 | false |
Treeki/Reggie | mac_setup.py | 1 | 1121 | """
Usage:
cd to it's directory
python mac_setup.py py2app
"""
from setuptools import setup
import os, sys, shutil
NAME = 'Reggie!'
VERSION = '1.0'
plist = dict(
CFBundleIconFile=NAME,
CFBundleName=NAME,
CFBundleShortVersionString=VERSION,
CFBundleGetInfoString=' '.join([NAME, VERSION]),
CFBundleExecutable=NAME,
CFBundleIdentifier='ca.chronometry.reggie',
)
APP = ['reggie.py']
DATA_FILES = ['reggiedata', 'archive.py', 'common.py', 'license.txt', 'lz77.py', 'nsmblib-0.4.zip', 'readme.txt', 'sprites.py']
OPTIONS = {
'argv_emulation': True,
# 'graph': True,
'iconfile': 'reggiedata/reggie.icns',
'plist': plist,
# 'xref': True,
'includes': ['sip', 'encodings', 'encodings.hex_codec', 'PyQt4', 'PyQt4.QtCore', 'PyQt4.QtGui'],
'excludes': ['PyQt4.QtWebKit', 'PyQt4.QtDesigner', 'PyQt4.QtNetwork', 'PyQt4.QtOpenGL',
'PyQt4.QtScript', 'PyQt4.QtSql', 'PyQt4.QtTest', 'PyQt4.QtXml', 'PyQt4.phonon', 'nsmblibmodule'],
'compressed': 0,
'optimize': 0
}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| gpl-2.0 | 2,327,040,555,400,134,000 | 23.369565 | 127 | 0.652096 | false |
google/iree | integrations/tensorflow/e2e/batch_to_space_nd_test.py | 1 | 1348 | # Lint as: python3
# Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Batch To Space ND tests."""
from absl import app
from iree.tf.support import tf_test_utils
from iree.tf.support import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
class BatchtoSpaceModule(tf.Module):
@tf.function(input_signature=[tf.TensorSpec([3, 5, 2], tf.float32)])
def batch_to_space_nd(self, batched):
block_shape = [3]
paddings = [[3, 4]]
return tf.compat.v1.batch_to_space_nd(batched, block_shape, paddings)
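# For reference (added note): with an input of shape [3, 5, 2], block_shape [3]
# and crops [[3, 4]], batch_to_space_nd folds the batch dimension into the
# spatial one (3 * 5 = 15 positions), then crops 3 from the front and 4 from
# the back, giving an output of shape [1, 8, 2].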
class BatchtoSpaceTest(tf_test_utils.TracedModuleTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._modules = tf_test_utils.compile_tf_module(BatchtoSpaceModule)
def test_space_to_batch_inference(self):
def space_to_batch_inference(module):
x = np.linspace(0, 29, 30, dtype=np.float32)
x = np.reshape(x, [3, 5, 2])
module.batch_to_space_nd(x)
self.compare_backends(space_to_batch_inference, self._modules)
def main(argv):
del argv # Unused
if hasattr(tf, 'enable_v2_behavior'):
tf.enable_v2_behavior()
tf.test.main()
if __name__ == '__main__':
app.run(main)
| apache-2.0 | 6,645,117,489,120,919,000 | 26.510204 | 73 | 0.691395 | false |
peterwilletts24/Python-Scripts | EMBRACE/temp_on_p_levs_mean.py | 1 | 2253 | """
Load multiple pp diagnostic files, aggregate by year, day, etc., calculate mean, sum, etc., and save
"""
import os, sys
import datetime
import iris
import iris.unit as unit
from iris.coord_categorisation import add_categorised_coord
import pdb
diag = '_temp_on_p_levs'
pp_file_path='/nfs/a90/eepdw/Data/EMBRACE/'
experiment_ids = [ 'dklyu', 'dkmbq', 'djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq'] # All minus large 3
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dkjxq', 'dkmbq', 'dklzq']
#experiment_ids = ['dkmgw']
#pdb.set_trace()
dtmindt = datetime.datetime(2011,8,19,0,0,0)
dtmaxdt = datetime.datetime(2011,9,7,23,0,0)
dtmin = unit.date2num(dtmindt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
dtmax = unit.date2num(dtmaxdt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
time_constraint = iris.Constraint(time= lambda t: dtmin <= t.point <= dtmax)
fg = '%sdjzn/djznw/djznw%s.pp' % (pp_file_path, diag)
glob_load = iris.load_cube(fg, time_constraint)
## Get time points from global LAM to use as time constraint when loading other runs
time_list = glob_load.coord('time').points
glob_tc = iris.Constraint(time=time_list)
del glob_load
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
fu = '%s%s/%s/%s%s.pp' % (pp_file_path, expmin1, experiment_id, experiment_id, diag)
print experiment_id
sys.stdout.flush()
try:
#cube_names = ['%s' % cube_name_param, '%s' % cube_name_explicit]
cube = iris.load_cube(fu)
#time_coords = cube.coord('time')
try:
os.remove('%s%s/%s/%s%s_mean.pp' % (pp_file_path, expmin1, experiment_id, experiment_id, diag))
except OSError,e:
print '%s%s/%s/%s%s_mean.pp NOT REMOVED' % (pp_file_path, expmin1, experiment_id, experiment_id, diag)
print e
pass
for height_slice in cube.slices(['time', 'grid_latitude', 'grid_longitude']):
#pdb.set_trace()
mean = height_slice.collapsed('time', iris.analysis.MEAN)
iris.save((mean),'%s%s/%s/%s%s_mean.pp' % (pp_file_path, expmin1, experiment_id, experiment_id, diag), append=True)
except iris.exceptions.ConstraintMismatchError:
pass
| mit | 3,748,510,054,328,490,500 | 31.652174 | 127 | 0.651132 | false |
chalamka/battle-notifier | battle_notifier.py | 1 | 7276 | import logging
import json
import sys
import datetime as dt
import os
import time
from urllib.error import HTTPError
import slack_webhooks as slack
import worldoftanks_requests as wot
from itertools import dropwhile
from math import log2, ceil
class BattleNotifier:
def __init__(self, config_path='config.json', log_level=logging.CRITICAL):
self.battles = []
self.logger = configure_logging(log_level)
config = self._load_config(config_path)
self.application_id = config['application_id']
self.clan_id = config['clan_id']
self.bot_name = config['bot_name']
self.icon_emoji = config['icon_emoji']
self.channel_name = config['channel_name']
self.slack_url = config['slack_url']
self.update_interval = config['update_interval']
self.clan_tag = config['clan_tag']
def run(self):
while True:
try:
# this check tries to deal with a WG API issue where battles return an incorrect timestamp
if dt.datetime.now().minute == 0:
time.sleep(90)
if dt.datetime.now().minute == 1:
time.sleep(30)
if self._update_battles():
self.logger.info("Found battle")
self._slack_notification()
time.sleep(self.update_interval)
self.logger.info("Sleep cycle completed")
except KeyboardInterrupt:
self.logger.critical("Interrupted, shutting down")
sys.exit(1)
def _update_battles(self):
new_battles = self._get_new_battles()
if not new_battles:
return False
else:
self.battles += new_battles
self.battles.sort(key=lambda x: x[0].time)
self.battles = list(dropwhile(lambda x: dt.datetime.fromtimestamp(x[0].time) < dt.datetime.now(), self.battles))
if self.battles:
return True
else:
return False
def _get_new_battles(self):
try:
new_battles = wot.get_cw_battles(self.application_id, self.clan_id)
battles_info = [(battle,
wot.get_province_info(self.application_id, battle.front_id, battle.province_id),
wot.get_clan_info(self.application_id, battle.competitor_id))
for battle in new_battles if battle.battle_id not in [b.battle_id for b, _, __ in self.battles]]
return battles_info
except HTTPError:
self.logger.error("HTTP Error when getting battles")
return []
def _simul_check(self, battle):
simuls = []
for b, _, __ in self.battles:
if abs((b.convert_time() - battle.convert_time()).total_seconds() / 60) < 5 and battle.battle_id != b.battle_id:
simuls.append(b)
return simuls
def _slack_notification(self):
attachments = []
notification_level = "<!here>"
thumb_url = "http://na.wargaming.net/clans/media/clans/emblems/cl_{}/{}/emblem_64x64.png"
current_time = dt.datetime.now()
for battle, province, clan in self.battles:
if not battle.notified:
battle.notified = True
province_text = "*Province:* {} *Map:* {} *Server:* {}".format(province.province_name,
province.arena_name,
province.server)
battle_start_time = dt.datetime.fromtimestamp(int(battle.time))
time_until_battle = battle_start_time - current_time
minutes_until_battle = time_until_battle.total_seconds() / 60
if battle.type == 'attack' and battle.attack_type == 'tournament':
time_text = "Tournament Round {} begins at {} CST popping in {} minutes".format(
province.round_number,
battle_start_time.strftime("%H:%M"),
int(minutes_until_battle - 14))
if battle.competitor_id == province.owner_clan_id:
time_text += "\n*Tank locking is active in this battle*"
else:
time_text = "*{}* begins at {} CST popping in {} minutes\n*Tank locking is active in this battle*".format(
battle.type.title(),
battle_start_time.strftime("%H:%M"),
int(minutes_until_battle - 14))
simul_text = ""
simuls = self._simul_check(battle)
if simuls:
simul_text = "There are {} battles occurring at this time: {}, {}.".format(
len(simuls) + 1,
province.province_name,
", ".join([b.province_name for b in simuls]))
battle_attachment = slack.build_slack_attachment(fallback="Upcoming CW battle vs. {}".format(clan.tag),
pretext="",
fields=[],
title=":{0}: {0} vs. {1} :fire:".format(self.clan_tag, clan.tag),
level="good" if battle.type == 'defence' else "danger",
thumb_url=thumb_url.format(str(clan.clan_id)[-3:], clan.clan_id),
text="{}\n{}\n{}".format(province_text, time_text, simul_text),
markdown_in=['text'])
attachments.append(battle_attachment)
payload = slack.build_slack_payload(attachments, notification_level, self.bot_name, self.icon_emoji, self.channel_name)
if attachments:
slack.send_slack_webhook(self.slack_url, payload)
self.logger.info("Slack webhook notification sent")
def _load_config(self, filename):
try:
with open(filename) as fp:
return json.load(fp)
except IOError:
self.logger.critical("Failed to load configuration file: {}".format(filename))
self.logger.critical("Exiting (cannot load config)")
sys.exit(1)
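# Expected shape of config.json, inferred from the keys read in __init__ above
# (values below are placeholders, not real credentials):
#
#     {
#         "application_id": "...", "clan_id": "...", "clan_tag": "CLAN",
#         "bot_name": "Battle Notifier", "icon_emoji": ":tank:",
#         "channel_name": "#clan-wars", "slack_url": "https://hooks.slack.com/...",
#         "update_interval": 60
#     }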
def write_json(filename, to_write):
with open(filename, 'w') as fp:
return json.dump(to_write, fp)
def configure_logging(level):
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
if not os.path.exists("logs/"):
os.mkdir("logs/")
l = logging.getLogger(__name__)
logger_format = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler("logs/" + dt.date.today().strftime("%m-%d-%Y.log"))
l.setLevel(level)
file_handler.setFormatter(logger_format)
l.addHandler(file_handler)
return l
def main(args):
bn = BattleNotifier()
bn.run()
if __name__ == "__main__":
main(sys.argv)
| mit | 2,196,117,582,285,152,000 | 41.302326 | 130 | 0.522815 | false |
newlawrence/poliastro | src/poliastro/threebody/restricted.py | 1 | 3754 | """Circular Restricted 3-Body Problem (CR3BP)
Includes the computation of Lagrange points
"""
import numpy as np
from astropy import units as u
from scipy.optimize import brentq
from poliastro.util import norm
@u.quantity_input(r12=u.km, m1=u.kg, m2=u.kg)
def lagrange_points(r12, m1, m2):
"""Computes the Lagrangian points of CR3BP.
Computes the Lagrangian points of CR3BP given the distance between two
bodies and their masses.
It uses the formulation found in Eq. (2.204) of Curtis, Howard. 'Orbital
mechanics for engineering students'. Elsevier, 3rd Edition.
Parameters
----------
r12 : ~astropy.units.Quantity
Distance between the two bodies
m1 : ~astropy.units.Quantity
Mass of the main body
m2 : ~astropy.units.Quantity
Mass of the secondary body
Returns
-------
~astropy.units.Quantity
Distance of the Lagrangian points to the main body,
projected on the axis main body - secondary body
"""
pi2 = (m2 / (m1 + m2)).value
def eq_L123(xi):
aux = (1 - pi2) * (xi + pi2) / abs(xi + pi2)**3
aux += pi2 * (xi + pi2 - 1) / abs(xi + pi2 - 1)**3
aux -= xi
return aux
lp = np.zeros((5,))
# L1
tol = 1e-11 # `brentq` uses a xtol of 2e-12, so it should be covered
a = - pi2 + tol
b = 1 - pi2 - tol
xi = brentq(eq_L123, a, b)
lp[0] = xi + pi2
# L2
xi = brentq(eq_L123, 1, 1.5)
lp[1] = xi + pi2
# L3
xi = brentq(eq_L123, -1.5, -1)
lp[2] = xi + pi2
# L4, L5
# L4 and L5 are points in the plane of rotation which form an equilateral
# triangle with the two masses (Battin)
# (0.5 = cos(60 deg))
lp[3] = lp[4] = 0.5
return lp * r12
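# Note (added for clarity): eq_L123 above is the collinear equilibrium
# condition of the CR3BP in nondimensional barycentric coordinates (Curtis,
# Eq. (2.204)); its three roots, bracketed in (-pi2, 1 - pi2), (1, 1.5) and
# (-1.5, -1), give L1, L2 and L3 respectively.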
@u.quantity_input(m1=u.kg, r1=u.km,
m2=u.kg, r2=u.km,
n=u.one)
def lagrange_points_vec(m1, r1, m2, r2, n):
"""Computes the five Lagrange points in the CR3BP.
Returns the positions in the same frame of reference as `r1` and `r2`
for the five Lagrangian points.
Parameters
----------
m1 : ~astropy.units.Quantity
Mass of the main body. This body is the one with the biggest mass.
r1 : ~astropy.units.Quantity
Position of the main body.
m2 : ~astropy.units.Quantity
Mass of the secondary body.
r2 : ~astropy.units.Quantity
Position of the secondary body.
n : ~astropy.units.Quantity
Normal vector to the orbital plane.
Returns
-------
list:
Position of the Lagrange points: [L1, L2, L3, L4, L5]
The positions are of type ~astropy.units.Quantity
"""
# Check Body 1 is the main body
assert m1 > m2, "Body 1 is not the main body: it has less mass than the 'secondary' body"
# Define local frame of reference:
# Center: main body, NOT the barycenter
# x-axis: points to the secondary body
ux = r2 - r1
r12 = norm(ux)
ux = ux / r12
# y-axis: contained in the orbital plane, perpendicular to x-axis
def cross(x, y):
return np.cross(x, y) * x.unit * y.unit
uy = cross(n, ux)
uy = uy / norm(uy)
# position in x-axis
x1, x2, x3, x4, x5 = lagrange_points(r12, m1, m2)
# position in y-axis
# L1, L2, L3 are located in the x-axis, so y123 = 0
# L4 and L5 are points in the plane of rotation which form an equilateral
# triangle with the two masses (Battin)
# sqrt(3)/2 = sin(60 deg)
y4 = np.sqrt(3) / 2 * r12
y5 = - y4
# Convert L points coordinates (x,y) to original vectorial base [r1 r2]
L1 = r1 + ux * x1
L2 = r1 + ux * x2
L3 = r1 + ux * x3
L4 = r1 + ux * x4 + uy * y4
L5 = r1 + ux * x5 + uy * y5
return [L1, L2, L3, L4, L5]
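# Illustrative usage sketch (not part of the original module). The Earth-Moon
# values below are rough assumptions used only to show the expected call:
#
#     import numpy as np
#     from astropy import units as u
#
#     m_earth = 5.972e24 * u.kg
#     m_moon = 7.348e22 * u.kg
#     r_earth = np.array([0.0, 0.0, 0.0]) * u.km
#     r_moon = np.array([384400.0, 0.0, 0.0]) * u.km
#     n = np.array([0.0, 0.0, 1.0]) * u.one
#
#     L1, L2, L3, L4, L5 = lagrange_points_vec(m_earth, r_earth, m_moon, r_moon, n)
#
# L1, L2 and L3 lie on the Earth-Moon axis; L4 and L5 complete equilateral
# triangles with the two bodies, as computed above.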
| mit | 3,683,252,528,897,132,500 | 25.814286 | 93 | 0.591636 | false |
sofeien/PythonLearn | Baidu_v3.py | 1 | 6660 | #coding='utf-8'
from tkinter import *
from tkinter.ttk import *
import requests
import urllib.parse
import os
import time
import threading
import re
class BaiduSpider:
def __init__(self):
self.getNum = 0
self.failNum = 0
self.decode_dict = [{"_z2C$q": ":", "AzdH3F": "/", "_z&e3B": "."}, {"O": "O", "4": "m", "N": "N", "R": "R", "z": "z", "7": "u", "e": "v", "o": "w", "1": "d", "x": "x", "M": "M", "p": "t", "j": "e", "3": "j", "9": "4", "H": "H", "A": "A", "S": "S", "i": "h", "k": "b", "g": "n", "_": "_", "C": "C", "d": "2", "m": "6", "8": "1", ":": ":", "2": "g", "n": "3", "u": "f", "D": "D", "B": "B", "/": "/", "w": "a", "f": "s", ".": ".", "T": "T", "%": "%", "s": "l", "0": "7", "r": "p", "E": "E", "l": "9", "6": "r", "a": "0", "t": "i", "-": "-", "v": "c", "b": "8", "L": "L", "5": "o", "Q": "Q", "c": "5", "=": "=", "h": "k"}]
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
'Accept': 'text/plain, */*; q=0.01', 'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8', 'Connection': 'keep-alive'}
self.s = requests.Session()
self.s.headers.update(headers)
self.url_temp = 'http://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={word}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word={word}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn={pn}&rn=30'
self.url_temp2 = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word={word}&cg={cg}&pn={pn}&rn=30&itg=1&z=&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=100003c'
def _changecode(self, text, dic):
trans_dict = {re.escape(key): value for key, value in dic.items()}
pattern = re.compile('|'.join(trans_dict.keys()))
return pattern.sub(lambda m: trans_dict[re.escape(m.group(0))], text)
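# _changecode maps the characters of Baidu's obfuscated objURL back to their
# real values using decode_dict above (e.g. "_z2C$q" -> ":", "w" -> "a");
# _getpage applies it once per dictionary to recover the original image URL.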
def _getpage(self, word, n, size, path, mode, cg):
if mode == 1:
generator = (self.url_temp.format(word=urllib.parse.quote(word), size=size, pn=x * 30) for x in range(0, n))
for page in generator:
try:
r = self.s.get(page)
except TimeoutError:
print('连接超时')
return
r.encoding = 'utf-8'
for url in r.json()['data']:
try:
url = url['objURL']
except KeyError:
break
for dic in self.decode_dict:
url = self._changecode(url, dic)
self._downpic(url, path, word)
else:
generator = (self.url_temp2.format(word=urllib.parse.quote(word), cg=cg, pn=x * 30) for x in range(0, n))
for page in generator:
try:
r = self.s.get(page)
except TimeoutError:
print('连接超时')
r.encoding = 'utf-8'
for url in r.json()['imgs']:
try:
url = url['objURL']
except KeyError:
break
self._downpic(url, path, word)
def _downpic(self, url, path, word):
path = os.path.join(path, word)
if not os.path.isdir(path):
os.makedirs(path)
print(url, end=" loading... ")
name = url.split('.')[-1]
name = str(self.getNum + 1) + '.' + name
try:
r = self.s.get(url, stream=True, timeout=3)
# logging.warning(r.status_code)
if r.status_code != 200:
raise Exception('not connect')
pic_path = os.path.join(path, name)
while os.path.isfile(pic_path):
name = 'new_' + name
pic_path = os.path.join(path, name)
with open(pic_path, 'wb') as f:
for chunk in r.iter_content(1024 * 10):
f.write(chunk)
print('下载成功')
self.getNum += 1
except:
print('=== 下载失败 ===')
self.failNum += 1
return
# size: 3 - large, 0 or '' - no filter / custom size, 9 - extra large, 2 - medium
# mode: 1 - normal download, 0 - special download
# cg: 'wallpaper' - wallpapers, 'head' - avatars
def load(self, word, n=1, size=3, path=r'd:\BaiduPic', mode=1, cg='wallpaper'):
if not os.path.isdir(path):
os.makedirs(path)
self._getpage(word, n, size, path, mode, cg)
return '下载成功{}张,下载失败{}张'.format(self.getNum, self.failNum)
def countNum(self):
return self.getNum+self.failNum
def main(*args):
baidu = BaiduSpider()
word = key_word.get()
n = int(page.get())
# print(baidu.getNum())
def loadPic():
# global key_word
# global page
global choice
global status
status['text']='load...'
if choice==1:
result = baidu.load(word, n=n)
else:
result = baidu.load(word, n=n, mode=0, cg='wallpaper')
status['text']=result
def loadStatus(*args):
# print(baidu.countNum())
total = n*30
done = baidu.countNum()
while done<total:
done = baidu.countNum()
p1["value"] = done*100/total
root.update()
# print(done)
time.sleep(0.5)
threading.Thread(target=loadPic).start()
threading.Thread(target=loadStatus).start()
return
root = Tk()
root.title('百度图片爬虫')
f = Frame(root)
f.pack(padx=10, pady=(10, 5))
f2 = Frame(root)
Label(f, text="关键字:").grid(row=0, padx=5, pady=(0, 5), sticky=E)
key_word = Entry(f)
key_word.grid(row=0, column=1, padx=(0, 5), pady=(0, 5))
Label(f, text="下载页数:").grid(row=1, padx=5)
page = Entry(f)
page.insert(0,'1')
page.grid(row=1, column=1, padx=(0, 5))
f3 = Frame(root)
f3.pack(padx=10)
f2.pack(padx=10, pady=(5, 10))
choice = IntVar()
Radiobutton(f3, text="普通", variable=choice, value=1).grid(row=0, column=0, padx=5)
Radiobutton(f3, text="壁纸", variable=choice, value=2).grid(row=0, column=1, padx=5)
choice.set(1)
searchBtn = Button(f2, text='下载',command=main)
searchBtn.grid(row=0)
p1 = Progressbar(f2, mode="determinate", orient=HORIZONTAL)
p1.grid(row=0,column=1, padx=(5,0))
p1["maximum"] = 100
p1["value"] = 0
key_word.focus_set()
status = Label(root, text="")
status.pack(side=BOTTOM, fill=X)
root.mainloop()
| gpl-2.0 | -7,292,299,521,168,612,000 | 38.573171 | 626 | 0.502773 | false |
kittiu/sale-workflow | sale_quotation_sourcing/__manifest__.py | 1 | 1758 | # -*- coding: utf-8 -*-
#
# Author: Alexandre Fayolle, Leonardo Pistone
# Copyright 2014-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': "Sale Quotation Sourcing",
'summary': "manual sourcing of sale quotations",
'author': "Camptocamp,Odoo Community Association (OCA)",
'website': "http://www.camptocamp.com",
'category': 'Sales',
'version': '8.0.0.3.1',
'depends': ['sale_stock',
'purchase',
'stock_dropshipping',
'sale_exception',
'sale_procurement_group_by_line'],
'data': ['views/sale_order_sourcing.xml',
'views/sale_order.xml',
'security/group.xml',
'data/exceptions.xml',
],
'test': [
'test/setup_user.yml',
'test/setup_product.yml',
'test/setup_dropshipping.xml',
'test/test_standard_mto_sourcing.yml',
'test/test_standard_dropshipping.yml',
'test/test_manual_mto_sourcing.yml',
'test/test_manual_sourcing_dropshipping.yml',
],
'installable': False,
}
| agpl-3.0 | 2,922,912,429,182,100,000 | 33.470588 | 77 | 0.62628 | false |
optima-ict/odoo | addons/website_forum/models/forum.py | 1 | 44184 | # -*- coding: utf-8 -*-
from datetime import datetime
import itertools
import logging
import math
import re
import uuid
from werkzeug.exceptions import Forbidden
from openerp import _
from openerp import api, fields, models
from openerp import http
from openerp import modules
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.addons.website.models.website import slug
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
class KarmaError(Forbidden):
""" Karma-related error, used for forum and posts. """
pass
class Forum(models.Model):
_name = 'forum.forum'
_description = 'Forum'
_inherit = ['mail.thread', 'website.seo.metadata']
def init(self, cr):
""" Add forum uuid for user email validation.
TDE TODO: move me somewhere else, auto_init ? """
forum_uuids = self.pool['ir.config_parameter'].search(cr, SUPERUSER_ID, [('key', '=', 'website_forum.uuid')])
if not forum_uuids:
self.pool['ir.config_parameter'].set_param(cr, SUPERUSER_ID, 'website_forum.uuid', str(uuid.uuid4()), ['base.group_system'])
@api.model
def _get_default_faq(self):
fname = modules.get_module_resource('website_forum', 'data', 'forum_default_faq.html')
with open(fname, 'r') as f:
return f.read()
return False
# description and use
name = fields.Char('Forum Name', required=True, translate=True)
faq = fields.Html('Guidelines', default=_get_default_faq, translate=True)
description = fields.Text(
'Description',
translate=True,
default='This community is for professionals and enthusiasts of our products and services. '
'Share and discuss the best content and new marketing ideas, '
'build your professional profile and become a better marketer together.')
welcome_message = fields.Html(
'Welcome Message',
default = """<section class="bg-info" style="height: 168px;"><div class="container">
<div class="row">
<div class="col-md-12">
<h1 class="text-center" style="text-align: left;">Welcome!</h1>
<p class="text-muted text-center" style="text-align: left;">This community is for professionals and enthusiasts of our products and services. Share and discuss the best content and new marketing ideas, build your professional profile and become a better marketer together.</p>
</div>
<div class="col-md-12">
<a href="#" class="js_close_intro">Hide Intro</a> <a class="btn btn-primary forum_register_url" href="/web/login">Register</a> </div>
</div>
</div>
</section>""")
default_order = fields.Selection([
('create_date desc', 'Newest'),
('write_date desc', 'Last Updated'),
('vote_count desc', 'Most Voted'),
('relevancy desc', 'Relevance'),
('child_count desc', 'Answered')],
string='Default Order', required=True, default='write_date desc')
relevancy_post_vote = fields.Float('First Relevance Parameter', default=0.8, help="This formula is used in order to sort by relevance. The variable 'votes' represents number of votes for a post, and 'days' is number of days since the post creation")
relevancy_time_decay = fields.Float('Second Relevance Parameter', default=1.8)
default_post_type = fields.Selection([
('question', 'Question'),
('discussion', 'Discussion'),
('link', 'Link')],
string='Default Post', required=True, default='question')
allow_question = fields.Boolean('Questions', help="Users can answer only once per question. Contributors can edit answers and mark the right ones.", default=True)
allow_discussion = fields.Boolean('Discussions', default=True)
allow_link = fields.Boolean('Links', help="When clicking on the post, it redirects to an external link", default=True)
allow_bump = fields.Boolean('Allow Bump', default=True,
help='Check this box to display a popup for posts older than 10 days '
'without any given answer. The popup will offer to share it on social '
'networks. When shared, a question is bumped at the top of the forum.')
allow_share = fields.Boolean('Sharing Options', default=True,
help='After posting the user will be proposed to share its question '
'or answer on social networks, enabling social network propagation '
'of the forum content.')
count_posts_waiting_validation = fields.Integer(string="Number of posts waiting for validation", compute='_compute_count_posts_waiting_validation')
count_flagged_posts = fields.Integer(string='Number of flagged posts', compute='_compute_count_flagged_posts')
# karma generation
karma_gen_question_new = fields.Integer(string='Asking a question', default=2)
karma_gen_question_upvote = fields.Integer(string='Question upvoted', default=5)
karma_gen_question_downvote = fields.Integer(string='Question downvoted', default=-2)
karma_gen_answer_upvote = fields.Integer(string='Answer upvoted', default=10)
karma_gen_answer_downvote = fields.Integer(string='Answer downvoted', default=-2)
karma_gen_answer_accept = fields.Integer(string='Accepting an answer', default=2)
karma_gen_answer_accepted = fields.Integer(string='Answer accepted', default=15)
karma_gen_answer_flagged = fields.Integer(string='Answer flagged', default=-100)
# karma-based actions
karma_ask = fields.Integer(string='Ask questions', default=3)
karma_answer = fields.Integer(string='Answer questions', default=3)
karma_edit_own = fields.Integer(string='Edit own posts', default=1)
karma_edit_all = fields.Integer(string='Edit all posts', default=300)
karma_close_own = fields.Integer(string='Close own posts', default=100)
karma_close_all = fields.Integer(string='Close all posts', default=500)
karma_unlink_own = fields.Integer(string='Delete own posts', default=500)
karma_unlink_all = fields.Integer(string='Delete all posts', default=1000)
karma_upvote = fields.Integer(string='Upvote', default=5)
karma_downvote = fields.Integer(string='Downvote', default=50)
karma_answer_accept_own = fields.Integer(string='Accept an answer on own questions', default=20)
karma_answer_accept_all = fields.Integer(string='Accept an answer to all questions', default=500)
karma_comment_own = fields.Integer(string='Comment own posts', default=1)
karma_comment_all = fields.Integer(string='Comment all posts', default=1)
karma_comment_convert_own = fields.Integer(string='Convert own answers to comments and vice versa', default=50)
karma_comment_convert_all = fields.Integer(string='Convert all answers to comments and vice versa', default=500)
karma_comment_unlink_own = fields.Integer(string='Unlink own comments', default=50)
karma_comment_unlink_all = fields.Integer(string='Unlink all comments', default=500)
karma_retag = fields.Integer(string='Change question tags', default=75)
karma_flag = fields.Integer(string='Flag a post as offensive', default=500)
karma_dofollow = fields.Integer(string='Nofollow links', help='If the author has not enough karma, a nofollow attribute is added to links', default=500)
karma_editor = fields.Integer(string='Editor Features: image and links',
default=30, oldname='karma_editor_link_files')
karma_user_bio = fields.Integer(string='Display detailed user biography', default=750)
karma_post = fields.Integer(string='Ask questions without validation', default=100)
karma_moderate = fields.Integer(string='Moderate posts', default=1000)
@api.one
@api.constrains('allow_question', 'allow_discussion', 'allow_link', 'default_post_type')
def _check_default_post_type(self):
if (self.default_post_type == 'question' and not self.allow_question) \
or (self.default_post_type == 'discussion' and not self.allow_discussion) \
or (self.default_post_type == 'link' and not self.allow_link):
raise UserError(_('You cannot choose %s as default post since the forum does not allow it.' % self.default_post_type))
@api.one
@api.constrains('allow_link', 'allow_question', 'allow_discussion', 'default_post_type')
def _check_default_post_type(self):
if self.default_post_type == 'link' and not self.allow_link or self.default_post_type == 'question' and not self.allow_question or self.default_post_type == 'discussion' and not self.allow_discussion:
raise Warning(_('Post type in "Default post" must be activated'))
@api.one
def _compute_count_posts_waiting_validation(self):
domain = [('forum_id', '=', self.id), ('state', '=', 'pending')]
self.count_posts_waiting_validation = self.env['forum.post'].search_count(domain)
@api.one
def _compute_count_flagged_posts(self):
domain = [('forum_id', '=', self.id), ('state', '=', 'flagged')]
self.count_flagged_posts = self.env['forum.post'].search_count(domain)
@api.model
def create(self, values):
return super(Forum, self.with_context(mail_create_nolog=True, mail_create_nosubscribe=True)).create(values)
@api.model
def _tag_to_write_vals(self, tags=''):
User = self.env['res.users']
Tag = self.env['forum.tag']
post_tags = []
existing_keep = []
for tag in filter(None, tags.split(',')):
if tag.startswith('_'): # it's a new tag
# check that it has not already been created meanwhile or maybe excluded by the limit on the search
tag_ids = Tag.search([('name', '=', tag[1:])])
if tag_ids:
existing_keep.append(int(tag_ids[0]))
else:
# check if the user has the karma needed to create a new tag
user = User.sudo().browse(self._uid)
if user.exists() and user.karma >= self.karma_retag:
post_tags.append((0, 0, {'name': tag[1:], 'forum_id': self.id}))
else:
existing_keep.append(int(tag))
post_tags.insert(0, [6, 0, existing_keep])
return post_tags
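# Example of the encoding handled above (illustrative): tags='_newtag,12,13'
# yields [[6, 0, [12, 13]], (0, 0, {'name': 'newtag', 'forum_id': <forum id>})],
# i.e. existing tag ids are kept via a (6, 0, ids) command and each '_'-prefixed
# tag becomes a (0, 0, values) creation command (karma permitting, and unless a
# tag with that name already exists).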
def get_tags_first_char(self):
""" get set of first letter of forum tags """
tags = self.env['forum.tag'].search([('forum_id', '=', self.id), ('posts_count', '>', 0)])
return sorted(set([tag.name[0].upper() for tag in tags]))
class Post(models.Model):
_name = 'forum.post'
_description = 'Forum Post'
_inherit = ['mail.thread', 'website.seo.metadata']
_order = "is_correct DESC, vote_count DESC, write_date DESC"
name = fields.Char('Title')
forum_id = fields.Many2one('forum.forum', string='Forum', required=True)
content = fields.Html('Content', strip_style=True)
plain_content = fields.Text('Plain Content', compute='_get_plain_content', store=True)
content_link = fields.Char('URL', help="URL of Link Articles")
tag_ids = fields.Many2many('forum.tag', 'forum_tag_rel', 'forum_id', 'forum_tag_id', string='Tags')
state = fields.Selection([('active', 'Active'), ('pending', 'Waiting Validation'), ('close', 'Close'), ('offensive', 'Offensive'), ('flagged', 'Flagged')], string='Status', default='active')
views = fields.Integer('Number of Views', default=0)
active = fields.Boolean('Active', default=True)
post_type = fields.Selection([
('question', 'Question'),
('link', 'Article'),
('discussion', 'Discussion')],
string='Type', default='question', required=True)
website_message_ids = fields.One2many(
'mail.message', 'res_id',
domain=lambda self: ['&', ('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])],
string='Post Messages', help="Comments on forum post",
)
# history
create_date = fields.Datetime('Asked on', select=True, readonly=True)
create_uid = fields.Many2one('res.users', string='Created by', select=True, readonly=True)
write_date = fields.Datetime('Update on', select=True, readonly=True)
bump_date = fields.Datetime('Bumped on', readonly=True,
help="Technical field allowing to bump a question. Writing on this field will trigger "
"a write on write_date and therefore bump the post. Directly writing on write_date "
"is currently not supported and this field is a workaround.")
write_uid = fields.Many2one('res.users', string='Updated by', select=True, readonly=True)
relevancy = fields.Float('Relevance', compute="_compute_relevancy", store=True)
# vote
vote_ids = fields.One2many('forum.post.vote', 'post_id', string='Votes')
user_vote = fields.Integer('My Vote', compute='_get_user_vote')
vote_count = fields.Integer('Votes', compute='_get_vote_count', store=True)
# favorite
favourite_ids = fields.Many2many('res.users', string='Favourite')
user_favourite = fields.Boolean('Is Favourite', compute='_get_user_favourite')
favourite_count = fields.Integer('Favorite Count', compute='_get_favorite_count', store=True)
# hierarchy
is_correct = fields.Boolean('Correct', help='Correct answer or answer accepted')
parent_id = fields.Many2one('forum.post', string='Question', ondelete='cascade')
self_reply = fields.Boolean('Reply to own question', compute='_is_self_reply', store=True)
child_ids = fields.One2many('forum.post', 'parent_id', string='Answers')
child_count = fields.Integer('Number of answers', compute='_get_child_count', store=True)
uid_has_answered = fields.Boolean('Has Answered', compute='_get_uid_has_answered')
has_validated_answer = fields.Boolean('Is answered', compute='_get_has_validated_answer', store=True)
# offensive moderation tools
flag_user_id = fields.Many2one('res.users', string='Flagged by')
moderator_id = fields.Many2one('res.users', string='Reviewed by', readonly=True)
# closing
closed_reason_id = fields.Many2one('forum.post.reason', string='Reason')
closed_uid = fields.Many2one('res.users', string='Closed by', select=1)
closed_date = fields.Datetime('Closed on', readonly=True)
# karma calculation and access
karma_accept = fields.Integer('Convert comment to answer', compute='_get_post_karma_rights')
karma_edit = fields.Integer('Karma to edit', compute='_get_post_karma_rights')
karma_close = fields.Integer('Karma to close', compute='_get_post_karma_rights')
karma_unlink = fields.Integer('Karma to unlink', compute='_get_post_karma_rights')
karma_comment = fields.Integer('Karma to comment', compute='_get_post_karma_rights')
karma_comment_convert = fields.Integer('Karma to convert comment to answer', compute='_get_post_karma_rights')
karma_flag = fields.Integer('Flag a post as offensive', compute='_get_post_karma_rights')
can_ask = fields.Boolean('Can Ask', compute='_get_post_karma_rights')
can_answer = fields.Boolean('Can Answer', compute='_get_post_karma_rights')
can_accept = fields.Boolean('Can Accept', compute='_get_post_karma_rights')
can_edit = fields.Boolean('Can Edit', compute='_get_post_karma_rights')
can_close = fields.Boolean('Can Close', compute='_get_post_karma_rights')
can_unlink = fields.Boolean('Can Unlink', compute='_get_post_karma_rights')
can_upvote = fields.Boolean('Can Upvote', compute='_get_post_karma_rights')
can_downvote = fields.Boolean('Can Downvote', compute='_get_post_karma_rights')
can_comment = fields.Boolean('Can Comment', compute='_get_post_karma_rights')
can_comment_convert = fields.Boolean('Can Convert to Comment', compute='_get_post_karma_rights')
can_view = fields.Boolean('Can View', compute='_get_post_karma_rights')
can_display_biography = fields.Boolean("Is the author's biography visible from his post", compute='_get_post_karma_rights')
can_post = fields.Boolean('Can Automatically be Validated', compute='_get_post_karma_rights')
can_flag = fields.Boolean('Can Flag', compute='_get_post_karma_rights')
can_moderate = fields.Boolean('Can Moderate', compute='_get_post_karma_rights')
@api.one
@api.depends('content')
def _get_plain_content(self):
self.plain_content = tools.html2plaintext(self.content)[0:500] if self.content else False
@api.one
@api.depends('vote_count', 'forum_id.relevancy_post_vote', 'forum_id.relevancy_time_decay')
def _compute_relevancy(self):
if self.create_date:
days = (datetime.today() - datetime.strptime(self.create_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)).days
self.relevancy = math.copysign(1, self.vote_count) * (abs(self.vote_count - 1) ** self.forum_id.relevancy_post_vote / (days + 2) ** self.forum_id.relevancy_time_decay)
else:
self.relevancy = 0
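# Worked example of the relevancy formula above (illustrative numbers): with
# the default parameters relevancy_post_vote = 0.8 and relevancy_time_decay = 1.8,
# a post with vote_count = 9 created 5 days ago gets
# sign(9) * |9 - 1|**0.8 / (5 + 2)**1.8 ~= 5.28 / 33.2 ~= 0.16.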
@api.multi
def _get_user_vote(self):
votes = self.env['forum.post.vote'].search_read([('post_id', 'in', self._ids), ('user_id', '=', self._uid)], ['vote', 'post_id'])
mapped_vote = dict([(v['post_id'][0], v['vote']) for v in votes])
for vote in self:
vote.user_vote = mapped_vote.get(vote.id, 0)
@api.multi
@api.depends('vote_ids.vote')
def _get_vote_count(self):
read_group_res = self.env['forum.post.vote'].read_group([('post_id', 'in', self._ids)], ['post_id', 'vote'], ['post_id', 'vote'], lazy=False)
result = dict.fromkeys(self._ids, 0)
for data in read_group_res:
result[data['post_id'][0]] += data['__count'] * int(data['vote'])
for post in self:
post.vote_count = result[post.id]
@api.one
def _get_user_favourite(self):
self.user_favourite = self._uid in self.favourite_ids.ids
@api.one
@api.depends('favourite_ids')
def _get_favorite_count(self):
self.favourite_count = len(self.favourite_ids)
@api.one
@api.depends('create_uid', 'parent_id')
def _is_self_reply(self):
self.self_reply = self.parent_id.create_uid.id == self._uid
@api.one
@api.depends('child_ids.create_uid', 'website_message_ids')
def _get_child_count(self):
def process(node):
total = len(node.website_message_ids) + len(node.child_ids)
for child in node.child_ids:
total += process(child)
return total
self.child_count = process(self)
@api.one
def _get_uid_has_answered(self):
self.uid_has_answered = any(answer.create_uid.id == self._uid for answer in self.child_ids)
@api.one
@api.depends('child_ids.is_correct')
def _get_has_validated_answer(self):
self.has_validated_answer = any(answer.is_correct for answer in self.child_ids)
@api.multi
def _get_post_karma_rights(self):
user = self.env.user
is_admin = user.id == SUPERUSER_ID
# sudoed recordset instead of individual posts so values can be
# prefetched in bulk
for post, post_sudo in itertools.izip(self, self.sudo()):
is_creator = post.create_uid == user
post.karma_accept = post.forum_id.karma_answer_accept_own if post.parent_id.create_uid == user else post.forum_id.karma_answer_accept_all
post.karma_edit = post.forum_id.karma_edit_own if is_creator else post.forum_id.karma_edit_all
post.karma_close = post.forum_id.karma_close_own if is_creator else post.forum_id.karma_close_all
post.karma_unlink = post.forum_id.karma_unlink_own if is_creator else post.forum_id.karma_unlink_all
post.karma_comment = post.forum_id.karma_comment_own if is_creator else post.forum_id.karma_comment_all
post.karma_comment_convert = post.forum_id.karma_comment_convert_own if is_creator else post.forum_id.karma_comment_convert_all
post.can_ask = is_admin or user.karma >= post.forum_id.karma_ask
post.can_answer = is_admin or user.karma >= post.forum_id.karma_answer
post.can_accept = is_admin or user.karma >= post.karma_accept
post.can_edit = is_admin or user.karma >= post.karma_edit
post.can_close = is_admin or user.karma >= post.karma_close
post.can_unlink = is_admin or user.karma >= post.karma_unlink
post.can_upvote = is_admin or user.karma >= post.forum_id.karma_upvote
post.can_downvote = is_admin or user.karma >= post.forum_id.karma_downvote
post.can_comment = is_admin or user.karma >= post.karma_comment
post.can_comment_convert = is_admin or user.karma >= post.karma_comment_convert
post.can_view = is_admin or user.karma >= post.karma_close or post_sudo.create_uid.karma > 0
post.can_display_biography = is_admin or post_sudo.create_uid.karma >= post.forum_id.karma_user_bio
post.can_post = is_admin or user.karma >= post.forum_id.karma_post
post.can_flag = is_admin or user.karma >= post.forum_id.karma_flag
post.can_moderate = is_admin or user.karma >= post.forum_id.karma_moderate
@api.one
@api.constrains('post_type', 'forum_id')
def _check_post_type(self):
if (self.post_type == 'question' and not self.forum_id.allow_question) \
or (self.post_type == 'discussion' and not self.forum_id.allow_discussion) \
or (self.post_type == 'link' and not self.forum_id.allow_link):
raise UserError(_('This forum does not allow %s' % self.post_type))
def _update_content(self, content, forum_id):
forum = self.env['forum.forum'].browse(forum_id)
if content and self.env.user.karma < forum.karma_dofollow:
for match in re.findall(r'<a\s.*href=".*?">', content):
content = re.sub(match, match[:3] + 'rel="nofollow" ' + match[3:], content)
if self.env.user.karma <= forum.karma_editor:
filter_regexp = r'(<img.*?>)|(<a[^>]*?href[^>]*?>)|(<[a-z|A-Z]+[^>]*style\s*=\s*[\'"][^\'"]*\s*background[^:]*:[^url;]*url)'
content_match = re.search(filter_regexp, content, re.I)
if content_match:
raise KarmaError('User karma not sufficient to post an image or link.')
return content
@api.model
def create(self, vals):
if 'content' in vals and vals.get('forum_id'):
vals['content'] = self._update_content(vals['content'], vals['forum_id'])
post = super(Post, self.with_context(mail_create_nolog=True)).create(vals)
# deleted or closed questions
if post.parent_id and (post.parent_id.state == 'close' or post.parent_id.active is False):
raise UserError(_('Posting answer on a [Deleted] or [Closed] question is not possible'))
# karma-based access
if not post.parent_id and not post.can_ask:
raise KarmaError('Not enough karma to create a new question')
elif post.parent_id and not post.can_answer:
raise KarmaError('Not enough karma to answer to a question')
if not post.parent_id and not post.can_post:
post.state = 'pending'
# add karma for posting new questions
if not post.parent_id and post.state == 'active':
self.env.user.sudo().add_karma(post.forum_id.karma_gen_question_new)
post.post_notification()
return post
@api.model
def check_mail_message_access(self, res_ids, operation, model_name=None):
if operation in ('write', 'unlink') and (not model_name or model_name == 'forum.post'):
# Make sure only author or moderator can edit/delete messages
if any(not post.can_edit for post in self.browse(res_ids)):
raise KarmaError('Not enough karma to edit a post.')
return super(Post, self).check_mail_message_access(res_ids, operation, model_name=model_name)
@api.multi
@api.depends('name', 'post_type')
def name_get(self):
result = []
for post in self:
if post.post_type == 'discussion' and post.parent_id and not post.name:
result.append((post.id, '%s (%s)' % (post.parent_id.name, post.id)))
else:
result.append((post.id, '%s' % (post.name)))
return result
@api.multi
def write(self, vals):
if 'content' in vals:
vals['content'] = self._update_content(vals['content'], self.forum_id.id)
if 'state' in vals:
if vals['state'] in ['active', 'close'] and any(not post.can_close for post in self):
raise KarmaError('Not enough karma to close or reopen a post.')
if 'active' in vals:
if any(not post.can_unlink for post in self):
raise KarmaError('Not enough karma to delete or reactivate a post')
if 'is_correct' in vals:
if any(not post.can_accept for post in self):
raise KarmaError('Not enough karma to accept or refuse an answer')
# update karma except for self-acceptance
mult = 1 if vals['is_correct'] else -1
for post in self:
if vals['is_correct'] != post.is_correct and post.create_uid.id != self._uid:
post.create_uid.sudo().add_karma(post.forum_id.karma_gen_answer_accepted * mult)
self.env.user.sudo().add_karma(post.forum_id.karma_gen_answer_accept * mult)
if any(key not in ['state', 'active', 'is_correct', 'closed_uid', 'closed_date', 'closed_reason_id'] for key in vals.keys()) and any(not post.can_edit for post in self):
raise KarmaError('Not enough karma to edit a post.')
res = super(Post, self).write(vals)
# if post content modify, notify followers
if 'content' in vals or 'name' in vals:
for post in self:
if post.parent_id:
body, subtype = _('Answer Edited'), 'website_forum.mt_answer_edit'
obj_id = post.parent_id
else:
body, subtype = _('Question Edited'), 'website_forum.mt_question_edit'
obj_id = post
obj_id.message_post(body=body, subtype=subtype)
return res
@api.multi
def post_notification(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
for post in self:
if post.state == 'active' and post.parent_id:
body = _(
'<p>A new answer for <i>%s</i> has been posted. <a href="%s/forum/%s/question/%s">Click here to access the post.</a></p>' %
(post.parent_id.name, base_url, slug(post.parent_id.forum_id), slug(post.parent_id))
)
post.parent_id.message_post(subject=_('Re: %s') % post.parent_id.name, body=body, subtype='website_forum.mt_answer_new')
elif post.state == 'active' and not post.parent_id:
body = _(
'<p>A new question <i>%s</i> has been asked on %s. <a href="%s/forum/%s/question/%s">Click here to access the question.</a></p>' %
(post.name, post.forum_id.name, base_url, slug(post.forum_id), slug(post))
)
post.message_post(subject=post.name, body=body, subtype='website_forum.mt_question_new')
elif post.state == 'pending' and not post.parent_id:
# TDE FIXME: in master, you should probably use a subtype;
# however here we remove subtype but set partner_ids
partners = post.sudo().message_partner_ids.filtered(lambda partner: partner.user_ids and partner.user_ids.karma >= post.forum_id.karma_moderate)
note_subtype = self.sudo().env.ref('mail.mt_note')
body = _(
'<p>A new question <i>%s</i> has been asked on %s and require your validation. <a href="%s/forum/%s/question/%s">Click here to access the question.</a></p>' %
(post.name, post.forum_id.name, base_url, slug(post.forum_id), slug(post))
)
post.message_post(subject=post.name, body=body, subtype_id=note_subtype.id, partner_ids=partners.ids)
return True
@api.multi
def reopen(self):
if any(post.parent_id or post.state != 'close' for post in self):
return False
reason_offensive = self.env.ref('website_forum.reason_7')
reason_spam = self.env.ref('website_forum.reason_8')
for post in self:
if post.closed_reason_id in (reason_offensive, reason_spam):
_logger.info('Upvoting user <%s>, reopening spam/offensive question',
post.create_uid)
post.create_uid.sudo().add_karma(post.forum_id.karma_gen_answer_flagged * -1)
self.sudo().write({'state': 'active'})
@api.multi
def close(self, reason_id):
if any(post.parent_id for post in self):
return False
reason_offensive = self.env.ref('website_forum.reason_7').id
reason_spam = self.env.ref('website_forum.reason_8').id
if reason_id in (reason_offensive, reason_spam):
for post in self:
_logger.info('Downvoting user <%s> for posting spam/offensive contents',
post.create_uid)
post.create_uid.sudo().add_karma(post.forum_id.karma_gen_answer_flagged)
self.write({
'state': 'close',
'closed_uid': self._uid,
'closed_date': datetime.today().strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT),
'closed_reason_id': reason_id,
})
return True
@api.one
def validate(self):
if not self.can_moderate:
raise KarmaError('Not enough karma to validate a post')
# if state == pending, no karma previously added for the new question
if self.state == 'pending':
self.create_uid.sudo().add_karma(self.forum_id.karma_gen_question_new)
self.write({
'state': 'active',
'active': True,
'moderator_id': self.env.user.id,
})
self.post_notification()
return True
@api.one
def refuse(self):
if not self.can_moderate:
raise KarmaError('Not enough karma to refuse a post')
self.moderator_id = self.env.user
return True
@api.one
def flag(self):
if not self.can_flag:
raise KarmaError('Not enough karma to flag a post')
if(self.state == 'flagged'):
return {'error': 'post_already_flagged'}
elif(self.state == 'active'):
self.write({
'state': 'flagged',
'flag_user_id': self.env.user.id,
})
return self.can_moderate and {'success': 'post_flagged_moderator'} or {'success': 'post_flagged_non_moderator'}
else:
return {'error': 'post_non_flaggable'}
@api.one
def mark_as_offensive(self, reason_id):
if not self.can_moderate:
raise KarmaError('Not enough karma to mark a post as offensive')
# remove some karma
_logger.info('Downvoting user <%s> for posting spam/offensive contents', self.create_uid)
self.create_uid.sudo().add_karma(self.forum_id.karma_gen_answer_flagged)
self.write({
'state': 'offensive',
'moderator_id': self.env.user.id,
'closed_date': datetime.today().strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT),
'closed_reason_id': reason_id,
'active': False,
})
return True
@api.multi
def unlink(self):
if any(not post.can_unlink for post in self):
raise KarmaError('Not enough karma to unlink a post')
# if unlinking an answer with accepted answer: remove provided karma
for post in self:
if post.is_correct:
post.create_uid.sudo().add_karma(post.forum_id.karma_gen_answer_accepted * -1)
self.env.user.sudo().add_karma(post.forum_id.karma_gen_answer_accepted * -1)
return super(Post, self).unlink()
@api.multi
def bump(self):
""" Bump a question: trigger a write_date by writing on a dummy bump_date
field. One cannot bump a question more than once every 10 days. """
self.ensure_one()
if self.forum_id.allow_bump and not self.child_ids and (datetime.today() - datetime.strptime(self.write_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)).days > 9:
# write through super to bypass karma; sudo to allow public user to bump any post
return self.sudo().write({'bump_date': fields.Datetime.now()})
return False
@api.multi
def vote(self, upvote=True):
Vote = self.env['forum.post.vote']
vote_ids = Vote.search([('post_id', 'in', self._ids), ('user_id', '=', self._uid)])
new_vote = '1' if upvote else '-1'
voted_forum_ids = set()
if vote_ids:
for vote in vote_ids:
if upvote:
new_vote = '0' if vote.vote == '-1' else '1'
else:
new_vote = '0' if vote.vote == '1' else '-1'
vote.vote = new_vote
voted_forum_ids.add(vote.post_id.id)
# create a vote only for the posts the current user has not voted on yet
for post_id in set(self._ids) - voted_forum_ids:
Vote.create({'post_id': post_id, 'vote': new_vote})
return {'vote_count': self.vote_count, 'user_vote': new_vote}
@api.one
def convert_answer_to_comment(self):
""" Tools to convert an answer (forum.post) to a comment (mail.message).
The original post is unlinked and a new comment is posted on the question
using the post create_uid as the comment's author. """
if not self.parent_id:
return False
# karma-based action check: use the post field that computed own/all value
if not self.can_comment_convert:
raise KarmaError('Not enough karma to convert an answer to a comment')
# post the message
question = self.parent_id
values = {
'author_id': self.sudo().create_uid.partner_id.id, # use sudo here because of access to res.users model
'body': tools.html_sanitize(self.content, strict=True, strip_style=True, strip_classes=True),
'message_type': 'comment',
'subtype': 'mail.mt_comment',
'date': self.create_date,
}
new_message = self.browse(question.id).with_context(mail_create_nosubscribe=True).message_post(**values)
# unlink the original answer, using SUPERUSER_ID to avoid karma issues
self.sudo().unlink()
return new_message
@api.model
def convert_comment_to_answer(self, message_id, default=None):
""" Tool to convert a comment (mail.message) into an answer (forum.post).
The original comment is unlinked and a new answer from the comment's author
is created. Nothing is done if the comment's author already answered the
question. """
comment = self.env['mail.message'].sudo().browse(message_id)
post = self.browse(comment.res_id)
if not comment.author_id or not comment.author_id.user_ids: # only comment posted by users can be converted
return False
# karma-based action check: must check the message's author to know if own / all
karma_convert = comment.author_id.id == self.env.user.partner_id.id and post.forum_id.karma_comment_convert_own or post.forum_id.karma_comment_convert_all
can_convert = self.env.user.karma >= karma_convert
if not can_convert:
raise KarmaError('Not enough karma to convert a comment to an answer')
# check the message's author has not already an answer
question = post.parent_id if post.parent_id else post
post_create_uid = comment.author_id.user_ids[0]
if any(answer.create_uid.id == post_create_uid.id for answer in question.child_ids):
return False
# create the new post
post_values = {
'forum_id': question.forum_id.id,
'content': comment.body,
'parent_id': question.id,
}
# done with the author user to have create_uid correctly set
new_post = self.sudo(post_create_uid.id).create(post_values)
# delete comment
comment.unlink()
return new_post
@api.one
def unlink_comment(self, message_id):
user = self.env.user
comment = self.env['mail.message'].sudo().browse(message_id)
if not comment.model == 'forum.post' or not comment.res_id == self.id:
return False
# karma-based action check: must check the message's author to know if own or all
karma_unlink = comment.author_id.id == user.partner_id.id and self.forum_id.karma_comment_unlink_own or self.forum_id.karma_comment_unlink_all
can_unlink = user.karma >= karma_unlink
if not can_unlink:
raise KarmaError('Not enough karma to unlink a comment')
return comment.unlink()
@api.multi
def set_viewed(self):
self._cr.execute("""UPDATE forum_post SET views = views+1 WHERE id IN %s""", (self._ids,))
return True
@api.multi
def get_access_action(self):
""" Override method that generated the link to access the document. Instead
of the classic form view, redirect to the post on the website directly """
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'url': '/forum/%s/question/%s' % (self.forum_id.id, self.id),
'target': 'self',
'res_id': self.id,
}
@api.multi
def _notification_get_recipient_groups(self, message, recipients):
""" Override to set the access button: everyone can see an access button
on their notification email. It will lead on the website view of the
post. """
res = super(Post, self)._notification_get_recipient_groups(message, recipients)
access_action = self._notification_link_helper('view', model=message.model, res_id=message.res_id)
for category, data in res.iteritems():
res[category]['button_access'] = {'url': access_action, 'title': '%s %s' % (_('View'), self.post_type)}
return res
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, message_type='notification', subtype=None, context=None, **kwargs):
if thread_id and message_type == 'comment': # user comments have a restriction on karma
if isinstance(thread_id, (list, tuple)):
post_id = thread_id[0]
else:
post_id = thread_id
post = self.browse(cr, uid, post_id, context=context)
            # TDE FIXME: trigger browse because otherwise the function field is not computed - check with RCO
tmp1, tmp2 = post.karma_comment, post.can_comment
user = self.pool['res.users'].browse(cr, uid, uid)
tmp3 = user.karma
# TDE END FIXME
if not post.can_comment:
raise KarmaError('Not enough karma to comment')
return super(Post, self).message_post(cr, uid, thread_id, message_type=message_type, subtype=subtype, context=context, **kwargs)
class PostReason(models.Model):
_name = "forum.post.reason"
_description = "Post Closing Reason"
_order = 'name'
name = fields.Char(string='Closing Reason', required=True, translate=True)
reason_type = fields.Char(string='Reason Type')
class Vote(models.Model):
_name = 'forum.post.vote'
_description = 'Vote'
post_id = fields.Many2one('forum.post', string='Post', ondelete='cascade', required=True)
user_id = fields.Many2one('res.users', string='User', required=True, default=lambda self: self._uid)
vote = fields.Selection([('1', '1'), ('-1', '-1'), ('0', '0')], string='Vote', required=True, default='1')
create_date = fields.Datetime('Create Date', select=True, readonly=True)
forum_id = fields.Many2one('forum.forum', string='Forum', related="post_id.forum_id", store=True)
recipient_id = fields.Many2one('res.users', string='To', related="post_id.create_uid", store=True)
def _get_karma_value(self, old_vote, new_vote, up_karma, down_karma):
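        # Lookup table of the karma delta applied to the vote recipient when a
        # vote changes from old_vote to new_vote, expressed with the forum's
        # up/down karma settings; e.g. with hypothetical up_karma=10 and
        # down_karma=-2, switching '-1' -> '1' yields -1 * -2 + 10 = 12.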
_karma_upd = {
'-1': {'-1': 0, '0': -1 * down_karma, '1': -1 * down_karma + up_karma},
'0': {'-1': 1 * down_karma, '0': 0, '1': up_karma},
'1': {'-1': -1 * up_karma + down_karma, '0': -1 * up_karma, '1': 0}
}
return _karma_upd[old_vote][new_vote]
@api.model
def create(self, vals):
vote = super(Vote, self).create(vals)
# own post check
if vote.user_id.id == vote.post_id.create_uid.id:
raise UserError(_('Not allowed to vote for its own post'))
# karma check
if vote.vote == '1' and not vote.post_id.can_upvote:
raise KarmaError('Not enough karma to upvote.')
elif vote.vote == '-1' and not vote.post_id.can_downvote:
raise KarmaError('Not enough karma to downvote.')
if vote.post_id.parent_id:
karma_value = self._get_karma_value('0', vote.vote, vote.forum_id.karma_gen_answer_upvote, vote.forum_id.karma_gen_answer_downvote)
else:
karma_value = self._get_karma_value('0', vote.vote, vote.forum_id.karma_gen_question_upvote, vote.forum_id.karma_gen_question_downvote)
vote.recipient_id.sudo().add_karma(karma_value)
return vote
@api.multi
def write(self, values):
if 'vote' in values:
for vote in self:
# own post check
if vote.user_id.id == vote.post_id.create_uid.id:
raise UserError(_('Not allowed to vote for its own post'))
# karma check
if (values['vote'] == '1' or vote.vote == '-1' and values['vote'] == '0') and not vote.post_id.can_upvote:
raise KarmaError('Not enough karma to upvote.')
elif (values['vote'] == '-1' or vote.vote == '1' and values['vote'] == '0') and not vote.post_id.can_downvote:
raise KarmaError('Not enough karma to downvote.')
# karma update
if vote.post_id.parent_id:
karma_value = self._get_karma_value(vote.vote, values['vote'], vote.forum_id.karma_gen_answer_upvote, vote.forum_id.karma_gen_answer_downvote)
else:
karma_value = self._get_karma_value(vote.vote, values['vote'], vote.forum_id.karma_gen_question_upvote, vote.forum_id.karma_gen_question_downvote)
vote.recipient_id.sudo().add_karma(karma_value)
res = super(Vote, self).write(values)
return res
class Tags(models.Model):
_name = "forum.tag"
_description = "Forum Tag"
_inherit = ['website.seo.metadata']
name = fields.Char('Name', required=True)
create_uid = fields.Many2one('res.users', string='Created by', readonly=True)
forum_id = fields.Many2one('forum.forum', string='Forum', required=True)
post_ids = fields.Many2many('forum.post', 'forum_tag_rel', 'forum_tag_id', 'forum_id', string='Posts')
posts_count = fields.Integer('Number of Posts', compute='_get_posts_count', store=True)
_sql_constraints = [
('name_uniq', 'unique (name, forum_id)', "Tag name already exists !"),
]
@api.multi
@api.depends("post_ids.tag_ids")
def _get_posts_count(self):
for tag in self:
tag.posts_count = len(tag.post_ids)
| agpl-3.0 | -4,066,895,212,376,344,600 | 50.798359 | 308 | 0.619546 | false |
kevindong/misc. | battleship.py | 1 | 13077 | ###############################################################################
# battleship #
# #
# Originally based off of the battleship game that the Python course on #
# Codecademy had users build. Now expanded and in desperate need of polishing.#
###############################################################################
# Note: This game's multiplayer function, in effect, just causes the code
# to have the same thing twice.
from random import randint
import sys # Imports stuff for restart program
import os # Imports stuff for restart program and for clearing the screen
def restart_program(): # Restarts program function
python = sys.executable
os.execl(python, python, * sys.argv)
def clear_screen(): # Clears screen function
os.system('cls' if os.name == 'nt' else 'clear')
one_board = [] # Initilizes the game board
two_board = []
for x in range(5): # Generates the empty game boards
one_board.append(["O"] * 5)
for x in range(5):
two_board.append(["O"] * 5)
def print_board(board): # Prints the board
for row in board:
print " ".join(row) # Removes the formatting when the 'board' is printed
def random_row(board): # Generates a random integer in the range 0-4
return randint(0, len(board) - 1)
def random_col(board):
return randint(0, len(board[0]) - 1)
def verify_int(input): # Checks to see if input is an integer
try: # Tries...
val = int(input) # If input is an integer...
return val # Returns input if it is an integer
except ValueError: # If an error is returned...
return 0 # Returns a "0"
one_ship_row = random_row(one_board) # Randomly picks the ship's first
one_ship_col = random_col(one_board) # compnent's row
two_ship_row = random_row(two_board)
two_ship_col = random_col(two_board)
one_direc = randint(1, 4) # Decides the direction of the ship's second component
two_direc = randint(1, 4) # 1 = North, 2 = East, 3 = South, 4 = West
status = 0
while status == 0: # Decides the direction of the ship's second component
if one_direc == 1: # north
one_sec_ship_row = one_ship_row - 1 # The row above is selected
one_sec_ship_col = one_ship_col # The same column is selected
if one_sec_ship_row < 0: # If the row selected is outside of the bounds:
one_sec_ship_row = one_sec_ship_row + 2 # Reverses direction to the
# other side
status = 1 # Begins the selection of player 2's second component
else: # If the row selected is inside of the bounds...
status = 1 # Begins the selection of player 2's second component
elif one_direc == 2: # east
one_sec_ship_row = one_ship_row
one_sec_ship_col = one_ship_col + 1
if one_sec_ship_col > 4:
one_sec_ship_col = one_sec_ship_col - 2
status = 1
else:
status = 1
elif one_direc == 3: # south
one_sec_ship_row = one_ship_row + 1
one_sec_ship_col = one_ship_col
if one_sec_ship_row > 4:
one_sec_ship_row = one_sec_ship_row - 2
status = 1
else:
status = 1
elif one_direc == 4: # west
one_sec_ship_row = one_ship_row
one_sec_ship_col = one_ship_col - 1
if one_sec_ship_col < 0:
one_sec_ship_col = one_sec_ship_col + 2
status = 1
else:
status = 1
else:
print "Error 1: Player 1 Ship, Component 2"
while status == 1: # The same as above, but for ship 2
if two_direc == 1:
two_sec_ship_row = two_ship_row - 1
two_sec_ship_col = two_ship_col
if two_sec_ship_row < 0:
two_sec_ship_row = two_sec_ship_row + 2
status = 2 # Begins the actual game
else:
status = 2
elif two_direc == 2:
two_sec_ship_row = two_ship_row
two_sec_ship_col = two_ship_col + 1
if two_sec_ship_col > 4:
two_sec_ship_col = two_sec_ship_col - 2
status = 2
else:
status = 2
elif two_direc == 3:
two_sec_ship_row = two_ship_row + 1
two_sec_ship_col = two_ship_col
if two_sec_ship_row > 4:
two_sec_ship_row = two_sec_ship_row - 2
status = 2
else:
status = 2
elif two_direc == 4:
two_sec_ship_row = two_ship_row
two_sec_ship_col = two_ship_col - 1
if two_sec_ship_col < 0:
two_sec_ship_col = two_sec_ship_col + 2
status = 2
else:
status = 2
else:
print "Error 2: Player 2 Ship, Component 2"
clear_screen()
# The following few lines are the beginning instructions.
print "Battleship"
print ""
print ("Type in your desired number and press 'Enter' when prompted. Only "
"enter in a single number in the range of 1 to 5. The ship fills up "
"exactly 2 adjacent blocks. The following is how the grid is "
"organized.")
'''
print "Battleship"
print ""
print ("Type in your desired number and press 'Enter' when prompted. Only "
"enter in a single number in the range of 1 to 5. The ship fills up "
"exactly 2 adjacent blocks. The following is how the grid is "
"organized."
+ " (" + str(one_ship_row + 1) + ", " + str(one_ship_col + 1) + ")"
+ " (" + str(one_sec_ship_row + 1) + ", " + str(one_sec_ship_col + 1)
+ ") &" + " (" + str(two_ship_row + 1) + ", " + str(two_ship_col + 1)
+ ")" + " (" + str(two_sec_ship_row + 1) + ", "
+ str(two_sec_ship_col + 1) + ")") # Everything after the first '#' is
# used to print the answers to this
# game. It's only used for debugging
# purposes.
'''
print ""
print "O O O O O <- Row 1"
print "O O O O O <- Row 2"
print "O O O O O <- Row 3"
print "O O O O O <- Row 4"
print "O O O O O <- Row 5"
#print "^1 ^3 ^5"
#print " ^2 ^4"
print "^ Column 1"
print " ^ Column 2"
print " ^ Column 3"
print " ^ Column 4"
print " ^ Column 5"
print ""
raw_input("Press 'Enter' to begin the game")
clear_screen()
answer = raw_input("Would you like to use your own names instead of generic nam"
"es? ")
if answer.strip() in "y Y yes Yes YES".split():
player_one_name = raw_input("What's Player 1's name? ")
player_two_name = raw_input("What's Player 2's name? ")
clear_screen()
else:
player_one_name = "Player 1"
player_two_name = "Player 2"
while True: # The infinite loop that is actual gameplay.
while status == 2: # Player 2's turn
print "It is now %s's Turn." % player_two_name
print ""
print "%s's Board" % player_one_name
print ""
print_board(one_board) # Prints player one's board
print ""
guess_row = verify_int(raw_input("Guess Row: ")) # Asks for input and
# checks to see if it's
# an integer
guess_row = guess_row - int(1) # Corrects it to what actually gets
# processed by subtracting 1
guess_col = verify_int(raw_input("Guess Col: ")) # Does the same as
# above
guess_col = guess_col - int(1) # Does the same as above
if ((guess_row == one_ship_row and guess_col == one_ship_col) or
(guess_row == one_sec_ship_row and guess_col == one_sec_ship_col)):
# The winner branch
one_board[guess_row][guess_col] = "0" # Marks the board as the
# correct answer
print ""
print ("Hit!")
if ((one_board[one_ship_row][one_ship_col] == "0") and
(one_board[one_sec_ship_row][one_sec_ship_col] == "0")):
# ^ Checks to see if both components of the ship have been
# found, if so...
print ""
print "You won!"
status = 4 # Begins the celebratory statement below
else: # If both components have NOT been found this branch is
# invoked.
raw_input("Press 'Enter' to begin " + str(player_one_name) + "'s turn")
clear_screen()
status = 3 # Begins Player 1's turn
else: # The loser branch
if ((guess_row < 0 or guess_row > 4) or
(guess_col < 0 or guess_col > 4)): # Is the answer within the
# range of 0 to 4?
print ""
print "Your guess isn't on the grid. Try again."
elif one_board[guess_row][guess_col] == "X": # Has the chosen area
# already been guessed?
print ""
print "You've already guessed this space. Try again."
elif one_board[guess_row][guess_col] == "O": # Is the chosen area
# unchosen thus far?
print ""
print "You missed!"
one_board[guess_row][guess_col] = "X" # Marks the area as chosen
raw_input("Press 'Enter' to begin " + str(player_one_name) + "'s turn")
clear_screen()
status = 3 # Begins Player 2's turn
else: # The 'just-in-case' error branch used for debugging
print "Error 3: Player 1 Turn"
else:
a = 0 # Indicates that everything ran fine
while status == 3:
print "It is now %s's Turn." % player_one_name
print ""
print "%s's Board" % player_two_name
print ""
print_board(two_board)
print ""
guess_row = verify_int(raw_input("Guess Row: "))
guess_row = guess_row - int(1)
guess_col = verify_int(raw_input("Guess Col: "))
guess_col = guess_col - int(1)
if ((guess_row == two_ship_row and guess_col == two_ship_col) or
(guess_row == two_sec_ship_row and guess_col == two_sec_ship_col)):
two_board[guess_row][guess_col] = "0"
print ""
print ("Hit!")
if ((two_board[two_ship_row][two_ship_col] == "0") and
(two_board[two_sec_ship_row][two_sec_ship_col] == "0")):
print "You won!"
status = 5
else:
raw_input("Press 'Enter' to begin " + str(player_two_name) + "'s turn")
clear_screen()
status = 2 # Begins Player 2's turn
else:
if ((guess_row < 0 or guess_row > 4)
or (guess_col < 0 or guess_col > 4)):
print ""
print "Your guess isn't on the grid. Try again."
elif two_board[guess_row][guess_col] == "X":
print ""
print "You've already guessed this space. Try again."
elif two_board[guess_row][guess_col] == "O":
print ""
print "You missed!"
two_board[guess_row][guess_col] = "X"
raw_input("Press 'Enter' to begin " + str(player_two_name) + "'s turn")
clear_screen()
status = 2
else:
print "Error 4: Player 2 Turn"
else: # Something has to be here as the changing of turns, for Player 2 to
# Player 1, causes the second layered loop (while status == 3) to
# break. Which causes the original while loop to go through the
# remainder of the program until it loops back around to the beginning
# of Player 1's loop (status ==3).
a = 0
if status == 4: # If Player 2 wins...
print "%s wins!" % player_two_name
break # Breaks the initial while loop
elif status == 5: # If Player 1 wins...
print "Player %s wins!" % player_one_name
break
else: # Something has to be here as the changing of turns, for Player 2 to
# Player 1, causes the second layered loop (while status == 3) to
# break. Which causes the original while loop to go through the
# remainder of the program until it loops back around to the beginning
# of Player 1's loop (status ==3).
a = 0
else: # Used for debugging only
print "Error 5: End Error"
answer = raw_input("Do you want to subject yourself to another game? ")
if answer.strip() in "y Y yes Yes YES".split(): # Examines 'answer' for any
# synonyms for 'yes'
restart_program() # Restarts program if found. If not, the program
# terminates.
| mit | -5,181,590,986,807,869,000 | 41.457792 | 87 | 0.51992 | false |
GPNMilano/PyPRPImporter | PyPRPImport/alc_LightClasses.py | 1 | 15589 | #
# $Id: alc_Classes.py 876 2007-12-15 22:15:11Z Paradox $
#
# Copyright (C) 2005-2006 Alcugs pyprp Project Team
# See the file AUTHORS for more info about the team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Please see the file COPYING for the full license.
# Please see the file DISCLAIMER for more details, before doing nothing.
#
try:
import Blender
try:
from Blender import Mesh
from Blender import Lamp
except Exception, detail:
print detail
except ImportError:
pass
import md5, random, binascii, cStringIO, copy, Image, math, struct, StringIO, os, os.path, pickle
from alcurutypes import *
from alchexdump import *
from alc_GeomClasses import *
from alc_Functions import *
from alcConvexHull import *
from alc_AbsClasses import *
from alc_MatClasses import *
from alc_AlcScript import *
import alcconfig, alchexdump
def stripIllegalChars(name):
name=name.replace("*","_")
name=name.replace("?","_")
name=name.replace("\\","_")
name=name.replace("/","_")
name=name.replace("<","_")
name=name.replace(">","_")
name=name.replace(":","_")
name=name.replace("\"","_")
name=name.replace("|","_")
name=name.replace("#","_")
name=name.strip()
return name
class plLightInfo(plObjInterface): #Type 0x54 (Uru)
Props = \
{ \
"kDisable" : 0, \
"kLPObsolete" : 1, \
"kLPCastShadows" : 2, \
"kLPMovable" : 3, \
"kLPHasIncludes" : 4, \
"kLPIncludesChars" : 5, \
"kLP_OBSOLECTE_0" : 6, \
"kLPOverAll" : 7, \
"kLPHasSpecular" : 8, \
"kLPShadowOnly" : 9, \
"kLPShadowLightGroup" : 10, \
"kLPForceProj" : 11, \
"kNumProps" : 12 \
}
scriptProps = \
{ \
"disable" : 0, \
"obsolete" : 1, \
"castshadows" : 2, \
"movable" : 3, \
"hasincludes" : 4, \
"includeschars" : 5, \
"obsolecte_0" : 6, \
"overall" : 7, \
"hasspecular" : 8, \
"shadowonly" : 9, \
"shadowlightgroup" : 10, \
"forceproj" : 11, \
}
def __init__(self,parent,name="unnamed",type=None):
plObjInterface.__init__(self,parent,name,type)
try: #Quick, dirty fix for NameError bug with classes from alc_GeomClasses
self.ambient = RGBA('1.0','1.0','1.0','1.0',type=1)
except NameError:
#print "Damnit! Need reimport alc_GeomClasses.py"
from alc_GeomClasses import RGBA,hsMatrix44
self.ambient = RGBA('1.0','1.0','1.0','1.0',type=1)
self.diffuse = RGBA()
self.specular = RGBA()
self.LightToLocal = hsMatrix44()
self.LocalToLight = hsMatrix44()
self.LightToWorld = hsMatrix44()
self.WorldToLight = hsMatrix44()
self.fProjection = UruObjectRef(self.getVersion()) #plLayerInterface
self.softvol = UruObjectRef(self.getVersion()) #plSoftVolume
self.scenenode = UruObjectRef(self.getVersion()) #Dunno
self.visregs = hsTArray([],self.getVersion()) #plVisRegion[]
def read(self,buf):
plObjInterface.read(self,buf)
self.ambient.read(buf)
self.diffuse.read(buf)
self.specular.read(buf)
self.LightToLocal.read(buf)
self.LocalToLight.read(buf)
self.LightToWorld.read(buf)
self.WorldToLight.read(buf)
self.fProjection.read(buf)
self.softvol.read(buf)
self.scenenode.read(buf)
self.visregs.read(buf)
def write(self,buf):
plObjInterface.write(self,buf)
self.ambient.write(buf)
self.diffuse.write(buf)
self.specular.write(buf)
self.LightToLocal.write(buf)
self.LocalToLight.write(buf)
self.LightToWorld.write(buf)
self.WorldToLight.write(buf)
self.fProjection.write(buf)
self.softvol.write(buf)
self.scenenode.write(buf)
self.visregs.write(buf)
def changePageRaw(self,sid,did,stype,dtype):
plObjInterface.changePageRaw(self,sid,did,stype,dtype)
self.softvol.changePageRaw(sid,did,stype,dtype)
self.layerint.changePageRaw(sid,did,stype,dtype)
self.scenenode.changePageRaw(sid,did,stype,dtype)
self.visregs.changePageRaw(sid,did,stype,dtype)
def _Import(scnobj,prp,obj):
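        # Scan the scene object's data1.vector keys: object types 0x55-0x57 are
        # light infos (only the first match is imported), 0xD5/0xD6 are shadow
        # masters; each key is resolved with prp.findref() and applied to `obj`.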
# Lights
for li in scnobj.data1.vector:
if li.Key.object_type in [0x55,0x56,0x57]:
light=prp.findref(li)
light.data.import_obj(obj)
break
# Shadows
for sh in scnobj.data1.vector:
if sh.Key.object_type in [0xD5,0xD6]:
shadow=prp.findref(sh)
shadow.data.import_obj(obj)
Import = staticmethod(_Import)
#list1
class plDirectionalLightInfo(plLightInfo):
def __init__(self,parent,name="unnamed",type=0x0055):
plLightInfo.__init__(self,parent,name,type)
self.softVolumeParser = None
def _Find(page,name):
return page.find(0x0055,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x0055,name,1)
FindCreate = staticmethod(_FindCreate)
def import_obj(self,obj):
#the_name=alcUniqueName(name)
if self.Key.object_type==0x55:
type="Area"
elif self.Key.object_type==0x56:
type="Lamp"
elif self.Key.object_type==0x57:
type="Spot"
else:
raise "Unknown Lamp type"
lamp=Blender.Lamp.New(type,str(self.Key.name))
obj.link(lamp)
obj.data.energy=0.5
obj.data.dist = 1000 # plasma has no distance limit for these lights, we should reflect that in blender
maxval = max(max(self.diffuse.r,self.diffuse.g),self.diffuse.b)
if maxval > 1:
obj.data.energy = maxval * 0.5
lamp.R = self.diffuse.r / maxval
lamp.G = self.diffuse.g / maxval
lamp.B = self.diffuse.b / maxval
else:
obj.data.energy = 1 * 0.5
lamp.R = self.diffuse.r
lamp.G = self.diffuse.g
lamp.B = self.diffuse.b
softVolObj = self.getRoot().findref(self.softvol)
if softVolObj != None:
obj.addProperty("softvolume",softVolObj.data.getPropertyString(),'STRING')
return obj
#implemented in an attempt to make projection lights work
class plLimitedDirLightInfo(plDirectionalLightInfo):
def __init__(self, parent, name="unnamed", type=0x006A):
plDirectionalLightInfo.__init__(self, parent, name, type)
self.fWidth = 256
self.fHeight = 256
self.fDepth = 256
def _Find(page,name):
return page.find(0x006A,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x006A,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plDirectionalLightInfo.changePageRaw(self,sid,did,stype,dtype)
def read(self, stream):
plDirectionalLightInfo.read(self,stream)
self.fWidth = stream.ReadFloat()
self.fHeight = stream.ReadFloat()
self.fDepth = stream.ReadFloat()
def write(self, stream):
plDirectionalLightInfo.write(self,stream)
stream.WriteFloat(self.fWidth)
stream.WriteFloat(self.fHeight)
stream.WriteFloat(self.fDepth)
#list1
class plOmniLightInfo(plDirectionalLightInfo): #Incorrect, but I guess it can slip
def __init__(self,parent,name="unnamed",type=0x0056):
plDirectionalLightInfo.__init__(self,parent,name,type)
#format
self.fAttenConst=1.0
self.fAttenLinear=0.0
self.fAttenQuadratic=1.0
self.fAttenCutoff=10.0
def _Find(page,name):
return page.find(0x0056,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x0056,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plDirectionalLightInfo.changePageRaw(self,sid,did,stype,dtype)
def read(self,stream):
plDirectionalLightInfo.read(self,stream)
self.fAttenConst = stream.ReadFloat()
self.fAttenLinear = stream.ReadFloat()
self.fAttenQuadratic = stream.ReadFloat()
self.fAttenCutoff = stream.ReadFloat()
def write(self,stream):
plDirectionalLightInfo.write(self,stream)
stream.WriteFloat(self.fAttenConst)
stream.WriteFloat(self.fAttenLinear)
stream.WriteFloat(self.fAttenQuadratic)
stream.WriteFloat(self.fAttenCutoff)
def import_obj(self,obj):
plDirectionalLightInfo.import_obj(self,obj)
obj.data.dist=self.fAttenCutoff*16
if self.fAttenQuadratic > 0.0:
obj.data.mode = obj.data.mode | Blender.Lamp.Modes["Quad"]
obj.data.quad1=self.fAttenLinear
obj.data.quad2=self.fAttenQuadratic
else:
obj.data.mode = obj.data.mode | Blender.Lamp.Modes["Quad"]
return obj
#list1
class plSpotLightInfo(plOmniLightInfo):
def __init__(self,parent,name="unnamed",type=0x0057):
plOmniLightInfo.__init__(self,parent,name,type)
#format
self.fFalloff=1.0
self.fSpotInner=0.0
self.fSpotOuter=0.0
def _Find(page,name):
return page.find(0x0057,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x0057,name,1)
FindCreate = staticmethod(_FindCreate)
def read(self,stream):
plOmniLightInfo.read(self,stream)
self.fFalloff = stream.ReadFloat()
self.fSpotInner = stream.ReadFloat()
self.fSpotOuter = stream.ReadFloat()
def write(self,stream):
plOmniLightInfo.write(self,stream)
stream.WriteFloat(self.fFalloff)
stream.WriteFloat(self.fSpotInner)
stream.WriteFloat(self.fSpotOuter)
def import_obj(self,obj):
obj=plOmniLightInfo.import_obj(self,obj)
lamp = obj.data
obj.addProperty("fFalloff",float(self.fFalloff))
spotSizeDeg = self.fSpotOuter * 180.0 / 3.1415926536
lamp.setSpotSize(spotSizeDeg)
blend=0.0;
if self.fSpotOuter > 0:
blend = self.fSpotInner / self.fSpotOuter
lamp.setSpotBlend(blend)
return obj
class plShadowMaster(plObjInterface): # Type: 0x00D3
plDrawProperties = \
{ \
"kDisable" : 0,\
"kSelfShadow" : 1, \
"kNumProps" : 2 \
}
def __init__(self,parent,name="unnamed",type=0x00D3):
plObjInterface.__init__(self,parent,name,type)
self.fAttenDist = 10.0
self.fMaxDist = 0.0
self.fMinDist = 0.0
self.fMaxSize = 256
self.fMinSize = 256
self.fPower = 2.0
def _Find(page,name):
return page.find(0x00D3,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x00D3,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plObjInterface.changePageRaw(self,sid,did,stype,dtype)
def read(self,stream):
plObjInterface.read(self,stream)
self.fAttenDist = stream.ReadFloat()
self.fMaxDist = stream.ReadFloat()
self.fMinDist = stream.ReadFloat()
self.fMaxSize = stream.Read32()
self.fMinSize = stream.Read32()
self.fPower = stream.ReadFloat()
def write(self,stream):
plObjInterface.write(self,stream)
stream.WriteFloat(self.fAttenDist)
stream.WriteFloat(self.fMaxDist)
stream.WriteFloat(self.fMinDist)
stream.Write32(self.fMaxSize)
stream.Write32(self.fMinSize)
stream.WriteFloat(self.fPower)
def import_obj(self,obj):
lamp = obj.data
lamp.mode |= Blender.Lamp.Modes["RayShadow"]
class plShadowCaster(plMultiModifier): #Type 0x00D4
Flags = \
{ \
"kNone" : 0, \
"kSelfShadow" : 0x1, \
"kPerspective" : 0x2, \
"kLimitRes" : 0x4 \
}
def __init__(self,parent,name="unnamed",type=0x00D4):
plMultiModifier.__init__(self,parent,name,type)
self.fCastFlags = plShadowCaster.Flags["kNone"]
self.fBoost = 1.5 # 1.0 (probable default)
self.fAttenScale = 1 # 1.0 (probable default)
self.fBlurScale = 0.3 # 0.0 (probable default)
def _Find(page,name):
return page.find(0x00D4,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x00D4,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plMultiModifier.changePageRaw(self,sid,did,stype,dtype)
def read(self,stream):
plMultiModifier.read(self,stream)
self.fCastFlags = stream.ReadByte() & ~plShadowCaster.Flags["kPerspective"]
self.fBoost = stream.ReadFloat();
self.fAttenScale = stream.ReadFloat();
self.fBlurScale = stream.ReadFloat();
def write(self,stream):
plMultiModifier.write(self,stream)
stream.WriteByte(self.fCastFlags);
stream.WriteFloat(self.fBoost);
stream.WriteFloat(self.fAttenScale);
stream.WriteFloat(self.fBlurScale);
class plPointShadowMaster(plShadowMaster): # Type: 0x00D5
def __init__(self,parent,name="unnamed",type=0x00D5):
plShadowMaster.__init__(self,parent,name,type)
def _Find(page,name):
return page.find(0x00D5,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x00D5,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plShadowMaster.changePageRaw(self,sid,did,stype,dtype)
def read(self,stream):
plShadowMaster.read(self,stream)
def write(self,stream):
plShadowMaster.write(self,stream)
class plDirectShadowMaster(plShadowMaster): # Type: 0x00D6
def __init__(self,parent,name="unnamed",type=0x00D6):
plShadowMaster.__init__(self,parent,name,type)
def _Find(page,name):
return page.find(0x00D6,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x00D6,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plShadowMaster.changePageRaw(self,sid,did,stype,dtype)
def read(self,stream):
plShadowMaster.read(self,stream)
def write(self,stream):
plShadowMaster.write(self,stream)
| gpl-2.0 | 1,765,670,760,960,683,800 | 30.115768 | 111 | 0.616396 | false |
yancharkin/games_nebula_mylib_scripts | steam/original_war/settings.py | 1 | 8568 | # -*- Mode: Python; coding: utf-8; -*-
import sys, os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
import gettext
import imp
try:
from ConfigParser import ConfigParser as ConfigParser
except:
from configparser import ConfigParser as ConfigParser
nebula_dir = os.getenv('NEBULA_DIR')
modules_dir = nebula_dir + '/modules'
set_visuals = imp.load_source('set_visuals', modules_dir + '/set_visuals.py')
gettext.bindtextdomain('games_nebula', nebula_dir + '/locale')
gettext.textdomain('games_nebula')
_ = gettext.gettext
current_dir = sys.path[0]
dict_lang = {
'English':'ENG',
'Cesky':'CZE',
'German':'GER',
'Français':'FRA',
'Español':'SPA',
'Italian':'ITA',
'Polski':'POL',
'Русский':'RUS',
'Japanease':'JAP'
}
class GUI:
def __init__(self):
self.config_load()
self.create_main_window()
def config_load(self):
config_file = current_dir + '/settings.ini'
config_parser = ConfigParser()
config_parser.read(config_file)
if not config_parser.has_section('Settings'):
self.language = 'English'
self.same_res = False
self.custom_res = False
self.custom_width = 800
self.custom_height = 600
config_parser.add_section('Settings')
config_parser.set('Settings', 'language', str(self.language))
config_parser.set('Settings', 'same_res', str(self.same_res))
config_parser.set('Settings', 'custom_res', str(self.custom_res))
config_parser.set('Settings', 'custom_width', str(self.custom_width))
config_parser.set('Settings', 'custom_height', str(self.custom_height))
new_config_file = open(config_file, 'w')
config_parser.write(new_config_file)
new_config_file.close()
else:
self.language = config_parser.get('Settings', 'language')
self.same_res = config_parser.getboolean('Settings', 'same_res')
self.custom_res = config_parser.getboolean('Settings', 'custom_res')
self.custom_width = config_parser.get('Settings', 'custom_width')
self.custom_height = config_parser.get('Settings', 'custom_height')
def config_save(self):
config_file = current_dir + '/settings.ini'
config_parser = ConfigParser()
config_parser.read(config_file)
config_parser.set('Settings', 'language', str(self.language))
config_parser.set('Settings', 'same_res', str(self.same_res))
config_parser.set('Settings', 'custom_res', str(self.custom_res))
config_parser.set('Settings', 'custom_width', str(self.custom_width))
config_parser.set('Settings', 'custom_height', str(self.custom_height))
new_config_file = open(config_file, 'w')
config_parser.write(new_config_file)
new_config_file.close()
config_file = current_dir + '/game/owar.ini'
config_parser = ConfigParser()
config_parser.read(config_file)
config_parser.set('Main', 'Language', str(dict_lang[self.language]))
new_config_file = open(config_file, 'w')
config_parser.write(new_config_file)
new_config_file.close()
self.modify_start_file()
def quit_app(self, window, event):
self.config_save()
Gtk.main_quit()
def create_main_window(self):
self.main_window = Gtk.Window(
title = _("Original War"),
type = Gtk.WindowType.TOPLEVEL,
window_position = Gtk.WindowPosition.CENTER_ALWAYS,
resizable = False,
)
self.main_window.connect('delete-event', self.quit_app)
self.grid = Gtk.Grid(
margin_left = 10,
margin_right = 10,
margin_top = 10,
margin_bottom = 10,
row_spacing = 10,
column_spacing = 10,
column_homogeneous = True,
)
self.label_language = Gtk.Label(
label = _("Language")
)
self.combobox_language = Gtk.ComboBoxText()
i = 0
for lang in dict_lang:
self.combobox_language.append_text(lang)
if lang == self.language:
active_lang = i
i += 1
self.combobox_language.set_active(active_lang)
self.combobox_language.connect('changed', self.cb_combobox_language)
self.checkbutton_custom_res = Gtk.CheckButton(
label = _("Custom resolution"),
active = self.custom_res,
tooltip_text = _("Makes the ingame resolution set to a custom resolution.")
)
self.checkbutton_custom_res.connect('toggled', self.cb_checkbutton_custom_res)
self.entry_width = Gtk.Entry(
placeholder_text = _("Width"),
no_show_all = True,
max_length = 4,
max_width_chars = 4,
xalign = 0.5,
text = self.custom_width
)
self.entry_width.set_visible(self.custom_res)
self.entry_width.connect('changed', self.cb_entry_width)
self.entry_height = Gtk.Entry(
placeholder_text = _("Height"),
no_show_all = True,
max_length = 4,
max_width_chars = 4,
xalign = 0.5,
text = self.custom_height
)
self.entry_height.set_visible(self.custom_res)
self.entry_height.connect('changed', self.cb_entry_height)
self.checkbutton_same_res = Gtk.CheckButton(
label = _("Same resolution"),
active = self.same_res,
tooltip_text = _("Makes the menus and videos use the same resolution\n" + \
"as the game instead of 800x600 and 640x480")
)
self.checkbutton_same_res.connect('toggled', self.cb_checkbutton_same_res)
self.button_save = Gtk.Button(
label = _("Save and quit")
)
self.button_save.connect('clicked', self.cb_button_save)
self.grid.attach(self.label_language, 0, 0, 1, 1)
self.grid.attach(self.combobox_language, 1, 0, 1, 1)
self.grid.attach(self.checkbutton_same_res, 0, 1, 2, 1)
self.grid.attach(self.checkbutton_custom_res, 0, 2, 2, 1)
self.grid.attach(self.entry_width, 0, 3, 1, 1)
self.grid.attach(self.entry_height, 1, 3, 1, 1)
self.grid.attach(self.button_save, 0, 4, 2, 1)
self.main_window.add(self.grid)
self.main_window.show_all()
def cb_combobox_language(self, combobox):
self.language = combobox.get_active_text()
def cb_checkbutton_custom_res(self, button):
self.custom_res = button.get_active()
self.entry_width.set_visible(self.custom_res)
self.entry_height.set_visible(self.custom_res)
def cb_checkbutton_same_res(self, button):
self.same_res = button.get_active()
def cb_entry_width(self, entry):
text = entry.get_text().strip()
new_text = (''.join([i for i in text if i in '0123456789']))
entry.set_text(new_text)
self.custom_width = new_text
def cb_entry_height(self, entry):
text = entry.get_text().strip()
new_text = (''.join([i for i in text if i in '0123456789']))
entry.set_text(new_text)
self.custom_height = new_text
def cb_button_save(self, button):
self.config_save()
Gtk.main_quit()
def modify_start_file(self):
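        # Rebuild the launcher_wine.py command with the chosen SameRes /
        # CustomRes <width> <height> flags and swap it into the existing
        # "Owar" line of start.sh.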
parameters_list = []
if self.same_res == True:
parameters_list.append('SameRes')
if self.custom_res == True:
parameters_list.append('CustomRes')
parameters_list.append(str(self.custom_width))
parameters_list.append(str(self.custom_height))
parameters_str = ' '.join(parameters_list)
new_launch_command = 'python "$NEBULA_DIR/launcher_wine.py" ' + \
'original_war "OwarOGL.exe' + ' ' + parameters_str + '"'
start_file = open(current_dir + '/start.sh', 'r')
start_file_content = start_file.readlines()
start_file.close()
for i in range(len(start_file_content)):
if 'Owar' in start_file_content[i]:
start_file_content[i] = new_launch_command
start_file = open(current_dir + '/start.sh', 'w')
start_file.writelines(start_file_content)
start_file.close()
def main():
import sys
app = GUI()
Gtk.main()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | 5,173,142,098,500,678,000 | 32.303502 | 87 | 0.584064 | false |
Tkd-Alex/Telegram-DMI-Bot | module/esami.py | 1 | 4760 | # -*- coding: utf-8 -*-
from sets import Set
import json
import requests
def esami_output(item, sessions):
output = "*Insegnamento:* " + item["insegnamento"]
output += "\n*Docenti:* " + item["docenti"]
for session in sessions:
appeal = [appeal for appeal in item[session] if appeal]
if(appeal):
output += "\n*" + session.title() + ":* " + " | ".join(appeal)
output += "\n*Anno:* " + item["anno"] + "\n"
return output
def esami_condition(items, field, value, *session):
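    # `*session` acts as a flag: when truthy, `value` names a session key
    # ("prima", "seconda", ...) and only items with at least one appeal in that
    # session are kept; otherwise `value` is substring-matched against item[field].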
output = Set()
if(session):
for item in items:
if([appeal for appeal in item[value] if appeal]):
output.add(esami_output(item, [value]))
else:
for item in items:
if(value in item[field].lower()):
output.add(esami_output(item, ("prima", "seconda", "terza", "straordinaria")))
return output
def esami_cmd(args, link):
output_str = "Poffarbacco, qualcosa non va. Segnalalo ai dev \contributors \n"
if(args):
output = Set()
r = requests.get(link)
if(r.status_code == requests.codes.ok):
items = r.json()["items"]
            #Clear arguments - Transform all to lower case - Remove words 'anno', 'sessione'
args = [x.lower().encode('utf-8') for x in args if len(x) > 2]
if 'anno' in args: args.remove('anno')
if 'sessione' in args: args.remove('sessione')
#Study case
if(len(args) == 1):
if(args[0] in ("primo", "secondo", "terzo")):
output = esami_condition(items, "anno", args[0])
elif(args[0] in ("prima", "seconda", "terza", "straordinaria")):
output = esami_condition(items, "sessione", args[0], True)
elif([item["insegnamento"].lower().find(args[0]) for item in items]):
output = esami_condition(items, "insegnamento", args[0])
if(len(output)):
output_str = '\n'.join(list(output))
output_str += "\n_Risultati trovati: " + str(len(output)) + "/" + str(r.json()["status"]["length"]) + "_"
output_str += "\n_Ultimo aggiornamento: " + r.json()["status"]["lastupdate"] + "_\n"
else:
output_str = "Nessun risultato trovato :(\n"
elif(len(args) > 1):
#Create an array of session and years if in arguments
sessions = list(set(args).intersection(("prima", "seconda", "terza", "straordinaria")))
years = list(set(args).intersection(("primo", "secondo", "terzo")))
if(sessions and years):
for item in items:
if(item["anno"].lower().replace("anno","").replace(" ", "") in years):
if( [session for session in sessions if [appeal for appeal in item[session] if appeal]] ):
output.add(esami_output(item, sessions))
elif(sessions and not years):
                #If the years array is empty but sessions is not, the remaining words are subjects
subjects = [arg for arg in args if arg not in(sessions)]
for item in items:
if(subjects):
for subject in subjects:
if( [session for session in sessions if [appeal for appeal in item[session] if appeal]] ):
if(subject in item["insegnamento"].lower()):
output.add(esami_output(item, sessions))
#List of session of all years [useless]
'''
else:
if( [session for session in sessions if [appeal for appeal in item[session] if appeal]] ):
output.add(esami_output(item, sessions))
'''
elif(not sessions and not years):
for arg in args:
output = output.union(esami_condition(items, "insegnamento", arg))
if(len(output)):
output_str = '\n'.join(list(output))
output_str += "\n_Risultati trovati: " + str(len(output)) + "/" + str(r.json()["status"]["length"]) + "_"
output_str += "\n_Ultimo aggiornamento: " + r.json()["status"]["lastupdate"] + "_\n"
else:
output_str = "Nessun risultato trovato :(\n"
else:
output_str = "Inserisci almeno uno dei seguenti parametri: giorno, materia, sessione (prima, seconda, terza, straordinaria)."
return output_str
| gpl-3.0 | 3,100,176,550,183,931,000 | 40.754386 | 133 | 0.503151 | false |
DannyArends/genenetwork2 | wqflask/maintenance/get_group_samplelists.py | 1 | 1093 | from __future__ import absolute_import, print_function, division
import os
import glob
import gzip
from base import webqtlConfig
def get_samplelist(file_type, geno_file):
if file_type == "geno":
return get_samplelist_from_geno(geno_file)
elif file_type == "plink":
return get_samplelist_from_plink(geno_file)
def get_samplelist_from_geno(genofilename):
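    # Open the .geno file (preferring a gzipped copy when present), skip blank
    # and '#'/'@' comment lines, and treat the first remaining line as the
    # header; sample names start at the 5th column when the 4th is "Mb",
    # otherwise at the 4th column.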
if os.path.isfile(genofilename + '.gz'):
genofilename += '.gz'
genofile = gzip.open(genofilename)
else:
genofile = open(genofilename)
for line in genofile:
line = line.strip()
if not line:
continue
if line.startswith(("#", "@")):
continue
break
headers = line.split("\t")
if headers[3] == "Mb":
samplelist = headers[4:]
else:
samplelist = headers[3:]
return samplelist
def get_samplelist_from_plink(genofilename):
genofile = open(genofilename)
samplelist = []
for line in genofile:
line = line.split(" ")
samplelist.append(line[1])
return samplelist
| agpl-3.0 | 5,082,873,480,145,960,000 | 22.76087 | 64 | 0.614822 | false |
ponyorm/pony | pony/orm/tests/test_hooks.py | 1 | 3970 | from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.tests import setup_database, teardown_database, db_params
logged_events = []
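# Each hook below appends a tag such as 'BI_John' (Before Insert) so the tests
# can assert the exact order in which before/after hooks fire around flushes.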
db = Database()
class Person(db.Entity):
id = PrimaryKey(int)
name = Required(unicode)
age = Required(int)
def before_insert(self):
logged_events.append('BI_' + self.name)
do_before_insert(self)
def before_update(self):
logged_events.append('BU_' + self.name)
do_before_update(self)
def before_delete(self):
logged_events.append('BD_' + self.name)
do_before_delete(self)
def after_insert(self):
logged_events.append('AI_' + self.name)
do_after_insert(self)
def after_update(self):
logged_events.append('AU_' + self.name)
do_after_update(self)
def after_delete(self):
logged_events.append('AD_' + self.name)
do_after_delete(self)
def do_nothing(person):
pass
def set_hooks_to_do_nothing():
global do_before_insert, do_before_update, do_before_delete
global do_after_insert, do_after_update, do_after_delete
do_before_insert = do_before_update = do_before_delete = do_nothing
do_after_insert = do_after_update = do_after_delete = do_nothing
db.bind(**db_params)
db.generate_mapping(check_tables=False)
set_hooks_to_do_nothing()
def flush_for(*objects):
for obj in objects:
obj.flush()
class TestHooks(unittest.TestCase):
@classmethod
def setUpClass(cls):
setup_database(db)
@classmethod
def tearDownClass(cls):
teardown_database(db)
def setUp(self):
set_hooks_to_do_nothing()
with db_session:
db.execute('delete from Person')
p1 = Person(id=1, name='John', age=22)
p2 = Person(id=2, name='Mary', age=18)
p3 = Person(id=3, name='Mike', age=25)
logged_events[:] = []
def tearDown(self):
pass
@db_session
def test_1a(self):
p4 = Person(id=4, name='Bob', age=16)
p5 = Person(id=5, name='Lucy', age=23)
self.assertEqual(logged_events, [])
db.flush()
self.assertEqual(logged_events, ['BI_Bob', 'BI_Lucy', 'AI_Bob', 'AI_Lucy'])
@db_session
def test_1b(self):
p4 = Person(id=4, name='Bob', age=16)
p5 = Person(id=5, name='Lucy', age=23)
self.assertEqual(logged_events, [])
flush_for(p4, p5)
self.assertEqual(logged_events, ['BI_Bob', 'AI_Bob', 'BI_Lucy', 'AI_Lucy'])
@db_session
def test_2a(self):
p4 = Person(id=4, name='Bob', age=16)
p1 = Person[1] # auto-flush here
p2 = Person[2]
self.assertEqual(logged_events, ['BI_Bob', 'AI_Bob'])
p2.age += 1
p5 = Person(id=5, name='Lucy', age=23)
db.flush()
self.assertEqual(logged_events, ['BI_Bob', 'AI_Bob', 'BU_Mary', 'BI_Lucy', 'AU_Mary', 'AI_Lucy'])
@db_session
def test_2b(self):
p4 = Person(id=4, name='Bob', age=16)
p1 = Person[1] # auto-flush here
p2 = Person[2]
self.assertEqual(logged_events, ['BI_Bob', 'AI_Bob'])
p2.age += 1
p5 = Person(id=5, name='Lucy', age=23)
flush_for(p4, p2, p5)
self.assertEqual(logged_events, ['BI_Bob', 'AI_Bob', 'BU_Mary', 'AU_Mary', 'BI_Lucy', 'AI_Lucy'])
@db_session
def test_3(self):
global do_before_insert
def do_before_insert(person):
some_person = Person.select().first() # should not cause infinite recursion
p4 = Person(id=4, name='Bob', age=16)
db.flush()
@db_session
def test_4(self):
global do_before_insert
def do_before_insert(person):
some_person = Person.select().first() # creates nested prefetch_context
p4 = Person(id=4, name='Bob', age=16)
Person.select().first()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,015,479,868,368,041,000 | 26.957746 | 105 | 0.58539 | false |
Spotipo/spotipo | unifispot/utils/translation.py | 1 | 6321 | from flask import g,current_app
from babel.support import LazyProxy
from flask_babelplus import gettext, lazy_gettext, ngettext
from flask_babelplus import format_datetime as _format_datetime
from flask_babelplus import Domain, get_locale
from spotipo_plugins import get_enabled_plugins
import os
import subprocess
import babel
from unifispot.ext.plugins import plugin_manager
def ugettext(s):
# we assume a before_request function
# assigns the correct user-specific
# translations
return g.translations.ugettext(s)
ugettext_lazy = LazyProxy(ugettext)
_ = gettext
_l = lazy_gettext
_n = ngettext
def format_datetime(dtime):
with current_app.app_context():
return _format_datetime(dtime)
##based on https://github.com/sh4nks/flaskbb/blob/master/flaskbb/utils/translations.py
class SpotipoDomain(Domain):
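    # Babel Domain that loads Spotipo's own catalog and merges in the catalog of
    # every enabled plugin, caching the combined Translations object per locale.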
def __init__(self, app):
self.app = app
super(SpotipoDomain, self).__init__()
self.plugins_folder = os.path.join(
os.path.join(self.app.root_path, "modules")
)
# Spotipo's translations
self.spotipo_translations = os.path.join(
self.app.root_path, "translations"
)
# Plugin translations
with self.app.app_context():
self.plugin_translations = [
os.path.join(plugin.path, "translations")
for plugin in get_enabled_plugins()
]
def get_translations(self):
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
locale = get_locale()
cache = self.get_translations_cache()
translations = cache.get(str(locale))
if translations is None:
# load Spotipo translations
translations = babel.support.Translations.load(
dirname=self.spotipo_translations,
locales=locale,
domain="messages"
)
# If no compiled translations are found, return the
# NullTranslations object.
if not isinstance(translations, babel.support.Translations):
return translations
# now load and add the plugin translations
for plugin in self.plugin_translations:
plugin_translation = babel.support.Translations.load(
dirname=plugin,
locales=locale,
domain="messages"
)
if isinstance(plugin_translation, babel.support.Translations):
translations.add(plugin_translation)
cache[str(locale)] = translations
return translations
def update_translations(include_plugins=False):
"""Updates all translations.
:param include_plugins: If set to `True` it will also update the
translations for all plugins.
"""
# update spotipo translations
translations_folder = os.path.join(current_app.root_path, "translations")
source_file = os.path.join(translations_folder, "messages.pot")
subprocess.call(["pybabel", "extract", "-F", "babel.cfg",
"-k", "lazy_gettext", "-o", source_file, "."])
subprocess.call(["pybabel", "update", "-i", source_file,
"-d", translations_folder])
if include_plugins:
# updates all plugin translations too
for plugin in plugin_manager.all_plugins:
update_plugin_translations(plugin)
def add_translations(translation):
"""Adds a new language to the translations."""
translations_folder = os.path.join(current_app.root_path, "translations")
source_file = os.path.join(translations_folder, "messages.pot")
subprocess.call(["pybabel", "extract", "-F", "babel.cfg",
"-k", "lazy_gettext", "-o", source_file, "."])
subprocess.call(["pybabel", "init", "-i", source_file,
"-d", translations_folder, "-l", translation])
def compile_translations(include_plugins=False):
"""Compiles all translations.
:param include_plugins: If set to `True` it will also compile the
translations for all plugins.
"""
# compile spotipo translations
translations_folder = os.path.join(current_app.root_path, "translations")
subprocess.call(["pybabel", "compile", "-d", translations_folder])
if include_plugins:
# compile all plugin translations
for plugin in plugin_manager.all_plugins:
compile_plugin_translations(plugin)
def add_plugin_translations(plugin, translation):
"""Adds a new language to the plugin translations. Expects the name
of the plugin and the translations name like "en".
"""
plugin_folder = os.path.join(current_app.root_path,'modules', plugin)
translations_folder = os.path.join(plugin_folder, "translations")
source_file = os.path.join(translations_folder, "messages.pot")
subprocess.call(["pybabel", "extract", "-F", "babel.cfg",
"-k", "lazy_gettext", "-o", source_file,
plugin_folder])
subprocess.call(["pybabel", "init", "-i", source_file,
"-d", translations_folder, "-l", translation])
def update_plugin_translations(plugin):
"""Updates the plugin translations. Expects the name of the plugin."""
plugin_folder = os.path.join(current_app.root_path,'modules', plugin)
translations_folder = os.path.join(plugin_folder, "translations")
source_file = os.path.join(translations_folder, "messages.pot")
subprocess.call(["pybabel", "extract", "-F", "babel.cfg",
"-k", "lazy_gettext", "-o", source_file,
plugin_folder])
subprocess.call(["pybabel", "update", "-i", source_file,
"-d", translations_folder])
def compile_plugin_translations(plugin):
"""Compile the plugin translations. Expects the name of the plugin."""
    plugin_folder = os.path.join(current_app.root_path,'modules', plugin)
translations_folder = os.path.join(plugin_folder, "translations")
subprocess.call(["pybabel", "compile", "-d", translations_folder])
| agpl-3.0 | -8,354,495,254,639,516,000 | 35.327586 | 86 | 0.629647 | false |
librallu/cohorte-herald | python/run_pas12.py | 1 | 5011 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Runs an Herald HTTP framework
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.3
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 3)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Herald constants
import herald.transports.http
# Pelix
from pelix.ipopo.constants import use_waiting_list
import pelix.framework
import pelix.http
# Standard library
import argparse
import logging
# ------------------------------------------------------------------------------
def main(http_port, peer_name, node_name, app_id):
"""
Runs the framework
:param http_port: HTTP port to listen to
:param peer_name: Name of the peer
:param node_name: Name (also, UID) of the node hosting the peer
:param app_id: Application ID
"""
# Create the framework
framework = pelix.framework.create_framework(
('pelix.ipopo.core',
'pelix.ipopo.waiting',
'pelix.shell.core',
'pelix.shell.ipopo',
'pelix.shell.console',
'pelix.http.basic',
# Herald core
'herald.core',
'herald.directory',
'herald.shell',
# Herald HTTP
'herald.transports.http.directory',
'herald.transports.http.discovery_multicast',
'herald.transports.http.servlet',
'herald.transports.http.transport',
# RPC
'pelix.remote.dispatcher',
'pelix.remote.registry',
'herald.remote.discovery',
'herald.remote.herald_xmlrpc',
# ROUTING
'herald.routing_handler',
'herald.routing_hellos',
'herald.routing_roads',
'herald.routing_json',
# TEST
'displayMessages',
),
{herald.FWPROP_NODE_UID: node_name,
herald.FWPROP_NODE_NAME: node_name,
herald.FWPROP_PEER_NAME: peer_name,
herald.FWPROP_APPLICATION_ID: app_id})
# Start everything
framework.start()
context = framework.get_bundle_context()
# Instantiate components
with use_waiting_list(context) as ipopo:
# ... HTTP server
ipopo.add(pelix.http.FACTORY_HTTP_BASIC, "http-server",
{pelix.http.HTTP_SERVICE_PORT: http_port})
# ... HTTP reception servlet
ipopo.add(herald.transports.http.FACTORY_SERVLET,
"herald-http-servlet")
# ... HTTP multicast discovery
ipopo.add(herald.transports.http.FACTORY_DISCOVERY_MULTICAST,
"herald-http-discovery-multicast")
ipopo.add(herald.transports.http.FACTORY_DISCOVERY_MULTICAST,
"herald-http-discovery-multicast2", {'multicast.port': 42001})
# Start the framework and wait for it to stop
framework.wait_for_stop()
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description="Pelix Herald demo")
# HTTP server
group = parser.add_argument_group("HTTP Configuration",
"Configuration of the HTTP transport")
group.add_argument("-p", "--port", action="store", type=int, default=0,
dest="http_port", help="Port of the HTTP server")
# Peer info
group = parser.add_argument_group("Peer Configuration",
"Identity of the Peer")
group.add_argument("-n", "--name", action="store", default=None,
dest="name", help="Peer name")
group.add_argument("--node", action="store", default=None,
dest="node", help="Node name")
group.add_argument("-a", "--app", action="store",
default=herald.DEFAULT_APPLICATION_ID,
dest="app_id", help="Application ID")
# Parse arguments
args = parser.parse_args()
# Configure the logging package
logging.basicConfig(level=logging.INFO)
logging.getLogger('herald').setLevel(logging.DEBUG)
logging.getLogger("requests").setLevel(logging.WARNING)
# Run the framework
main(args.http_port, args.name, args.node, args.app_id)
| apache-2.0 | -3,697,845,401,348,223,500 | 30.31875 | 80 | 0.594692 | false |
brandm/live-py-plugin | test/PySrc/tests/test_code_tracer_matplotlib.py | 1 | 1087 | import re
import pytest
import sys
from canvas import Canvas
from code_tracer import CodeTracer
from test_report_builder import trim_report
@pytest.fixture(name='is_matplotlib_cleared')
def clear_matplotlib():
for should_yield in (True, False):
to_delete = [module_name
for module_name in sys.modules
if module_name.startswith('matplotlib')]
for module_name in to_delete:
del sys.modules[module_name]
if should_yield:
yield True
def replace_image(report):
report = trim_report(report)
report = re.sub(r"image=u?'[a-zA-Z0-9+/=]*'", "image='...'", report)
return report
def test_show(is_matplotlib_cleared):
assert is_matplotlib_cleared
code = """\
import matplotlib.pyplot as plt
data = [1, 2]
plt.plot(data)
plt.show()
"""
expected_report = """\
create_image
0
0
image='...'
"""
canvas = Canvas(width=100, height=100)
tracer = CodeTracer(canvas)
report = tracer.trace_turtle(code)
assert expected_report == replace_image(report)
| mit | -4,368,468,544,927,899,000 | 20.74 | 72 | 0.632935 | false |
snorecore/lights | tests/test_light_func.py | 1 | 1566 | import pytest
import time
# Not entirely happy with this check
# TODO Change when I understand pytest better
def skip_if_unplugged():
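    # Probe the USB bus for the FT232H breakout (vendor 0x0403, product 0x6014)
    # and skip the test when the board is not attached.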
import usb.core
dev = usb.core.find(idVendor=0x0403, idProduct=0x6014)
if dev is None:
pytest.skip("FT232H not plugged in")
def test_lights(num_lights, light_type, brightness, backend):
# Skip if unplugged
if backend == "pyftdi":
skip_if_unplugged()
# Create string of lights
light_string = light_type(num_lights, backend=backend,
brightness=brightness)
# Set light colours
colours = [(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255)]
for i in range(num_lights):
light_string[i] = colours[i % len(colours)]
# Update light
light_string.show()
# Show lights for a few seconds
time.sleep(3)
def test_lights_2(num_lights, light_type, brightness, backend):
# Skip if unplugged
if backend == "pyftdi":
skip_if_unplugged()
# Create string of lights
light_string = light_type(num_lights, brightness=brightness)
light_string.backend = backend
# Set all lights to white
light_string.set_all(255, 255, 255)
# Display changes
light_string.show()
# Show lights for a few seconds
time.sleep(3)
# Clear all lights (set to black)
light_string.clear()
# Display changes
light_string.show()
# Show lights for a few seconds
time.sleep(3)
| mit | 7,846,304,336,833,207,000 | 22.727273 | 64 | 0.60281 | false |
stormi/tsunami | src/primaires/salle/commandes/decor/retirer.py | 1 | 2992 | # -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant le paramètre 'retirer' de la commande 'décor'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmRetirer(Parametre):
"""Commande 'décor retirer'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "retirer", "remove")
self.schema = "<cle>"
self.aide_courte = "retire un décor"
self.aide_longue = \
"Cette commande permet de retirer un ou plusieurs décors " \
"dans la salle où vous vous trouvez. Vous devez préciser " \
"la clé du prototype de décor. Si plusieurs décors de " \
"ce prototype sont dans la salle, ils seront tous retirés."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
cle = dic_masques["cle"].cle
try:
decor = importeur.salle.decors[cle]
except KeyError:
personnage << "|err|Ce décor {} est inconnu.|ff|".format(cle)
else:
nb_avt = len(personnage.salle.decors)
personnage.salle.supprimer_decors(decor.cle)
nb_apr = len(personnage.salle.decors)
nb = nb_avt - nb_apr
if nb == 0:
personnage << "|err|aucun décor n'a été retiré.|ff|"
else:
personnage << "{} décor(s) retiré(s).".format(nb)
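# In-game usage example (the decor key 'fontaine' is illustrative): entering
# 'décor retirer fontaine' removes every decor built from the 'fontaine'
# prototype in the character's current room and reports how many were removed.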
| bsd-3-clause | -2,200,089,850,346,754,300 | 44.707692 | 79 | 0.683608 | false |
wevote/WeVoteServer | import_export_batches/controllers_ballotpedia.py | 1 | 3545 | # import_export_batches/controllers_ballotpedia.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import create_batch_from_json_wrapper, BATCH_HEADER_MAP_BALLOT_ITEMS_TO_BALLOTPEDIA_BALLOT_ITEMS, \
BATCH_HEADER_MAP_CANDIDATES_TO_BALLOTPEDIA_CANDIDATES, BATCH_HEADER_MAP_CONTEST_OFFICES_TO_BALLOTPEDIA_RACES, \
BATCH_HEADER_MAP_MEASURES_TO_BALLOTPEDIA_MEASURES
# from import_export_ballotpedia.controllers import groom_ballotpedia_data_for_processing
import wevote_functions.admin
logger = wevote_functions.admin.get_logger(__name__)
# VOTE_SMART_API_KEY = get_environment_variable("VOTE_SMART_API_KEY")
CANDIDATE = 'CANDIDATE'
CONTEST_OFFICE = 'CONTEST_OFFICE'
ELECTED_OFFICE = 'ELECTED_OFFICE'
IMPORT_BALLOT_ITEM = 'IMPORT_BALLOT_ITEM'
IMPORT_VOTER = 'IMPORT_VOTER'
MEASURE = 'MEASURE'
POLITICIAN = 'POLITICIAN'
def store_ballotpedia_json_response_to_import_batch_system(
modified_json_list=[], google_civic_election_id='', kind_of_batch='',
batch_set_id=0, state_code=""):
success = False
status = ""
batch_header_id = 0
number_of_batch_rows = 0
    if kind_of_batch == CONTEST_OFFICE:
filename = "Races from Ballotpedia API"
if state_code != "":
filename += " for " + state_code.upper()
organization_we_vote_id = ""
results = create_batch_from_json_wrapper(
filename, modified_json_list,
BATCH_HEADER_MAP_CONTEST_OFFICES_TO_BALLOTPEDIA_RACES, kind_of_batch,
google_civic_election_id, organization_we_vote_id, batch_set_id=batch_set_id, state_code=state_code)
return results
    elif kind_of_batch == CANDIDATE:
filename = "Candidates from Ballotpedia API"
if state_code != "":
filename += " for " + state_code.upper()
organization_we_vote_id = ""
results = create_batch_from_json_wrapper(
filename, modified_json_list,
BATCH_HEADER_MAP_CANDIDATES_TO_BALLOTPEDIA_CANDIDATES, kind_of_batch,
google_civic_election_id, organization_we_vote_id, batch_set_id=batch_set_id, state_code=state_code)
return results
    elif kind_of_batch == MEASURE:
filename = "Measures from Ballotpedia API"
if state_code != "":
filename += " for " + state_code.upper()
organization_we_vote_id = ""
results = create_batch_from_json_wrapper(
filename, modified_json_list,
BATCH_HEADER_MAP_MEASURES_TO_BALLOTPEDIA_MEASURES, kind_of_batch,
google_civic_election_id, organization_we_vote_id, batch_set_id=batch_set_id, state_code=state_code)
return results
    elif kind_of_batch == IMPORT_BALLOT_ITEM:
filename = "Ballot Items for Address from Ballotpedia API"
if state_code != "":
filename += " for " + state_code.upper()
organization_we_vote_id = ""
# BATCH_HEADER_MAP_BALLOT_ITEMS_TO_BALLOTPEDIA_VOTER_DISTRICTS was used, but didn't make sense
results = create_batch_from_json_wrapper(
filename, modified_json_list,
BATCH_HEADER_MAP_BALLOT_ITEMS_TO_BALLOTPEDIA_BALLOT_ITEMS, kind_of_batch,
google_civic_election_id, organization_we_vote_id, batch_set_id=batch_set_id, state_code=state_code)
return results
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
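# Minimal usage sketch (identifiers below are illustrative, not real election
# data); for a recognized kind_of_batch the result comes straight from
# create_batch_from_json_wrapper, otherwise the fall-through dict above is
# returned:
#
#   results = store_ballotpedia_json_response_to_import_batch_system(
#       modified_json_list=race_json_list,
#       google_civic_election_id='1000052',
#       kind_of_batch=CONTEST_OFFICE,
#       state_code='va')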
| mit | 2,723,810,443,694,618,600 | 43.873418 | 115 | 0.65811 | false |
alephdata/aleph | aleph/migrate/versions/9dcef7592cea_added_fields_to_entitysetitem_to_support_profiles.py | 1 | 2882 | """Added fields to EntitySetItem to support profiles and removed Linkage
Revision ID: 9dcef7592cea
Revises: 3174fef04825
Create Date: 2020-07-21 13:42:06.509804
"""
from itertools import groupby, takewhile
from alembic import op
import sqlalchemy as sa
revision = "9dcef7592cea"
down_revision = "4c9e198c5b31"
def upgrade():
op.add_column(
"entityset_item", sa.Column("added_by_id", sa.Integer(), nullable=True)
)
judgement_enum = sa.Enum(
"POSITIVE", "NEGATIVE", "UNSURE", "NO_JUDGEMENT", name="judgement"
)
judgement_enum.create(op.get_bind())
op.add_column(
"entityset_item", sa.Column("judgement", judgement_enum, nullable=True)
)
op.create_foreign_key(None, "entityset_item", "role", ["added_by_id"], ["id"])
op.add_column(
"entityset_item",
sa.Column("compared_to_entity_id", sa.String(length=128), nullable=True),
)
bind = op.get_bind()
meta = sa.MetaData()
meta.bind = bind
meta.reflect()
linkage_table = meta.tables["linkage"]
entityset_table = meta.tables["entityset"]
item_table = meta.tables["entityset_item"]
q = sa.update(item_table)
q = q.values({"judgement": "POSITIVE"})
bind.execute(q)
q = sa.select([linkage_table]).order_by("profile_id")
rp = bind.execute(q)
profiles = groupby(
takewhile(lambda x: x is not None, rp), key=lambda x: str(x.profile_id),
)
judgement_lookup = {
True: "POSITIVE",
False: "NEGATIVE",
None: "UNSURE",
}
for profile_id, links in profiles:
links = list(links)
role_id = links[0].context_id
collection_id = links[0].collection_id
created_at = min(link.created_at for link in links)
updated_at = max(link.updated_at for link in links)
q = sa.insert(entityset_table)
q = q.values(
{
"id": profile_id,
"label": "linkage_migrate",
"type": "profile",
"role_id": role_id,
"collection_id": collection_id,
"updated_at": updated_at,
"created_at": created_at,
}
)
bind.execute(q)
for link in links:
judgment = judgement_lookup[link.decision]
q = sa.insert(item_table)
q = q.values(
{
"entityset_id": profile_id,
"entity_id": link.entity_id,
"collection_id": collection_id,
"updated_at": link.updated_at,
"created_at": link.created_at,
"added_by_id": link.decider_id,
"judgement": judgment,
"deleted_at": None,
}
)
bind.execute(q)
op.drop_table("linkage")
def downgrade():
pass
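# Note: upgrade() back-fills one "profile" EntitySet per linkage profile and
# maps the old boolean decisions to judgements (True -> POSITIVE,
# False -> NEGATIVE, None -> UNSURE) before dropping the linkage table; since
# downgrade() is a no-op, this migration is effectively irreversible.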
| mit | 846,511,055,351,786,100 | 28.111111 | 82 | 0.548924 | false |
nagaozen/my-os-customizations | home/nagaozen/.gnome2/gedit/plugins/codecompletion/__init__.py | 1 | 2726 | # -*- coding: utf-8 -*-
# gedit CodeCompletion plugin
# Copyright (C) 2011 Fabio Zendhi Nagao
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gedit
import jsonprovider
class JSONCompletionWindowHelper:
def __init__(self, plugin, window):
self._window = window
self._plugin = plugin
self._provider = jsonprovider.JSONProvider(plugin)
# Add the provider to all the views
for view in self._window.get_views():
self.add_view(view)
self._tab_added_id = self._window.connect('tab-added', self.on_tab_added)
self._tab_removed_id = self._window.connect('tab-removed', self.on_tab_removed)
def deactivate(self):
# Remove the provider from all the views
for view in self._window.get_views():
view.get_completion().completion.remove_provider(self._provider)
self._window.disconnect(self._tab_added_id)
self._window.disconnect(self._tab_removed_id)
self._window = None
self._plugin = None
def update_ui(self):
pass
def add_view(self, view):
view.get_completion().add_provider(self._provider)
def remove_view(self, view):
view.get_completion().remove_provider(self._provider)
def on_tab_added(self, window, tab):
# Add provider to the new view
self.add_view(tab.get_view())
def on_tab_removed(self, window, tab):
# Remove provider from the view
self.remove_view(tab.get_view())
class JSONCompletionPlugin(gedit.Plugin):
WINDOW_DATA_KEY = "JSONCompletionPluginWindowData"
def __init__(self):
gedit.Plugin.__init__(self)
def activate(self, window):
helper = JSONCompletionWindowHelper(self, window)
window.set_data(self.WINDOW_DATA_KEY, helper)
def deactivate(self, window):
window.get_data(self.WINDOW_DATA_KEY).deactivate()
window.set_data(self.WINDOW_DATA_KEY, None)
def update_ui(self, window):
window.get_data(self.WINDOW_DATA_KEY).update_ui()
# ex:ts=4:et:
| gpl-3.0 | 7,508,295,284,640,012,000 | 32.654321 | 87 | 0.651137 | false |
faustedition/faust-app | faust-utils/data_curation/check_xml_space.py | 1 | 1569 | #!/usr/bin/env python
#
# Search for missing @xml:space
import sys
import lxml.etree
import faust
ignored_tags = (
"app", "back", "body", "choice", "div", "docTitle", "fix", "front", "fw", "g",
"group", "lg", "overw", "patch", "sp", "subst", "surface", "text", "titlePage", "titlePart",
"used", "zone")
ignored_empty_elems = (
"addSpan", "anchor", "cb", "certainty", "damageSpan", "delSpan", "gap", "grBrace", "grLine", "handShift",
"ins", "join", "lb", "pb", "space", "st", "undo", "p")
element_selector_xp = faust.xpath("//*[(ancestor::tei:text or ancestor::ge:document) and not(@xml:space)]")
text_content_xp = faust.xpath("normalize-space()")
candidates = dict()
for xml_file in faust.xml_files():
try:
if faust.is_tei_document(xml_file):
xml = lxml.etree.parse(xml_file)
xml_key = faust.relative_path(xml_file)
candidates[xml_key] = []
for elem in element_selector_xp(xml):
if elem.tag.startswith(faust.ns("svg")): continue
local_name = elem.tag[elem.tag.rfind("}") + 1:]
if local_name in ignored_tags: continue
empty_elem = elem.text is None and len(elem) == 0
if empty_elem and local_name in ignored_empty_elems: continue
text_content = text_content_xp(elem)
if empty_elem or (len(text_content) > 0 and len(text_content.strip()) == 0):
candidates[xml_key].append(lxml.etree.tostring(elem))
except IOError:
sys.stderr.write("I/O error while validating " + xml_file + "\n")
for xml_key in candidates:
elems = candidates[xml_key]
if len(elems) > 0: print xml_key, "===>", repr(elems)
| agpl-3.0 | -3,056,886,725,584,611,000 | 32.382979 | 107 | 0.635437 | false |
lpouillo/execo-g5k-tools | engines/l2c_fft_eval/l2c_fft.py | 1 | 7301 | #!/usr/bin/env python
import os, math, sys
from pprint import pformat
from tempfile import mkstemp
from execo import Process
from execo import logger as ex_log
from execo.log import style
from execo_g5k import get_site_clusters, OarSubmission, oardel, get_cluster_site, \
oarsub, wait_oar_job_start, get_oar_job_nodes, get_host_attributes, get_oar_job_info, \
g5k_configuration
from execo_engine import Engine, logger, ParamSweeper, sweep, slugify, igeom
# Configure OAR to use always the same key
g5k_configuration['oar_job_key_file'] = '/home/jrichard/.oar_key'
#ex_log.setLevel('DEBUG')
def expRange(start, end, base=2):
"""
    Generate a list containing a geometric progression
    starting from 'start' and ending at 'end'
"""
return igeom(start, end, int(math.log(end/start)/math.log(base)+1.5))
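# For reference (assuming execo_engine.igeom yields an integer geometric
# progression, as its use here suggests): expRange(4, 64) gives the powers of
# two [4, 8, 16, 32, 64], which is how the 'cores' parameter space is built
# further down.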
class l2c_fft(Engine):
workingPath = '/home/jrichard/l2c-fft-new-distrib/bin'
genLadScript = '/home/jrichard/l2c-fft-new-distrib/src/utils/gen-lad/genPencil.py'
def run(self):
"""
Main engine method to perform the experiment
"""
self.define_parameters()
while len(self.sweeper.get_remaining()) > 0:
# Getting the next combination
comb = self.sweeper.get_next()
logger.info(style.host(slugify(comb)) + ' has been started')
self.get_nodes(comb)
# If the job is broken, the program is stopped
if get_oar_job_info(self.oar_job_id, self.frontend)['state'] == 'Error':
break
try:
self.workflow(comb)
# Process all combinations that can use the same submission
while True:
# Find the next combination combinations that can use the same submission
subcomb = self.sweeper.get_next(lambda r:
filter(lambda x: x['cores'] == comb['cores']
and x['cluster'] == comb['cluster'], r))
if not subcomb:
logger.info('No more combination for cluster=%s and cores=%s',
comb['cluster'], comb['cores'])
break
else:
logger.info(style.host(slugify(subcomb)) + ' has been started')
if get_oar_job_info(self.oar_job_id, self.frontend)['state'] != 'Error':
self.workflow(subcomb)
else:
break
# Whatever happens (errors, end of loop), the job is deleted
finally:
logger.info('Deleting job...')
oardel([(self.oar_job_id, self.frontend)])
def workflow(self, comb):
"""
Compute one application launch
using a given parameter group
"""
comb_ok = False
try:
# Generate configuration file needed by MPI processes
logger.info("Generating assembly file...")
py = comb['cores'] / comb['px']
prepare = Process('cd %s && python %s %d %d %d %d %d %s app.lad' %
(self.workingPath, self.genLadScript, comb['datasize'], comb['datasize'],
comb['datasize'], comb['px'], py, comb['transposition']))
prepare.shell = True
prepare.run()
# Generate the MPI host file
mfile = self.generate_machine_file()
# Start L2C
lad = "./app.lad"
logger.info("Computing...")
res = Process("export OAR_JOB_KEY_FILE=~/.oar_key ; cd %s && l2c_loader -M,-machinefile,%s --mpi -c %d %s" % (self.workingPath, mfile, comb['cores'], lad))
res.shell = True
res.stdout_handlers.append(os.path.join(self.result_dir, slugify(comb) + '.out'))
res.stdout_handlers.append(sys.stdout)
res.stderr_handlers.append(os.path.join(self.result_dir, slugify(comb) + '.err'))
res.stderr_handlers.append(sys.stderr)
res.run()
if not res.ok:
logger.error('Bad L2C termination')
raise Exception('Bad L2C termination')
if len(res.stderr) > 0: # WARNING: when L2C cannot find the LAD file or something strange like this
logger.warning('Not empty error output')
# Clean configuration files
logger.info("Removing assembly files...")
res = Process('cd %s && rm -f app.lad*' % self.workingPath)
res.shell = True
res.run()
comb_ok = True
except Exception:
pass
finally:
if comb_ok:
self.sweeper.done(comb)
logger.info(style.host(slugify(comb)) + ' has been done')
else:
self.sweeper.cancel(comb)
logger.warning(style.host(slugify(comb)) + ' has been canceled')
logger.info(style.step('%s Remaining'),
len(self.sweeper.get_remaining()))
def define_parameters(self):
"""
        Define the parameters used by the L2C application
"""
parameters = {
'cluster': [cluster for site in ['grenoble', 'nancy']
for cluster in get_site_clusters(site) if cluster != 'graphite'],
'cores': {i: {'px': expRange(1, i)}
for i in expRange(4, 64)},
'datasize': expRange(256, 256),
'transposition': ['XYZ', 'XZY', 'YXZ', 'YZX', 'ZXY', 'ZYX']}
logger.info(pformat(parameters))
sweeps = sweep(parameters)
self.sweeper = ParamSweeper(os.path.join(self.result_dir, "sweeps"), sweeps)
logger.info('Number of parameters combinations %s', len(self.sweeper.get_remaining()))
def get_nodes(self, comb):
"""
Perform a submission for a given comb and
retrieve the submission node list
"""
logger.info('Performing submission')
n_core = get_host_attributes(comb['cluster'] + '-1')['architecture']['smt_size']
submission = OarSubmission(resources="nodes=%d" % (max(1, comb['cores']/n_core), ),
sql_properties="cluster='%s'"%comb['cluster'],
job_type="besteffort",
name="l2c_fft_eval")
self.oar_job_id, self.frontend = oarsub([(submission, get_cluster_site(comb['cluster']))])[0]
logger.info("Waiting for job start")
wait_oar_job_start(self.oar_job_id, self.frontend)
logger.info("Retrieving hosts list")
nodes = get_oar_job_nodes(self.oar_job_id, self.frontend)
self.hosts = [host for host in nodes for i in range(n_core)]
def generate_machine_file(self):
"""
Generate a machine file used by MPI
        to know which nodes to use during the computation
"""
fd, mfile = mkstemp(dir='/tmp/', prefix='mfile_')
f = os.fdopen(fd, 'w')
f.write('\n'.join([host.address for host in self.hosts]))
f.close()
return mfile
if __name__ == "__main__":
engine = l2c_fft()
engine.start()
| gpl-3.0 | -8,972,633,790,968,663,000 | 39.337017 | 167 | 0.544857 | false |
tsmith328/Homework | Python/CS 1301/HW2/HW2.py | 1 | 1179 | #Tyler Smith and Kamryn Harris
#[email protected] and [email protected]
#CS 1301 Section A06
#We worked on this assignment alone, using only the semester's course materials.
def mass(kg):
stone = kg * 0.157
return stone
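#For reference, mass(10) returns roughly 1.57, i.e. 10 kilograms is about 1.57 stone.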
def volume(liter):
pint = liter * 2.11
print('There are %.2f' % pint , 'US pints in %.2f' % liter , 'liters.')
def monkeys(monkeys):
hockeyPucks = monkeys * 37.62533333333
print('There are %.4f' % hockeyPucks , 'hockey pucks in %.6f' % monkeys , 'average spider monkeys.')
def tipCalculator():
import math
bill = input('How much is your bill?')
tip = input('What percent do you want to leave your server? Do not include the percentage sign.')
bill = float(bill)
tip = float(tip) / 100 #tip as a percent
tipAmount = bill * tip
tipAmount = math.ceil(tipAmount) #Rounds tip to next dollar
taxAmount = bill * 0.078 #calculates tax
taxAmount = round(taxAmount , 2) #Rounds to cents
total = bill + tipAmount + taxAmount #Calculates total bill
print('Tax is: $%.2f' % taxAmount) #formats amounts to two decimals
print('Tip is: $%.2f' % tipAmount)
print('Total is: $%.2f' % total) | mit | 581,807,367,253,504,900 | 37.064516 | 104 | 0.669211 | false |
DXCanas/kolibri | kolibri/core/auth/management/commands/importusers.py | 1 | 6549 | import csv
import logging
from functools import partial
from itertools import starmap
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.db import transaction
from kolibri.core.auth.models import Classroom
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
logger = logging.getLogger(__name__)
DEFAULT_PASSWORD = "kolibri"
def validate_username(user):
# Check if username is specified, if not, throw an error
if 'username' not in user or user['username'] is None:
raise CommandError('No usernames specified, this is required for user creation')
def infer_facility(user, default_facility):
if 'facility' in user and user['facility']:
try:
# Try lookup by id first, then name
return Facility.objects.get(pk=user['facility'])
except (Facility.DoesNotExist, ValueError):
try:
return Facility.objects.get(name=user['facility'])
except Facility.DoesNotExist:
raise CommandError('Facility matching identifier {facility} was not found'.format(facility=user['facility']))
else:
return default_facility
def infer_and_create_class(user, facility):
if 'class' in user and user['class']:
try:
# Try lookup by id first, then name
classroom = Classroom.objects.get(pk=user['class'], parent=facility)
except (Classroom.DoesNotExist, ValueError):
try:
classroom = Classroom.objects.get(name=user['class'], parent=facility)
except Classroom.DoesNotExist:
classroom = Classroom.objects.create(name=user['class'], parent=facility)
return classroom
def create_user(i, user, default_facility=None):
validate_username(user)
if i == 0 and all(key == val or val is None for key, val in user.items()):
# Check whether the first row is a header row or not
# Either each key will be equal to the value
# Or the header is not included in the CSV, so it is None
return False
facility = infer_facility(user, default_facility)
classroom = infer_and_create_class(user, facility)
username = user['username']
try:
user_obj = FacilityUser.objects.get(username=username, facility=facility)
logger.warn('Tried to create a user with the username {username} in facility {facility}, but one already exists'.format(
username=username,
facility=facility
))
if classroom:
classroom.add_member(user_obj)
return False
except FacilityUser.DoesNotExist:
password = user.get('password', DEFAULT_PASSWORD) or DEFAULT_PASSWORD
try:
new_user = FacilityUser.objects.create_user(
full_name=user.get('full_name', ''),
username=username,
facility=facility,
password=password,
)
if classroom:
classroom.add_member(new_user)
logger.info('User created with username {username} in facility {facility} with password {password}'.format(
username=username,
facility=facility,
password=password,
))
return True
except ValidationError as e:
logger.error('User not created with username {username} in facility {facility} with password {password}'.format(
username=username,
facility=facility,
password=password,
))
for key, error in e.message_dict.items():
logger.error('{key}: {error}'.format(key=key, error=error[0]))
return False
class Command(BaseCommand):
help = """
Imports a user list from CSV file and creates
them on a specified or the default facility.
Requires CSV file data in this form:
<full_name>,<username>,<password>,<facility>,<class>
Less information can be passed if the headers are specified:
full_name,username,password,class
<full_name>,<username>,<password>,<class>
Or in a different order:
username,full_name
<username>,<full_name>
At minimum the username is required.
The password will be set to '{DEFAULT_PASSWORD}' if none is set.
The facility can be either the facility id or the facility name.
If no facility is given, either the default facility,
or the facility specified with the --facility commandline argument will be used.
""".format(DEFAULT_PASSWORD=DEFAULT_PASSWORD)
def add_arguments(self, parser):
parser.add_argument('filepath', action='store', help='Path to CSV file.')
parser.add_argument('--facility', action='store', type=str, help='Facility id to import the users into')
def handle(self, *args, **options):
if options['facility']:
default_facility = Facility.objects.get(pk=options['facility'])
else:
default_facility = Facility.get_default_facility()
if not default_facility:
raise CommandError('No default facility exists, please make sure to provision this device before running this command')
fieldnames = ['full_name', 'username', 'password', 'facility', 'class']
# open using default OS encoding
with open(options['filepath']) as f:
header = next(csv.reader(f, strict=True))
if all(col in fieldnames for col in header):
# Every item in the first row matches an item in the fieldnames, it is a header row
if 'username' not in header:
raise CommandError('No usernames specified, this is required for user creation')
ordered_fieldnames = header
elif any(col in fieldnames for col in header):
raise CommandError('Mix of valid and invalid header labels found in first row')
else:
ordered_fieldnames = fieldnames
# open using default OS encoding
with open(options['filepath']) as f:
reader = csv.DictReader(f, fieldnames=ordered_fieldnames, strict=True)
with transaction.atomic():
create_func = partial(create_user, default_facility=default_facility)
total = sum(starmap(create_func, enumerate(reader)))
logger.info('{total} users created'.format(total=total))
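# Example invocation (the CSV file name is illustrative):
#
#   kolibri manage importusers users.csv --facility <facility_id>
#
# where users.csv either starts with a header row drawn from
# full_name,username,password,facility,class or provides all five columns in
# that order, as described in the command help above.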
| mit | -7,310,236,231,817,944,000 | 40.188679 | 131 | 0.641472 | false |
lovasb/pylogstash | setup.py | 1 | 4787 | import os
import sys
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
version = '0.1'
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where = '.', package = '',
exclude = standard_exclude,
exclude_directories = standard_exclude_directories,
only_in_packages = False,
show_ignored = False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, '__init__.py'))
and not prefix):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
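# For reference, a call such as find_package_data("pylogstash") returns a
# mapping of package name to relative file paths (file names below are purely
# illustrative), e.g. {'pylogstash': ['templates/example.conf']}, which is the
# shape expected by the setuptools package_data argument used further down.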
LONG_DESCRIPTION = """
===========
Log from python to Logstash server through encrypted TCP
===========
Handling data.
"""
setup(
name = 'pylogstash',
version = version,
description = "Log from python to Logstash server through encrypted TCP",
long_description = LONG_DESCRIPTION,
classifiers = [
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Framework :: Django",
"Environment :: Web Environment",
"Operating System :: OS Independent",
"Natural Language :: English",
],
keywords = ['logstash', 'logging', 'django'],
author = 'Lovas Bence',
author_email = '[email protected]',
url = 'http://lovasb.com',
license = 'gplv3',
packages = find_packages(),
package_data = find_package_data("pylogstash", only_in_packages = False),
include_package_data = True,
zip_safe = False,
install_requires = ['setuptools'],
) | gpl-3.0 | 8,913,605,518,605,522,000 | 35.549618 | 86 | 0.548151 | false |
ColdrickSotK/storyboard | storyboard/plugin/email/__init__.py | 1 | 3081 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log
from storyboard.common.working_dir import get_plugin_directory
CONF = cfg.CONF
LOG = log.getLogger(__name__)
PLUGIN_OPTS = [
cfg.BoolOpt("enable",
default=False,
help="Enable, or disable, the notification email plugin."),
cfg.StrOpt("sender",
default='StoryBoard (Do Not Reply)'
'<[email protected]>',
help="The email address from which storyboard will send its "
"messages."),
cfg.StrOpt("reply_to",
default=None,
help="The email address of the Reply-To header (optional)."),
cfg.StrOpt("default_url",
default=None,
help="The default/fallback url base to use in emails."),
cfg.StrOpt("smtp_host",
default='localhost',
help="The SMTP server to use."),
cfg.IntOpt("smtp_port",
default=25,
help="The SMTP Server Port to connect to (default 25)."),
cfg.IntOpt("smtp_timeout",
default=10,
help="Timeout, in seconds, to wait for the SMTP connection to "
"fail"),
cfg.StrOpt("smtp_local_hostname",
default=None,
help="The FQDN of the sending host when identifying itself "
"to the SMTP server (optional)."),
cfg.StrOpt("smtp_ssl_keyfile",
default=None,
help="Path to the SSL Keyfile, when using ESMTP. Please make "
"sure the storyboard client can read this file."),
cfg.StrOpt("smtp_ssl_certfile",
default=None,
help="Path to the SSL Certificate, when using ESMTP "
"(optional). Please make sure the storyboard client can "
"read this file."),
cfg.StrOpt("smtp_user",
default=None,
help="Username/login for the SMTP server."),
cfg.StrOpt("smtp_password",
default=None,
help="Password for the SMTP server.")
]
CONF.register_opts(PLUGIN_OPTS, "plugin_email")
def get_email_directory():
"""A shared utility method that always provides the same working
directory. Error handling is explicitly not provided, as the methods used
'should' be consistent about the errors they themselves raise.
"""
return get_plugin_directory("email")
| apache-2.0 | 3,523,190,412,246,369,000 | 38.5 | 78 | 0.610841 | false |
rhyswhitley/savanna_iav | src/figures/trends/gpp_annual_trend_2alt.py | 1 | 9771 | #!/usr/bin/env python2
import os
import re
import natsort
import string
import netCDF4 as nc
import numpy as np
import pandas as pd
import cPickle as pickle
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.cm import get_cmap
from matplotlib import style
from scipy import stats, integrate
from collections import OrderedDict
def fit_trend(x, y):
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
return {'model': p, 'slope': slope, 'int': intercept, \
'r2': r_value, 'se': std_err, 'p': p_value}
def add_trend(pObj, xseries, yseries, col, header):
# get linear trend line
trend = fit_trend(xseries, yseries)
# create a label to shows statistics
trend_lab = '{0}: slope = {1:.2f}, p = {2:.3f}' \
.format(header, trend['slope'], trend['p'])
# make the significant slopes stand out more
if trend['p'] < 0.05:
sig_lw = 4
sig_alpha = 1
else:
sig_lw = 1.75
sig_alpha = 0.5
# plot trend line
pObj.plot(xseries, trend['model'](xseries), '-', c=col, \
label=trend_lab, alpha=sig_alpha, lw=sig_lw)
def add_mylegend(pObj, part, title, step=2, fontsize=11, xloc=0, yloc=0):
handles, labels = pObj.get_legend_handles_labels()
leg = pObj.legend(handles[part::step], labels[part::step], bbox_to_anchor=(xloc, yloc), \
prop={'size':fontsize}, loc='center', title=title)
return leg
def agg_tseries_up(init_df, conv=1):
# upscale to day :: integrated sum [response to discrete value]
day_df = init_df.resample('D', how= \
lambda x: integrate.trapz(x, dx=1800)*conv)
# upscale to month: simple sum
month_df = day_df.groupby([day_df.index.year, day_df.index.month]) \
.aggregate("sum")
# upscale to year: simple sum
year_df = day_df.groupby([day_df.index.year]).aggregate("sum")
# return to user the time-series at different levels
return {"day": day_df, "month": month_df, "year": year_df}
def get_value(nc_obj, label):
return nc_obj.variables[label][:].flatten()
def get_dataframe(npath):
"""
A quick function to transform a netcdf file into a pandas dataframe that
can be used for analysis and plotting. Attributes are extracted using
in built netCDF4 library functions. Time is arbitrary and needs to be
set by the user.
"""
# make a connection to the ncdf file
ncdf_con = nc.Dataset(npath, 'r', format="NETCDF4")
# number of rows, equivalent to time-steps
time_len = len(ncdf_con.dimensions['time'])
# extract time information
time_sec = ncdf_con.variables['time']
sec_orig = re.search(r'\d+.*', str(time_sec.units)).group(0)
# create a new dataframe from the netCDF file
nc_dataframe = pd.DataFrame({"trees_Ag": get_value(ncdf_con, "Atree"), \
"grass_Ag": get_value(ncdf_con, "Agrass")}, \
index=pd.date_range(sec_orig, \
periods=time_len, freq="30min"))
return nc_dataframe
def plot_gpp_trends(canopy_data_list):
plt.rcParams['lines.linewidth'] = 1.25
plt.rcParams.update({'mathtext.default': 'regular'})
n_exps = len(canopy_data_list)
col1_map = get_cmap("summer")(np.linspace(0.1, 1, n_exps))
col2_map = get_cmap("winter")(np.linspace(0.1, 1, n_exps))
col3_map = get_cmap("autumn")(np.linspace(0.1, 1, n_exps))
# create plotting area
plt.figure(figsize=(10, 9))
plot_grid = gridspec.GridSpec(1, 1, hspace=0.1)
# create subplots
ax1 = plt.subplot(plot_grid[0])
# for each experiment
for (i, canopy) in enumerate(canopy_data_list):
# resample to day timestep
canp_upts = agg_tseries_up(canopy.ix[:, ["trees_Ag", "grass_Ag"]], 1e-6*12)
# time
trees_Cf = np.array(canp_upts["year"]["trees_Ag"])
grass_Cf = np.array(canp_upts["year"]["grass_Ag"])
total_Cf = trees_Cf + grass_Cf
y_tseries = np.arange(2001, 2015)
# carbon
ax1.plot(y_tseries, total_Cf, 'o--', alpha=0.4, c=col1_map[i])
ax1.plot(y_tseries, trees_Cf, 'o--', alpha=0.4, c=col2_map[i])
ax1.plot(y_tseries, grass_Cf, 'o--', alpha=0.4, c=col3_map[i])
# y ~ x
add_trend(ax1, y_tseries, total_Cf, col1_map[i], "Exp_{0}".format(i+1))
add_trend(ax1, y_tseries, trees_Cf, col2_map[i], "Exp_{0}".format(i+1))
add_trend(ax1, y_tseries, grass_Cf, col3_map[i], "Exp_{0}".format(i+1))
# limits
ax1.set_ylim([100, 1800])
ax1.set_xlim([2000, 2015])
# axis
newXax = np.arange(2001, 2015, 1)
newYax = np.arange(200, 1800, 100)
ax1.xaxis.set_ticks(newXax)
ax1.yaxis.set_ticks(newYax)
ax1.xaxis.set_ticklabels(newXax, rotation=45, ha="right", fontsize=13)
ax1.yaxis.set_ticklabels(newYax, fontsize=13)
# labels
ax1.set_title("Howard Springs IAV Experiments (2001 to 2015)")
ax1.set_ylabel(r"Gross Primary Productivity (gC m$^{-2}$ s$^{-1}$)")
ax1.yaxis.set_label_coords(-0.1, 0.5)
    # legends
leg1 = add_mylegend(ax1, part=0, title="Total", step=3, xloc=1.15, yloc=0.85, fontsize=9)
leg2 = add_mylegend(ax1, part=1, title="Tree", step=3, xloc=1.15, yloc=0.63, fontsize=9)
leg3 = add_mylegend(ax1, part=2, title="Grass", step=3, xloc=1.15, yloc=0.13, fontsize=9)
plt.gca().add_artist(leg1)
plt.gca().add_artist(leg2)
plt.gca().add_artist(leg3)
ax1.grid(c='gray')
plt.subplots_adjust(left=0.1, right=0.76, bottom=0.1, top=0.95)
#plt.savefig(SAVEPATH)
plt.show()
return 1
def slicedict(d, s):
new_dict = {k:v for k, v in d.iteritems() if k.startswith(s)}
return OrderedDict(natsort.natsorted(new_dict.iteritems()))
def plot_gpp_trends_split(canopy_data_list, sname, exp_series=1):
plt.rcParams['lines.linewidth'] = 1.2
plt.rcParams.update({'mathtext.default': 'regular'})
# create plotting area
plt.figure(figsize=(10, 9))
plot_grid = gridspec.GridSpec(3, 1, hspace=0.1)
# create subplots
ax1 = plt.subplot(plot_grid[0])
ax2 = plt.subplot(plot_grid[1])
ax3 = plt.subplot(plot_grid[2])
if exp_series is 1:
temp_dict = slicedict(canopy_data_list, 'Sim_1')
else:
temp_dict = slicedict(canopy_data_list, 'Sim_2')
col2_map = get_cmap("jet")(np.linspace(0, 1, len(temp_dict)))
# for each experiment
for (i, (clab, canopy)) in enumerate(temp_dict.iteritems()):
# get values
trees_Cf = canopy["Atree"].values
grass_Cf = canopy["Agrass"].values
total_Cf = trees_Cf + grass_Cf
# time
y_tseries = range(2001, 2015)
# carbon
ax1.plot(y_tseries, total_Cf, 'o--', alpha=0.4, c=col2_map[i])
ax2.plot(y_tseries, trees_Cf, 'o--', alpha=0.4, c=col2_map[i])
ax3.plot(y_tseries, grass_Cf, 'o--', alpha=0.4, c=col2_map[i])
# y ~ x
add_trend(ax1, y_tseries, total_Cf, col2_map[i], clab)
add_trend(ax2, y_tseries, trees_Cf, col2_map[i], clab)
add_trend(ax3, y_tseries, grass_Cf, col2_map[i], clab)
# limits
ax1.set_xlim([2000, 2015])
ax2.set_xlim([2000, 2015])
ax3.set_xlim([2000, 2015])
# axis
newax = np.arange(2001, 2015, 1)
ax1.xaxis.set_ticks(newax)
ax2.xaxis.set_ticks(newax)
ax3.xaxis.set_ticks(newax)
ax1.xaxis.set_ticklabels([])
ax2.xaxis.set_ticklabels([])
ax3.xaxis.set_ticklabels(newax, rotation=45, ha="right", fontsize=13)
# labels
if exp_series is 1:
title_lab = "Howard Springs [2001 - 2015] :: Meteorology Experiments"
else:
title_lab = "Howard Springs [2001 - 2015] :: Climatology Experiments"
ax1.set_title(title_lab)
ax1.set_ylabel(r"Total GPP (gC m$^{-2}$ s$^{-1}$)")
ax2.set_ylabel(r"Tree GPP (gC m$^{-2}$ s$^{-1}$)")
ax3.set_ylabel(r"Grass GPP (gC m$^{-2}$ s$^{-1}$)")
ax1.yaxis.set_label_coords(-0.1, 0.5)
ax2.yaxis.set_label_coords(-0.1, 0.5)
ax3.yaxis.set_label_coords(-0.1, 0.5)
# legends
ax1.legend(loc="right", bbox_to_anchor=(1.35, 0.5), prop={'size':9}, ncol=1)
ax2.legend(loc="right", bbox_to_anchor=(1.35, 0.5), prop={'size':9}, ncol=1)
ax3.legend(loc="right", bbox_to_anchor=(1.35, 0.5), prop={'size':9}, ncol=1)
ax1.grid(c='gray')
ax2.grid(c='gray')
ax3.grid(c='gray')
plt.subplots_adjust(left=0.1, right=0.76, bottom=0.1, top=0.95)
plt.savefig(SAVEPATH + sname)
#plt.show()
return 1
def main():
# Get the leaf daily dataframe dictionary
leaf_dict = pickle.load(open(PKLPATH + "daily/daily_leaf.pkl", 'rb'))
albet = list(string.ascii_uppercase)[:7]
print albet
more_labels = ['Sim_1A'] + ['Sim_2'+i for i in albet] + ['Sim_1'+j for j in albet[1:]]
print more_labels
#new_dict = {newlab: oldval for (newlab, oldval) in zip(more_labels, leaf_dict.values())}
# for some reason the natural sorting isn't retained in the load
leaf_year = OrderedDict(natsort.natsorted({dlab: ldf[["Atree", "Agrass"]] \
.resample("A", how=lambda x: sum(x)*12) \
for (dlab, ldf) in zip(more_labels, leaf_dict.values())} \
.iteritems()))
# Create the plot
plot_gpp_trends_split(leaf_year, "HWS_GPP_trend_Exp1_Met.pdf", exp_series=1)
plot_gpp_trends_split(leaf_year, "HWS_GPP_trend_Exp2_Clim.pdf", exp_series=2)
return None
if __name__ == "__main__":
# set paths
PKLPATH = os.path.expanduser("~/Savanna/Data/HowardSprings_IAV/pickled/")
SAVEPATH = os.path.expanduser("~/Savanna/Analysis/figures/IAV/")
# run main
main()
| cc0-1.0 | -6,373,555,353,771,403,000 | 34.021505 | 97 | 0.604851 | false |
altermarkive/Resurrecting-JimFleming-Numerai | src/ml-jimfleming--numerai/models/classifier/main.py | 1 | 5319 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import time
import random
random.seed(67)
import numpy as np
np.random.seed(67)
import pandas as pd
import tensorflow as tf
tf.set_random_seed(67)
from sklearn.utils import shuffle
from sklearn.metrics import log_loss, roc_auc_score
from tqdm import tqdm, trange
from model import Model
import os
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_epochs', 30, '')
tf.app.flags.DEFINE_integer('batch_size', 128, '')
def main(_):
df_train = pd.read_csv(os.getenv('PREPARED_TRAINING'))
df_valid = pd.read_csv(os.getenv('PREPARED_VALIDATING'))
df_test = pd.read_csv(os.getenv('PREPARED_TESTING'))
feature_cols = list(df_train.columns[:-1])
target_col = df_train.columns[-1]
X_train = df_train[feature_cols].values
y_train = df_train[target_col].values
X_valid = df_valid[feature_cols].values
y_valid = df_valid[target_col].values
X_test = df_test[feature_cols].values
prefix = os.getenv('STORING')
tsne_data_5 = np.load(os.path.join(prefix, 'tsne_2d_5p_poly.npz'))
tsne_data_15 = np.load(os.path.join(prefix, 'tsne_2d_15p_poly.npz'))
tsne_data_10 = np.load(os.path.join(prefix, 'tsne_2d_10p_poly.npz'))
tsne_data_30 = np.load(os.path.join(prefix, 'tsne_2d_30p_poly.npz'))
tsne_data_50 = np.load(os.path.join(prefix, 'tsne_2d_50p_poly.npz'))
X_train_concat = np.concatenate([
X_train,
tsne_data_5['train'],
tsne_data_15['train'],
tsne_data_50['train'],
], axis=1)
X_valid_concat = np.concatenate([
X_valid,
tsne_data_5['valid'],
tsne_data_15['valid'],
tsne_data_50['valid'],
], axis=1)
X_test_concat = np.concatenate([
X_test,
tsne_data_5['test'],
tsne_data_15['test'],
tsne_data_50['test'],
], axis=1)
num_features = X_train_concat.shape[1]
features = tf.placeholder(tf.float32, shape=[None, num_features], name='features')
targets = tf.placeholder(tf.int32, shape=[None], name='targets')
with tf.variable_scope('classifier'):
train_model = Model(features, targets, is_training=True)
with tf.variable_scope('classifier', reuse=True):
test_model = Model(features, targets, is_training=False)
best = None
wait = 0
summary_op = tf.summary.merge_all()
logdir = '{}logs/classifier_{}'.format(prefix, int(time.time()))
supervisor = tf.train.Supervisor(logdir=logdir, summary_op=None)
with supervisor.managed_session() as sess:
summary_writer = tf.summary.FileWriter(logdir)
print('Training model with {} parameters...'.format(train_model.num_parameters))
with tqdm(total=FLAGS.num_epochs) as pbar:
for epoch in range(FLAGS.num_epochs):
X_train_epoch, y_train_epoch = shuffle(X_train_concat, y_train)
num_batches = len(y_train_epoch) // FLAGS.batch_size
losses = []
for batch_index in range(num_batches):
batch_start = batch_index * FLAGS.batch_size
batch_end = batch_start + FLAGS.batch_size
X_train_batch = X_train_epoch[batch_start:batch_end]
y_train_batch = y_train_epoch[batch_start:batch_end]
_, loss = sess.run([
train_model.train_step,
train_model.loss,
], feed_dict={
features: X_train_batch,
targets: y_train_batch,
})
losses.append(loss)
loss_train = np.mean(losses)
loss_valid, summary_str = sess.run([
test_model.loss,
summary_op
], feed_dict={
features: X_valid_concat,
targets: y_valid,
})
if best is None or loss_valid < best:
best = loss_valid
wait = 0
else:
wait += 1
summary_writer.add_summary(summary_str, epoch)
summary_writer.flush()
pbar.set_description('[{}] (train) loss: {:.8f}, (valid) loss: {:.8f} [best: {:.8f}, wait: {}]' \
.format(epoch, loss_train, loss_valid, best, wait))
pbar.update()
summary_writer.add_graph(sess.graph)
summary_writer.flush()
summary_writer.close()
p_valid = sess.run(test_model.predictions, feed_dict={
features: X_valid_concat,
})
loss = log_loss(y_valid, p_valid[:,1])
auc = roc_auc_score(y_valid, p_valid[:,1])
print('Validation Prediction Loss: {}, AUC: {}'.format(loss, auc))
p_test = sess.run(test_model.predictions, feed_dict={
features: X_test_concat,
})
df_pred = pd.DataFrame({
'id': df_test['id'],
'probability': p_test[:,1]
})
csv_path = os.getenv('PREDICTING')
df_pred.to_csv(csv_path, columns=('id', 'probability'), index=None)
print('Saved: {}'.format(csv_path))
if __name__ == "__main__":
tf.app.run()
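# Note: this script expects the PREPARED_TRAINING, PREPARED_VALIDATING,
# PREPARED_TESTING, STORING and PREDICTING environment variables to point at
# the prepared CSV files, the working directory holding the t-SNE .npz dumps,
# and the output CSV path, respectively (see the os.getenv calls in main above).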
| mit | -8,530,050,815,120,115,000 | 32.037267 | 113 | 0.562136 | false |
jocelynj/weboob | weboob/tools/parsers/__init__.py | 1 | 2010 | # -*- coding: utf-8 -*-
# Copyright(C) 2010 Christophe Benz, Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import logging
__all__ = ['get_parser', 'NoParserFound']
class NoParserFound(Exception): pass
def load_lxml():
from .lxmlparser import LxmlHtmlParser
return LxmlHtmlParser
def load_lxmlsoup():
from .lxmlsoupparser import LxmlSoupParser
return LxmlSoupParser
def load_html5lib():
from .html5libparser import Html5libParser
return Html5libParser
def load_elementtidy():
from .elementtidyparser import ElementTidyParser
return ElementTidyParser
def load_builtin():
from .htmlparser import HTMLParser
return HTMLParser
def get_parser(preference_order=('lxml', 'lxmlsoup', 'html5lib', 'elementtidy', 'builtin')):
"""
Get a parser from a preference order list.
This allows Weboob to run on systems without lxml, which is the default parser.
Return a parser implementing IParser.
"""
if not isinstance(preference_order, (tuple, list)):
preference_order = [preference_order]
for kind in preference_order:
if not 'load_%s' % kind in globals():
continue
try:
return globals()['load_%s' % kind]()
except ImportError:
logging.debug('%s is not installed.' % kind)
raise NoParserFound("No parser found (%s)" % ','.join(preference_order))
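# For reference, get_parser() returns the first available parser *class* in the
# preference order; e.g. get_parser(('lxml', 'builtin')) falls back to the
# bundled HTMLParser when lxml is not importable, and get_parser('builtin')
# also works because a bare string is wrapped into a single-element list above.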
| gpl-3.0 | -8,723,864,577,178,292,000 | 30.40625 | 92 | 0.706965 | false |
mattclark/osf.io | addons/dataverse/views.py | 1 | 9845 | """Views for the node settings page."""
# -*- coding: utf-8 -*-
import httplib as http
from django.utils import timezone
from django.core.exceptions import ValidationError
from requests.exceptions import SSLError
from flask import request
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from addons.base import generic_views
from addons.dataverse import client
from addons.dataverse.models import DataverseProvider
from addons.dataverse.settings import DEFAULT_HOSTS
from addons.dataverse.serializer import DataverseSerializer
from dataverse.exceptions import VersionJsonNotFoundError, OperationFailedError
from osf.models import ExternalAccount
from osf.utils.permissions import WRITE
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_contributor_or_public
)
from website.util import rubeus, api_url_for
SHORT_NAME = 'dataverse'
FULL_NAME = 'Dataverse'
dataverse_account_list = generic_views.account_list(
SHORT_NAME,
DataverseSerializer
)
dataverse_import_auth = generic_views.import_auth(
SHORT_NAME,
DataverseSerializer
)
dataverse_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
dataverse_get_config = generic_views.get_config(
SHORT_NAME,
DataverseSerializer
)
## Auth ##
@must_be_logged_in
def dataverse_user_config_get(auth, **kwargs):
"""View for getting a JSON representation of the logged-in user's
Dataverse user settings.
"""
user_addon = auth.user.get_addon('dataverse')
user_has_auth = False
if user_addon:
user_has_auth = user_addon.has_auth
return {
'result': {
'userHasAuth': user_has_auth,
'urls': {
'create': api_url_for('dataverse_add_user_account'),
'accounts': api_url_for('dataverse_account_list'),
},
'hosts': DEFAULT_HOSTS,
},
}, http.OK
## Config ##
@must_be_logged_in
def dataverse_add_user_account(auth, **kwargs):
"""Verifies new external account credentials and adds to user's list"""
user = auth.user
provider = DataverseProvider()
host = request.json.get('host').rstrip('/')
api_token = request.json.get('api_token')
# Verify that credentials are valid
client.connect_or_error(host, api_token)
# Note: `DataverseSerializer` expects display_name to be a URL
try:
provider.account = ExternalAccount(
provider=provider.short_name,
provider_name=provider.name,
display_name=host, # no username; show host
oauth_key=host, # hijacked; now host
oauth_secret=api_token, # hijacked; now api_token
provider_id=api_token, # Change to username if Dataverse allows
)
provider.account.save()
except ValidationError:
# ... or get the old one
provider.account = ExternalAccount.objects.get(
provider=provider.short_name,
provider_id=api_token
)
if not user.external_accounts.filter(id=provider.account.id).exists():
user.external_accounts.add(provider.account)
user_addon = auth.user.get_addon('dataverse')
if not user_addon:
user.add_addon('dataverse')
user.save()
# Need to ensure that the user has dataverse enabled at this point
user.get_or_add_addon('dataverse', auth=auth)
user.save()
return {}
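# Note: Dataverse does not use OAuth, so the ExternalAccount fields are
# repurposed above: oauth_key stores the host while oauth_secret and
# provider_id both hold the API token (see the inline comments in
# dataverse_add_user_account).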
@must_have_permission(WRITE)
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def dataverse_set_config(node_addon, auth, **kwargs):
"""Saves selected Dataverse and dataset to node settings"""
user_settings = node_addon.user_settings
user = auth.user
if user_settings and user_settings.owner != user:
raise HTTPError(http.FORBIDDEN)
alias = request.json.get('dataverse', {}).get('alias')
doi = request.json.get('dataset', {}).get('doi')
if doi is None or alias is None:
return HTTPError(http.BAD_REQUEST)
connection = client.connect_from_settings(node_addon)
dataverse = client.get_dataverse(connection, alias)
dataset = client.get_dataset(dataverse, doi)
node_addon.set_folder(dataverse, dataset, auth)
return {'dataverse': dataverse.title, 'dataset': dataset.title}, http.OK
@must_have_permission(WRITE)
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
def dataverse_get_datasets(node_addon, **kwargs):
"""Get list of datasets from provided Dataverse alias"""
alias = request.json.get('alias')
connection = client.connect_from_settings(node_addon)
dataverse = client.get_dataverse(connection, alias)
datasets = client.get_datasets(dataverse)
ret = {
'alias': alias, # include alias to verify dataset container
'datasets': [{'title': dataset.title, 'doi': dataset.doi} for dataset in datasets],
}
return ret, http.OK
## Crud ##
@must_have_permission(WRITE)
@must_not_be_registration
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def dataverse_publish_dataset(node_addon, auth, **kwargs):
node = node_addon.owner
publish_both = request.json.get('publish_both', False)
now = timezone.now()
connection = client.connect_from_settings_or_401(node_addon)
dataverse = client.get_dataverse(connection, node_addon.dataverse_alias)
dataset = client.get_dataset(dataverse, node_addon.dataset_doi)
if publish_both:
client.publish_dataverse(dataverse)
client.publish_dataset(dataset)
# Add a log
node.add_log(
action='dataverse_dataset_published',
params={
'project': node.parent_id,
'node': node._id,
'dataset': dataset.title,
},
auth=auth,
log_date=now,
)
return {'dataset': dataset.title}, http.OK
## HGRID ##
def _dataverse_root_folder(node_addon, auth, **kwargs):
node = node_addon.owner
default_version = 'latest-published'
version = 'latest-published' if not node.can_edit(auth) else default_version
# Quit if no dataset linked
if not node_addon.complete:
return []
can_edit = node.can_edit(auth)
permissions = {
'edit': can_edit and not node.is_registration,
'view': node.can_view(auth)
}
try:
connection = client.connect_from_settings(node_addon)
dataverse = client.get_dataverse(connection, node_addon.dataverse_alias)
dataset = client.get_dataset(dataverse, node_addon.dataset_doi)
except SSLError:
return [rubeus.build_addon_root(
node_addon,
node_addon.dataset,
permissions=permissions,
private_key=kwargs.get('view_only', None),
)]
# Quit if doi does not produce a dataset
if dataset is None:
return []
published_files = client.get_files(dataset, published=True)
# Produce draft version or quit if no published version is available
if not published_files:
if can_edit:
version = 'latest'
else:
return []
urls = {
'publish': node.api_url_for('dataverse_publish_dataset'),
}
# determine if there are any changes between the published and draft
# versions of the dataset
try:
dataset.get_metadata('latest-published')
dataset_is_published = True
dataset_draft_modified = dataset.get_state() == 'DRAFT'
except VersionJsonNotFoundError:
dataset_is_published = False
dataset_draft_modified = True
# Get the dataverse host
# (stored in oauth_key because dataverse doesn't use that)
dataverse_host = node_addon.external_account.oauth_key
try:
host_custom_publish_text = client.get_custom_publish_text(connection)
except OperationFailedError:
host_custom_publish_text = ''
return [rubeus.build_addon_root(
node_addon,
node_addon.dataset,
urls=urls,
permissions=permissions,
dataset=node_addon.dataset,
doi=dataset.doi,
dataverse=dataverse.title,
hasPublishedFiles=bool(published_files),
dataverseIsPublished=dataverse.is_published,
datasetIsPublished=dataset_is_published,
datasetDraftModified=dataset_draft_modified,
version=version,
host=dataverse_host,
hostCustomPublishText=host_custom_publish_text,
private_key=kwargs.get('view_only', None),
)]
@must_be_contributor_or_public
@must_have_addon(SHORT_NAME, 'node')
def dataverse_root_folder(node_addon, auth, **kwargs):
return _dataverse_root_folder(node_addon, auth=auth)
@must_be_contributor_or_public
@must_have_addon(SHORT_NAME, 'node')
def dataverse_get_widget_contents(node_addon, **kwargs):
data = {
'connected': False,
}
if not node_addon.complete:
return {'data': data}, http.OK
doi = node_addon.dataset_doi
alias = node_addon.dataverse_alias
connection = client.connect_from_settings_or_401(node_addon)
dataverse = client.get_dataverse(connection, alias)
dataset = client.get_dataset(dataverse, doi)
if dataset is None:
return {'data': data}, http.BAD_REQUEST
dataverse_host = node_addon.external_account.oauth_key
dataverse_url = 'http://{0}/dataverse/{1}'.format(dataverse_host, alias)
dataset_url = 'https://doi.org/' + doi
data.update({
'connected': True,
'dataverse': node_addon.dataverse,
'dataverseUrl': dataverse_url,
'dataset': node_addon.dataset,
'doi': doi,
'datasetUrl': dataset_url,
'citation': dataset.citation,
})
return {'data': data}, http.OK
| apache-2.0 | 4,357,396,554,869,335,600 | 29.107034 | 91 | 0.663586 | false |
France-ioi/taskgrader | examples/taskTurtle/tests/gen/runner.py | 1 | 3332 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import json, sys, traceback
import turtle
from functools import partial
class LoggedTurtle(object):
"""Class emulating Turtle behavior while logging all commands.
It won't actually display anything, it will only execute movement commands
through a TNavigator.
The class' log variable will contain the log for all LoggedTurtles."""
log = []
next_id = 0
def _log(self, items):
"""Add to log."""
self.__class__.log.append(items)
def __init__(self, *args, **kwargs):
# Turtle ID
self.tid = self.__class__.next_id
self.__class__.next_id += 1
# Navigator which will handle all movements
self.navigator = turtle.TNavigator()
self.navigator.speed(0)
# Log initialization
self._log((self.tid, 'turtle', '__init__', args, kwargs))
def logNavigator(self, *args, **kwargs):
# Log a movement command and execute it
funcName = kwargs.pop('funcName')
self._log((self.tid, 'nav', funcName, args, kwargs))
return getattr(self.navigator, funcName)(*args, **kwargs)
def logTurtle(self, *args, **kwargs):
# Log a non-movement command
funcName = kwargs.pop('funcName')
self._log((self.tid, 'turtle', funcName, args, kwargs))
def __getattr__(self, attr):
# Handle calls to this class
# Check if it's a movement command
if hasattr(self.navigator, attr):
subAttr = getattr(self.navigator, attr)
if hasattr(subAttr, '__call__'):
return partial(self.logNavigator, funcName=attr)
else:
return subAttr
# Check if it's another Turtle command
elif hasattr(turtle.Turtle, attr):
subAttr = getattr(turtle.Turtle, attr)
if hasattr(subAttr, '__call__'):
return partial(self.logTurtle, funcName=attr)
else:
return subAttr
# Not a Turtle command at all
else:
raise AttributeError
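# A minimal usage sketch (illustrative only, not executed by the runner):
# every call is recorded together with the id of the turtle that made it,
# so after
#     t = LoggedTurtle()
#     t.forward(100)
#     t.left(90)
# LoggedTurtle.log holds entries such as
#     (0, 'turtle', '__init__', (), {})
#     (0, 'nav', 'forward', (100,), {})
#     (0, 'nav', 'left', (90,), {})
# which is what gets serialised to JSON at the end of this script.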
def changeTurtle(scriptPath):
"""Modify a script to use the LoggedTurtle."""
newScriptLines = []
for l in open(scriptPath, 'r'):
# Remove turtle from imports
if l[:6] == 'import':
imported = map(lambda x: x.strip(), l[7:].strip().split(','))
if 'turtle' in imported:
imported.remove('turtle')
if len(imported) > 0:
newScriptLines.append("import %s\n" % ', '.join(imported))
# Modify Turtle instances to LoggedTurtle instances
if 'Turtle' in l:
newl = l.replace('turtle.Turtle(', 'LoggedTurtle(')
newl = newl.replace('Turtle(', 'LoggedTurtle(')
newl = newl.replace('LoggedLoggedTurtle', 'LoggedTurtle') # safety
newScriptLines.append(newl)
else:
newScriptLines.append(l)
open(scriptPath, 'w').writelines(newScriptLines)
# Modify the solution
changeTurtle("solution.py")
# Execute the solution
try:
execfile("solution.py")
except:
# Remove the runner from the traceback
excInfo = sys.exc_info()
traceback.print_exception(excInfo[0], excInfo[1], excInfo[2].tb_next)
sys.exit(1)
# Output as JSON
print(json.dumps(LoggedTurtle.log))
| mit | -2,496,013,598,061,500,000 | 30.433962 | 78 | 0.596639 | false |
krig/parsnip | parsnip/parsers.py | 1 | 9215 | import six
import re
from contextlib import contextmanager
from functools import wraps
from inspect import getdoc
from .errors import NoMatch
from .tokens import END
@contextmanager
def _savestate(tokens):
tmp = tokens._curr
try:
yield
except NoMatch:
tokens._curr = tmp
def parsnip(doc=None):
def parsnip2(parser):
parser.__doc__ = doc
@wraps(parser)
def run(tokens):
try:
return parser(tokens)
except NoMatch as e:
if e.expected == parser.__doc__ or e.passthrough:
raise e
raise NoMatch(got=e.got,
expected=parser.__doc__,
caused_by=e if e.expected else None)
return run
return parsnip2
def text(txt):
"leaf parser: matches text case-insensitively"
txt = txt.lower()
@parsnip(doc=txt)
def ptext(tok):
v = tok.next().lower()
if v == txt:
return v
raise NoMatch(v)
return ptext
def regex(rx, doc=None, flags=re.I):
"leaf parser: matches regex"
if isinstance(rx, six.string_types):
if not rx.endswith('$'):
rx = rx + '$'
c = re.compile(rx, flags=flags)
else:
c = rx
@parsnip(doc=doc or c.pattern.rstrip('$'))
def pregex(tok):
v = tok.next()
m = c.match(v)
if m and m.groups():
return m.groups()
elif m:
return m.string
raise NoMatch(v)
return pregex
def seq(*parsers):
"match all in sequence"
def pseq(tok):
ret = []
start = tok._curr
for p in parsers:
try:
ret.append(p(tok))
except NoMatch as e:
prevtok = tok.getTokens(start, start + len(ret))
got = ' '.join(prevtok + [str(tok.peekCurrent())])
expected = ' '.join(prevtok + [p.__doc__])
raise NoMatch(got=got,
expected=expected,
caused_by=e)
return ret
pseq.__doc__ = " ".join(getdoc(p) for p in parsers)
return pseq
def lift(parser):
"lifts the first return value of parser"
@parsnip(doc=getdoc(parser))
def plift(tok):
r = parser(tok)
if r is None:
return r
return r[0]
return plift
def lift2(parser):
"lifts the second return value of parser"
@parsnip(doc=getdoc(parser))
def plift2(tok):
return parser(tok)[1]
return plift2
def liftNth(parser, n):
"lifts the Nth return value of parser"
@parsnip(doc=getdoc(parser))
def pliftNth(tok):
return parser(tok)[n]
return pliftNth
def choice(*parsers):
"try to match all until one matches"
@parsnip(doc='(%s)' % (" | ".join(getdoc(p) for p in parsers)))
def pchoice(tok):
for p in parsers:
with _savestate(tok):
return p(tok)
raise NoMatch(tok.next())
return pchoice
def choiceConsumeAll(*parsers):
"""
    Try to match parsers in order until one matches.
    If a parser matches but doesn't consume all tokens,
    this fails.
"""
@parsnip(doc='(%s)' % (" | ".join(getdoc(p) for p in parsers)))
def pchoice(tok):
longest_match = (0, None)
tok.resetLongestMatch()
for p in parsers:
try:
ret = p(tok)
if not tok.empty():
mplus = tok.next()
prevtok = tok.getTokens(*tok.getLongestMatch())
got = ' '.join(prevtok + [str(mplus)])
raise NoMatch(got=got,
expected=getdoc(p) + ' <END>',
passthrough=True)
return ret
except NoMatch as e:
if e.passthrough:
raise e
lf, lt = tok.getLongestMatch()
nmatch = lt - lf
if nmatch > longest_match[0]:
prevtok = tok.getTokens(lf, lt)
got = ' '.join(prevtok + [str(tok.peekCurrent())])
expected = p.__doc__
longest_match = (nmatch,
NoMatch(got=got,
expected=expected,
caused_by=e))
tok._curr = lf
if longest_match[1]:
longest_match[1].passthrough = True
raise longest_match[1]
else:
raise NoMatch(tok.next())
return pchoice
def option(p, value=None):
"always succeeds, if p doesn't match, value is returned as match"
@parsnip(doc='[%s]' % (getdoc(p)))
def poption(tok):
with _savestate(tok):
return p(tok)
return value
return poption
def matchNM(p, n, m):
"match between N and M instances of p"
@parsnip(doc='%s{%d,%d}' % (getdoc(p), n, m))
def pmatchNM(tok):
if n == 0:
ret = []
else:
ret = [p(tok) for _ in xrange(0, n)]
for _ in xrange(n, m):
with _savestate(tok):
ret.append(p(tok))
continue
break
return ret
return pmatchNM
def exactlyN(p, n):
"match exactly N instances of p"
@parsnip(doc='%s{%d}' % (getdoc(p), n))
def pexactlyN(tok):
return [p(tok) for _ in xrange(0, n)]
return pexactlyN
def tag(p, name):
"tags match from p with name"
@parsnip(doc=getdoc(p))
def ptag(tok):
ret = p(tok)
tok.set_tag(name, ret)
return ret
return ptag
def tagfn(p, name, fn):
"saves output of fn(val) in tag"
@parsnip(doc=getdoc(p))
def ptagfn(tok):
ret = p(tok)
tok.set_tag(name, fn(ret))
return ret
return ptagfn
def mapfn(parser, fn):
"""pass output from parser through fn
and use that instead"""
@parsnip(doc=getdoc(parser))
def pmapfn(tok):
return fn(parser(tok))
return pmapfn
def maptags(parser, fn):
"""discard output from parser, pass
tag dict to fn and use output as result"""
@parsnip(doc=getdoc(parser))
def pmaptags(tok):
parser(tok)
return fn(tok.tags)
return pmaptags
def loopref(name):
"""returns a loop reference used to backpatch
recursive grammars"""
@parsnip(doc=name)
def ploopref(tok):
return ploopref.func_dict['loop'](tok)
ploopref.func_dict['loop'] = None
return ploopref
def loop(parser, ref):
"enables parser as a recursive parser that can loop on itself"
ref.func_dict['loop'] = parser
return parser
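# A minimal recursion sketch (illustrative; token values depend on how the
# input was tokenised): build a forward reference first, then patch it once
# the full parser exists, so the grammar can refer to itself:
#     expr_ref = loopref("expr")
#     expr = loop(choice(wrap("(", expr_ref, ")"),
#                        regex(r"\d+", doc="number")),
#                 expr_ref)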
def many(p, min=0):
"match several p's, but at least <min>"
def manydoc():
if min == 0:
return '[%s ...]' % (getdoc(p))
else:
return '%s ...' % (getdoc(p))
@parsnip(doc=manydoc())
def pmany(tok):
acc = []
while True:
with _savestate(tok):
v = p(tok)
acc.append(v)
continue
if len(acc) < min:
raise NoMatch(got=tok.peekNext())
break
return acc
return pmany
def many1(p):
"match one or more p"
return many(p, min=1)
def unless(p):
"if p matches, this fails"
@parsnip(doc="!%s" % (getdoc(p)))
def punless(tok):
try:
ret = p(tok)
except NoMatch as e:
return e.got
raise NoMatch(ret)
return punless
def manyTill(p):
"matches zero or more tokens until p succeeds or all tokens are consumed"
return many(unless(p))
def manyAndTill(p):
"""matches zero or more tokens until p succeeds,
then matches p as well (so end sentinel is consumed)"""
return seq(many(unless(p)), p)
def sep(parser, separator):
@parsnip(doc='[%s [%s %s] ...]' % (getdoc(parser), getdoc(separator), getdoc(parser)))
def psep(tok):
acc = []
with _savestate(tok):
v = parser(tok)
acc.append(v)
if not acc:
return acc
while True:
with _savestate(tok):
separator(tok)
with _savestate(tok):
v = parser(tok)
acc.append(v)
continue
break
return acc
return psep
def anything():
"match ...anything."
@parsnip(doc='*')
def panything(tok):
return tok.next()
return panything
def end():
"match the end of input"
@parsnip(doc=str(END))
def pend(tok):
if not tok.empty():
raise NoMatch(tok.next())
return END
return pend
def wrap(*args):
"""
== seq(text(left), *p, text(right))
"""
args = list(args)
args[0] = text(args[0])
args[-1] = text(args[-1])
return seq(*args)
def doc(parser, text):
"""
Replace documentation for parser with the given text
"""
parser.__doc__ = text
return parser
def parse(parser, ts):
"""
Invokes the given parser on the given
token stream.
"""
return parser(ts)
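# A minimal usage sketch (assuming a token-stream object from this package's
# tokens module providing the next()/peek machinery these parsers expect --
# the TokenStream name below is illustrative, not a confirmed API):
#     from parsnip.tokens import TokenStream   # assumed helper
#     greeting = seq(text("hello"), many1(regex(r"\w+", doc="name")), end())
#     result = parse(greeting, TokenStream(["hello", "alice", "bob"]))
# This matches "hello" followed by one or more word tokens and the end of
# input, and raises NoMatch with a readable "expected" string otherwise.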
| mit | -6,151,438,101,351,395,000 | 23.507979 | 90 | 0.515789 | false |
gnarula/eden_deployment | modules/s3db/survey.py | 1 | 132631 | # -*- coding: utf-8 -*-
""" Sahana Eden Survey Tool
@copyright: 2011-2014 (c) Sahana Software Foundation
@license: MIT
ADAT - Assessment Data Analysis Tool
For more details see the blueprint at:
http://eden.sahanafoundation.org/wiki/BluePrint/SurveyTool/ADAT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3SurveyTemplateModel",
           "S3SurveyQuestionModel",
           "S3SurveyFormatterModel",
           "S3SurveySeriesModel",
           "S3SurveyCompleteModel",
           "S3SurveyTranslateModel",
           "survey_template_represent",
           "survey_answer_list_represent",
           "survey_template_rheader",
           "survey_series_rheader",
           "survey_getAllSectionsForTemplate",
           "survey_getAllQuestionsForTemplate",
           "survey_buildQuestionnaireFromTemplate",
           "survey_buildQuestionnaireFromSeries",
           "survey_getTemplateFromSeries",
           "survey_getAllTemplates",
           "survey_getAllWidgetsForTemplate",
           "survey_getWidgetFromQuestion",
           "survey_getAllSectionsForSeries",
           "survey_getQuestionFromCode",
           "survey_getAllQuestionsForSeries",
           "survey_getAllQuestionsForComplete",
           "survey_save_answers_for_series",
           "survey_updateMetaData",
           "survey_getAllAnswersForQuestionInSeries",
           "survey_getQstnLayoutRules",
           "survey_getSeries",
           "survey_getSeriesName",
           "survey_getAllSeries",
           "survey_getAllTranslationsForTemplate",
           "survey_getAllTranslationsForSeries",
           "survey_build_template_summary",
           "survey_serieslist_dataTable_post",
           "survey_answerlist_dataTable_pre",
           "survey_answerlist_dataTable_post",
           "survey_json2py",
           "survey_json2list",
           ]
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
from s3chart import S3Chart
from s3survey import survey_question_type, \
survey_analysis_type, \
_debug
# =============================================================================
def json2py(jsonstr):
"""
Utility function to convert a string in json to a python structure
"""
from xml.sax.saxutils import unescape
if not isinstance(jsonstr, str):
return jsonstr
try:
jsonstr = unescape(jsonstr, {"u'": '"'})
jsonstr = unescape(jsonstr, {"'": '"'})
pythonStructure = json.loads(jsonstr)
except:
_debug("ERROR: attempting to convert %s using modules/s3db/survey/json2py.py" % (jsonstr))
return jsonstr
else:
return pythonStructure
survey_json2py = json2py
# =============================================================================
def json2list(jsonstr):
"""
        Used to convert a JSON string into a Python list.
"""
if jsonstr == "":
valueList = []
else:
if jsonstr[0] == "[":
valueList = json2py(jsonstr)
else:
valueList = jsonstr.split(",")
if not isinstance(valueList, list):
valueList = [valueList]
return valueList
survey_json2list = json2list
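# For example (illustrative only):
#   json2py('[1, 2, 3]') returns the list [1, 2, 3],
#   json2py("{'a': 1}") converts the single quotes and returns {"a": 1},
#   json2list("a,b,c") returns ["a", "b", "c"], and
#   json2list("") returns [].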
# =============================================================================
class S3SurveyTemplateModel(S3Model):
"""
Template model
The template model is a container for the question model
"""
names = ("survey_template",
"survey_template_id",
"survey_section",
"survey_section_id",
"survey_template_status",
)
def model(self):
T = current.T
db = current.db
template_status = {
1: T("Pending"),
2: T("Active"),
3: T("Closed"),
4: T("Master")
}
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# survey_template
#
# The template is the root table and acts as a container for
# the questions that will be used in a survey.
tablename = "survey_template"
define_table(tablename,
Field("name", "string", length=120,
notnull=True, unique=True,
default = "",
label = T("Template Name"),
),
Field("description", "text", length=500,
default = "",
label = T("Description"),
),
Field("status", "integer",
default = 1,
label = T("Status"),
requires = IS_IN_SET(template_status,
zero=None),
represent = lambda index: \
template_status[index],
#readable=True,
writable = False,
),
# Standard questions which may belong to all template
# competion_qstn: who completed the assessment
Field("competion_qstn", "string", length=200,
label = T("Completion Question"),
),
# date_qstn: when it was completed (date)
Field("date_qstn", "string", length=200,
label = T("Date Question"),
),
# time_qstn: when it was completed (time)
Field("time_qstn", "string", length=200,
label = T("Time Question"),
),
# location_detail: json of the location question
# May consist of any of the following:
# L0, L1, L2, L3, L4, Lat, Lon
Field("location_detail", "string", length=200,
label = T("Location Detail"),
),
# The priority question is the default question used
# to determine the priority of each point on the map.
# The data is stored as the question code.
Field("priority_qstn", "string", length=16,
label = T("Default map question"),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Assessment Template"),
title_display = T("Assessment Template Details"),
title_list = T("Assessment Templates"),
title_analysis_summary = T("Template Summary"),
title_update = T("Edit Assessment Template"),
title_question_details = T("Details of each question in the Template"),
subtitle_analysis_summary = T("Summary by Question Type - (The fewer text questions the better the analysis can be)"),
label_list_button = T("List Assessment Templates"),
label_delete_button = T("Delete this Assessment Template"),
msg_record_created = T("Assessment Template added"),
msg_record_modified = T("Assessment Template updated"),
msg_record_deleted = T("Assessment Template deleted"),
msg_list_empty = T("No Assessment Templates"))
template_id = S3ReusableField("template_id", "reference %s" % tablename,
sortby="name",
label=T("Template"),
requires = IS_ONE_OF(db,
"survey_template.id",
self.survey_template_represent,
),
represent = self.survey_template_represent,
ondelete = "CASCADE")
# Components
add_components(tablename,
survey_series="template_id",
survey_translate="template_id",
)
configure(tablename,
deduplicate = self.survey_template_duplicate,
onaccept = self.template_onaccept,
onvalidation = self.template_onvalidate,
)
# ---------------------------------------------------------------------
# survey_sections
#
# The questions can be grouped into sections this provides
# the description of the section and
# the position of the section within the template
tablename = "survey_section"
define_table(tablename,
Field("name", "string", length=120,
notnull = True,
default = "",
),
Field("description", "text", length=500,
default = "",
),
Field("posn", "integer"),
Field("cloned_section_id", "integer",
readable = False,
writable = False,
),
template_id(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Template Section"),
title_display = T("Template Section Details"),
title_list = T("Template Sections"),
title_update = T("Edit Template Section"),
label_list_button = T("List Template Sections"),
label_delete_button = T("Delete this Template Section"),
msg_record_created = T("Template Section added"),
msg_record_modified = T("Template Section updated"),
msg_record_deleted = T("Template Section deleted"),
msg_list_empty = T("No Template Sections"))
configure(tablename,
deduplicate = self.survey_section_duplicate,
orderby = tablename + ".posn",
)
section_id = S3ReusableField("section_id", "reference %s" % tablename,
readable = False,
writable = False,
)
# Pass names back to global scope (s3.*)
return dict(survey_template_id = template_id,
survey_template_status = template_status,
survey_section_id = section_id,
)
# -------------------------------------------------------------------------
@staticmethod
def template_onvalidate(form):
"""
It is not valid to re-import a template that already has a
status of Active or higher
"""
template_id = form.vars.id
table = current.s3db.survey_template
row = current.db(table.id == template_id).select(table.status,
limitby=(0, 1)
).first()
if row is not None and row.status > 1:
return False
return True
# -------------------------------------------------------------------------
@staticmethod
    def addQuestion(template_id, name, code, notes, type, posn, metadata={}):
        """
            Helper to add a standard question to the given template:
            create the survey_question record (and its metadata) if needed,
            ensure the "Background Information" section exists, and add
            the question to the template's question list
        """
db = current.db
s3db = current.s3db
# Add the question to the database if it's not already there
qstntable = s3db.survey_question
query = (qstntable.name == name) & \
(qstntable.code == code)
record = db(query).select(qstntable.id, limitby=(0, 1)).first()
if record:
qstn_id = record.id
else:
qstn_id = qstntable.insert(name = name,
code = code,
notes = notes,
type = type
)
qstn_metadata_table = s3db.survey_question_metadata
for (descriptor, value) in metadata.items():
qstn_metadata_table.insert(question_id = qstn_id,
descriptor = descriptor,
value = value
)
# Add these questions to the section: "Background Information"
sectable = s3db.survey_section
section_name = "Background Information"
query = (sectable.name == section_name) & \
(sectable.template_id == template_id)
record = db(query).select(sectable.id, limitby=(0, 1)).first()
if record:
section_id = record.id
else:
section_id = sectable.insert(name = section_name,
template_id = template_id,
posn = 0 # special section with no position
)
# Add the question to the list of questions in the template
qstn_list_table = s3db.survey_question_list
query = (qstn_list_table.question_id == qstn_id) & \
(qstn_list_table.template_id == template_id)
record = db(query).select(qstntable.id, limitby=(0, 1)).first()
if not record:
qstn_list_table.insert(question_id = qstn_id,
template_id = template_id,
section_id = section_id,
posn = posn
)
# -------------------------------------------------------------------------
@staticmethod
def template_onaccept(form):
"""
            All of the standard questions will now be generated:
            competion_qstn: who completed the assessment
            date_qstn: when it was completed (date)
            time_qstn: when it was completed (time)
            location_detail: JSON list of location levels, which may
                consist of any of the following:
                L0, L1, L2, L3, L4, Lat, Lon
                A question will be generated for each entry in the list.
            The code of each question will start with "STD-" followed by
            the type of question.
"""
vars = form.vars
if vars.id:
template_id = vars.id
else:
return
addQuestion = S3SurveyTemplateModel.addQuestion
if vars.competion_qstn != None:
name = vars.competion_qstn
code = "STD-WHO"
notes = "Who completed the assessment"
type = "String"
        posn = -10 # negative used to force these questions to appear first
addQuestion(template_id, name, code, notes, type, posn)
if vars.date_qstn != None:
name = vars.date_qstn
code = "STD-DATE"
notes = "Date the assessment was completed"
type = "Date"
posn += 1
addQuestion(template_id, name, code, notes, type, posn)
if vars.time_qstn != None:
name = vars.time_qstn
code = "STD-TIME"
notes = "Time the assessment was completed"
type = "Time"
posn += 1
addQuestion(template_id, name, code, notes, type, posn)
if vars.location_detail != None:
locationList = json2py(vars.location_detail)
if len(locationList) > 0:
name = "The location P-code"
code = "STD-P-Code"
type = "String"
posn += 1
addQuestion(template_id, name, code, None, type, posn)
for loc in locationList:
if loc == "Lat":
name = "Latitude"
elif loc == "Lon":
name = "Longitude"
else:
name = loc
code = "STD-%s" % loc
if loc == "Lat" or loc == "Lon":
type = "Numeric"
metadata = {"Format": "nnn.nnnnnn"}
else:
type = "Location"
metadata = {}
posn += 1
addQuestion(template_id, name, code, "", type, posn, metadata)
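        # For illustration (not executed): a location_detail value of
        # '["L0", "L1", "Lat", "Lon"]' generates the questions
        # STD-P-Code, STD-L0, STD-L1, STD-Lat and STD-Lon, with the
        # latter two as Numeric questions formatted as "nnn.nnnnnn".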
# -------------------------------------------------------------------------
@staticmethod
def survey_template_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with a similar name, ignoring case
"""
if job.tablename == "survey_template":
table = job.table
data = job.data
name = data.get("name")
query = table.name.lower().like('%%%s%%' % name.lower())
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def survey_section_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same name
- the same template
- and the same position within the template
                - however if there is a record with position of zero then that record should be updated
"""
if job.tablename == "survey_section":
table = job.table
data = job.data
name = "name" in data and data.name
template = "template_id" in data and data.template_id
query = (table.name == name) & \
(table.template_id == template)
return duplicator(job, query)
# =============================================================================
def survey_template_represent(id, row=None):
"""
Display the template name rather than the id
"""
if row:
return row.name
elif not id:
return current.messages["NONE"]
table = current.s3db.survey_template
query = (table.id == id)
record = current.db(query).select(table.name,
limitby=(0, 1)).first()
try:
return record.name
except:
return current.messages.UNKNOWN_OPT
# =============================================================================
def survey_template_rheader(r, tabs=[]):
"""
The template rheader
"""
if r.representation == "html":
tablename, record = s3_rheader_resource(r)
if tablename == "survey_template" and record:
T = current.T
s3db = current.s3db
# Tabs
tabs = [(T("Basic Details"), "read"),
(T("Question Details"),"templateRead/"),
(T("Question Summary"),"templateSummary/"),
#(T("Sections"), "section"),
]
if current.auth.s3_has_permission("create", "survey_translate"):
tabs.append((T("Translate"),"translate"))
rheader_tabs = s3_rheader_tabs(r, tabs)
sectionTable = s3db.survey_section
qlistTable = s3db.survey_question_list
viewing = current.request.get_vars.get("viewing", None)
if viewing:
dummy, template_id = viewing.split(".")
else:
template_id = r.id
query = (qlistTable.template_id == template_id) & \
(qlistTable.section_id == sectionTable.id)
rows = current.db(query).select(sectionTable.id,
sectionTable.name,
orderby = qlistTable.posn)
tsection = TABLE(_class="survey-section-list")
lblSection = SPAN(T("Sections that are part of this template"),
_style="font-weight:bold;")
            if len(rows) == 0:
rsection = SPAN(T("As of yet, no sections have been added to this template."))
else:
rsection = TR()
count = 0
lastSection = ""
for section in rows:
if section.name == lastSection:
continue
rsection.append(TD(section.name))
# Comment out the following until templates can be built online
#rsection.append(TD(A(section.name,
# _href=URL(c="survey",
# f="section",
# args="%s" % section.id))))
lastSection = section.name
count += 1
if count % 4 == 0:
tsection.append(rsection)
rsection=TR()
tsection.append(rsection)
rheader = DIV(TABLE(
TR(
TH("%s: " % T("Name")),
record.name,
TH("%s: " % T("Status")),
s3db.survey_template_status[record.status],
),
),
lblSection,
tsection,
rheader_tabs)
return rheader
return None
# =============================================================================
def survey_getTemplateFromSeries(series_id):
"""
Return the template data from the series_id passed in
@ToDo: Remove wrapper
"""
stable = current.s3db.survey_series
ttable = current.s3db.survey_template
query = (stable.id == series_id) & \
(ttable.id == stable.template_id)
row = current.db(query).select(ttable.ALL,
limitby=(0, 1)).first()
return row
# =============================================================================
def survey_getAllTemplates():
"""
Function to return all the templates on the database
@ToDo: Remove wrapper
"""
table = current.s3db.survey_template
rows = current.db(table).select()
return rows
# =============================================================================
def survey_getAllWidgetsForTemplate(template_id):
"""
Function to return the widgets for each question for the given
template. The widgets are returned in a dict with the key being
the question code.
"""
s3db = current.s3db
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
query = (q_ltable.template_id == template_id) & \
(q_ltable.question_id == qsntable.id)
rows = current.db(query).select(qsntable.id,
qsntable.code,
qsntable.type,
q_ltable.posn,
)
widgets = {}
for row in rows:
sqrow = row.survey_question
qstnType = sqrow.type
qstn_id = sqrow.id
qstn_code = sqrow.code
qstn_posn = row.survey_question_list.posn
widgetObj = survey_question_type[qstnType](qstn_id)
widgets[qstn_code] = widgetObj
widgetObj.question["posn"] = qstn_posn
question = {}
return widgets
# =============================================================================
def survey_getAllSectionsForSeries(series_id):
"""
Function to return the list of sections for the given series
The sections are returned in the order of their position in the
template.
The data on each section is held in a dict and is as follows:
section_id, name, template_id, and posn
"""
row = survey_getSeries(series_id)
template_id = row.template_id
return survey_getAllSectionsForTemplate(template_id)
# =============================================================================
def survey_buildQuestionnaireFromTemplate(template_id):
"""
Build a form displaying all the questions for a given template_id
@ToDo: Remove wrapper
"""
questions = survey_getAllQuestionsForTemplate(template_id)
return buildQuestionsForm(questions, readOnly=True)
# =============================================================================
def survey_getAllSectionsForTemplate(template_id):
"""
function to return the list of sections for the given template
The sections are returned in the order of their position in the
template.
The data on each section is held in a dict and is as follows:
section_id, name, template_id, and posn
"""
sectable = current.s3db.survey_section
query = (sectable.template_id == template_id)
rows = current.db(query).select(sectable.id,
sectable.name,
sectable.template_id,
sectable.posn,
orderby = sectable.posn)
sections = []
for sec in rows:
sections.append({"section_id": sec.id,
"name" : sec.name,
"template_id": sec.template_id,
"posn" : sec.posn
}
)
return sections
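# Each entry of the returned list is a dict, e.g. (illustrative values):
#   {"section_id": 3, "name": "Background Information",
#    "template_id": 2, "posn": 0}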
# =============================================================================
def survey_getWidgetFromQuestion(question_id):
"""
Function that gets the right widget for the question
"""
qtable = current.s3db.survey_question
query = (qtable.id == question_id)
question = current.db(query).select(qtable.type,
limitby=(0, 1)).first()
qstnType = question.type
widgetObj = survey_question_type[qstnType](question_id)
return widgetObj
# =============================================================================
def buildQuestionsForm(questions, complete_id=None, readOnly=False):
"""
Create the form, hard-coded table layout :(
"""
form = FORM()
table = None
sectionTitle = ""
for question in questions:
if sectionTitle != question["section"]:
if sectionTitle != "":
form.append(P())
form.append(HR(_width="90%"))
form.append(P())
div = DIV(_class="survey_scrollable")
table = TABLE()
div.append(table)
form.append(div)
table.append(TR(TH(question["section"],
_colspan="2"),
_class="survey_section"))
sectionTitle = question["section"]
widgetObj = survey_getWidgetFromQuestion(question["qstn_id"])
if readOnly:
table.append(TR(TD(question["code"]),
TD(widgetObj.type_represent()),
TD(question["name"])
)
)
else:
if complete_id != None:
widgetObj.loadAnswer(complete_id, question["qstn_id"])
widget = widgetObj.display(question_id = question["qstn_id"])
if widget != None:
if isinstance(widget, TABLE):
table.append(TR(TD(widget, _colspan=2)))
else:
table.append(widget)
if not readOnly:
button = INPUT(_type="submit", _name="Save", _value=current.T("Save"))
form.append(button)
return form
# =============================================================================
def survey_build_template_summary(template_id):
    """
        Build a dataTable summarising, for each section of the template,
        the number of questions of each question type
    """
from s3.s3data import S3DataTable
T = current.T
table = TABLE(_id="template_summary",
_class="dataTable display")
hr = TR(TH(T("Position")), TH(T("Section")))
qstnTypeList = {}
posn = 1
for (key, type) in survey_question_type.items():
if key == "Grid" or key == "GridChild":
continue
hr.append(TH(type().type_represent()))
qstnTypeList[key] = posn
posn += 1
hr.append(TH(T("Total")))
header = THEAD(hr)
numOfQstnTypes = len(survey_question_type) - 1 # exclude the grid questions
questions = survey_getAllQuestionsForTemplate(template_id)
sectionTitle = ""
line = []
body = TBODY()
section = 0
total = ["", T("Total")] + [0]*numOfQstnTypes
for question in questions:
if sectionTitle != question["section"]:
if line != []:
br = TR()
for cell in line:
br.append(cell)
body.append(br)
section += 1
sectionTitle = question["section"]
line = [section, sectionTitle] + [0]*numOfQstnTypes
if question["type"] == "Grid":
continue
if question["type"] == "GridChild":
# get the real grid question type
widgetObj = survey_getWidgetFromQuestion(question["qstn_id"])
question["type"] = widgetObj.typeDescription
line[qstnTypeList[question["type"]]+1] += 1
line[numOfQstnTypes+1] += 1
total[qstnTypeList[question["type"]]+1] += 1
total[numOfQstnTypes+1] += 1
# Add the trailing row
br = TR()
for cell in line:
br.append(cell)
body.append(br)
# Add the footer to the table
foot = TFOOT()
tr = TR()
for cell in total:
tr.append(TD(B(cell))) # don't use TH() otherwise dataTables will fail
foot.append(tr)
table.append(header)
table.append(body)
table.append(foot)
# Turn off server side pagination
s3 = current.response.s3
s3.no_sspag = True
s3.no_formats = True
s3.dataTableID = None
attr = S3DataTable.getConfigData()
form = S3DataTable.htmlConfig(table,
"template_summary",
[[0, 'asc']], # order by
"", # the filter string
None, # the rfields
dt_action_col = -1,
**attr
)
return form
# =============================================================================
class S3SurveyQuestionModel(S3Model):
"""
Question Model
"""
names = ("survey_question",
"survey_question_id",
"survey_question_metadata",
"survey_question_list",
"survey_qstn_name_represent",
)
def model(self):
T = current.T
s3 = current.response.s3
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# survey_question
# Defines a question that will appear within a section, and thus belong
# to the template.
#
        # This holds the actual question text, and
        # a string code (unique within the template) is used to identify the question.
#
# It will have a type from the questionType dictionary.
# This type will determine the options that can be associated with it.
# A question can belong to many different sections.
# The notes are to help the enumerator and will typically appear as a
# footnote in the printed form.
tablename = "survey_question"
define_table(tablename,
Field("name", "string", length=200,
notnull=True,
represent = self.qstn_name_represent,
),
Field("code", "string", length=16,
notnull=True,
),
Field("notes", "string", length=400
),
Field("type", "string", length=40,
notnull=True,
),
Field("metadata", "text",
),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create an Assessment Question"),
title_display = T("Assessment Question Details"),
title_list = T("Assessment Questions"),
title_update = T("Edit Assessment Question"),
label_list_button = T("List Assessment Questions"),
label_delete_button = T("Delete this Assessment Question"),
msg_record_created = T("Assessment Question added"),
msg_record_modified = T("Assessment Question updated"),
msg_record_deleted = T("Assessment Question deleted"),
msg_list_empty = T("No Assessment Questions"))
configure(tablename,
deduplicate = self.survey_question_duplicate,
onaccept = self.question_onaccept,
onvalidation = self.question_onvalidate,
)
question_id = S3ReusableField("question_id", "reference %s" % tablename,
readable = False,
writable = False,
)
# ---------------------------------------------------------------------
# survey_question_metadata
        # References the survey_question table and is used to manage
        # the metadata that will be associated with a question type.
# For example: if the question type is option, then valid metadata
# might be:
# count: the number of options that will be presented: 3
# 1 : the first option : Female
# 2 : the second option : Male
# 3 : the third option : Not Specified
# So in the above case a question record will be associated with four
# question_metadata records.
tablename = "survey_question_metadata"
define_table(tablename,
question_id(),
Field("descriptor",
"string",
length=20,
notnull=True,
),
Field("value",
"text",
notnull=True,
),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Question Meta-Data"),
title_display = T("Question Meta-Data Details"),
title_list = T("Question Meta-Data"),
title_update = T("Edit Question Meta-Data"),
label_list_button = T("List Question Meta-Data"),
label_delete_button = T("Delete this Question Meta-Data"),
msg_record_created = T("Question Meta-Data added"),
msg_record_modified = T("Question Meta-Data updated"),
msg_record_deleted = T("Question Meta-Data deleted"),
msg_list_empty = T("No Question Meta-Data"),
title_upload = T("Upload a Question List import file")
)
configure(tablename,
deduplicate = self.survey_question_metadata_duplicate,
)
# -------------------------------------------------------------------------
# The survey_question_list table is a resolver between
# the survey_question and the survey_section tables.
#
        # Along with ids mapping back to these tables,
        # it holds the position at which the question will appear in the template.
tablename = "survey_question_list"
define_table(tablename,
Field("posn",
"integer",
notnull=True,
),
self.survey_template_id(),
question_id(),
self.survey_section_id(),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
title_upload = T("Upload an Assessment Template import file")
)
configure(tablename,
deduplicate = self.survey_question_list_duplicate,
onaccept = self.question_list_onaccept,
)
# Pass names back to global scope (s3.*)
# ---------------------------------------------------------------------
return dict(survey_qstn_name_represent = self.qstn_name_represent,
survey_question_id = question_id
)
# -------------------------------------------------------------------------
@staticmethod
def qstn_name_represent(value):
"""
            Return the question name; for locations in the GIS hierarchy
            the localised name will be returned
"""
if value == "L0" or value == "L1" or \
value == "L2" or value == "L3" or value == "L4":
return current.gis.get_location_hierarchy(value)
else:
return value
# -------------------------------------------------------------------------
@staticmethod
def question_onvalidate(form):
"""
            Any text within the imported metadata will be held in
            single quotes rather than double quotes, so these need
            to be escaped to double quotes to make it valid JSON
"""
from xml.sax.saxutils import unescape
        metadata = form.vars.metadata
        if metadata != None:
            form.vars.metadata = unescape(metadata, {"'": '"'})
return True
# -------------------------------------------------------------------------
@staticmethod
def question_onaccept(form):
"""
            All of the question metadata will be stored in the metadata
            field in a JSON format.
            Each descriptor/value pair will then be inserted as a record
            in the survey_question_metadata table.
"""
vars = form.vars
if vars.metadata is None:
return
if vars.id:
record = current.s3db.survey_question[vars.id]
else:
return
if vars.metadata and \
vars.metadata != "":
survey_updateMetaData(record,
vars.type,
vars.metadata
)
# -------------------------------------------------------------------------
@staticmethod
def survey_question_duplicate(job):
"""
Rules for finding a duplicate:
- Look for the question code
"""
if job.tablename == "survey_question":
table = job.table
data = job.data
code = data.get("code")
query = (table.code == code)
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def survey_question_metadata_duplicate(job):
"""
Rules for finding a duplicate:
- Look for the question_id and descriptor
"""
if job.tablename == "survey_question_metadata":
table = job.table
data = job.data
question = data.get("question_id")
descriptor = data.get("descriptor")
query = (table.descriptor == descriptor) & \
(table.question_id == question)
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def question_list_onaccept(form):
"""
            If a grid question is added to the list then all of the
grid children will need to be added as well
"""
qstntable = current.s3db.survey_question
try:
vars = form.vars
question_id = vars.question_id
template_id = vars.template_id
section_id = vars.section_id
posn = vars.posn
except:
return
record = qstntable[question_id]
try:
type = record.type
except:
_debug("survey question missing type: %s" % record)
return
if type == "Grid":
widgetObj = survey_question_type["Grid"]()
widgetObj.insertChildrenToList(question_id,
template_id,
section_id,
posn,
)
if type == "Location":
widgetObj = survey_question_type["Location"]()
widgetObj.insertChildrenToList(question_id,
template_id,
section_id,
posn,
)
# -------------------------------------------------------------------------
@staticmethod
def survey_question_list_duplicate(job):
"""
Rules for finding a duplicate:
- The template_id, question_id and section_id are the same
"""
if job.tablename == "survey_question_list":
table = job.table
data = job.data
tid = data.get("template_id")
qid = data.get("question_id")
sid = data.get("section_id")
query = (table.template_id == tid) & \
(table.question_id == qid) & \
(table.section_id == sid)
return duplicator(job, query)
# =============================================================================
def survey_getQuestionFromCode(code, series_id=None):
"""
Function to return the question for the given series
with the code that matches the one passed in
"""
s3db = current.s3db
sertable = s3db.survey_series
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
if series_id != None:
query = (sertable.id == series_id) & \
(q_ltable.template_id == sertable.template_id) & \
(q_ltable.question_id == qsntable.id) & \
(qsntable.code == code)
else:
query = (q_ltable.template_id == sertable.template_id) & \
(q_ltable.question_id == qsntable.id) & \
(qsntable.code == code)
record = current.db(query).select(qsntable.id,
qsntable.code,
qsntable.name,
qsntable.type,
q_ltable.posn,
limitby=(0, 1)).first()
question = {}
if record != None:
sq = record.survey_question
question["qstn_id"] = sq.id
question["code"] = sq.code
question["name"] = sq.name
question["type"] = sq.type
question["posn"] = record.survey_question_list.posn
return question
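# When a matching question exists, the returned dict looks like
# (illustrative values):
#   {"qstn_id": 42, "code": "STD-WHO", "name": "Completed by",
#    "type": "String", "posn": -10}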
# =============================================================================
def survey_getAllQuestionsForTemplate(template_id):
"""
Function to return the list of questions for the given template
The questions are returned in the order of their position in the
template.
The data on a question that it returns is as follows:
qstn_id, code, name, type, posn, section
"""
s3db = current.s3db
sectable = s3db.survey_section
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
query = (q_ltable.template_id == template_id) & \
(q_ltable.section_id == sectable.id) & \
(q_ltable.question_id == qsntable.id)
rows = current.db(query).select(qsntable.id,
qsntable.code,
qsntable.name,
qsntable.type,
sectable.name,
q_ltable.posn,
orderby=(q_ltable.posn))
questions = []
for row in rows:
question = {}
sq = row.survey_question
question["qstn_id"] = sq.id
question["code"] = sq.code
question["name"] = s3db.survey_qstn_name_represent(sq.name)
question["type"] = sq.type
question["posn"] = row.survey_question_list.posn
question["section"] = row.survey_section.name
questions.append(question)
return questions
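# Each entry of the returned list is a dict, e.g. (illustrative values):
#   {"qstn_id": 7, "code": "STD-DATE", "name": "Date of assessment",
#    "type": "Date", "posn": -9, "section": "Background Information"}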
# =============================================================================
def survey_getAllQuestionsForSeries(series_id):
"""
Function to return the list of questions for the given series
        The questions are returned in the order of their position in the
        template.
        The data on a question that it returns is as follows:
qstn_id, code, name, type, posn, section
"""
table = current.s3db.survey_series
row = current.db(table.id == series_id).select(table.template_id,
limitby=(0, 1)).first()
template_id = row.template_id
questions = survey_getAllQuestionsForTemplate(template_id)
return questions
# =============================================================================
def survey_getAllQuestionsForComplete(complete_id):
"""
Function to return a tuple of the list of questions and series_id
        for the given complete_id
        The questions are returned in the order of their position in the
        template.
        The data on a question that it returns is as follows:
qstn_id, code, name, type, posn, section
"""
table = current.s3db.survey_complete
row = current.db(table.id == complete_id).select(table.series_id,
limitby=(0, 1)).first()
series_id = row.series_id
questions = survey_getAllQuestionsForSeries(series_id)
return (questions, series_id)
# =============================================================================
def survey_get_series_questions_of_type(questionList, type):
    """
        Filter the given list of questions, returning only those whose
        type (or, for Link and GridChild questions, whose parent type)
        is one of the given types
    """
if isinstance(type, (list, tuple)):
types = type
else:
        types = (type,)
questions = []
for question in questionList:
if question["type"] in types:
questions.append(question)
elif question["type"] == "Link" or \
question["type"] == "GridChild":
widgetObj = survey_getWidgetFromQuestion(question["qstn_id"])
if widgetObj.getParentType() in types:
question["name"] = widgetObj.fullName()
questions.append(question)
return questions
# =============================================================================
def survey_getQuestionFromName(name, series_id):
"""
Function to return the question for the given series
with the name that matches the one passed in
"""
s3db = current.s3db
sertable = s3db.survey_series
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
query = (sertable.id == series_id) & \
(q_ltable.template_id == sertable.template_id) & \
(q_ltable.question_id == qsntable.id) & \
(qsntable.name == name)
record = current.db(query).select(qsntable.id,
qsntable.code,
qsntable.name,
qsntable.type,
q_ltable.posn,
limitby=(0, 1)).first()
if record == None:
# Unable to get the record from the question name
# It could be because the question is a location
# So get the location names and then check
locList = current.gis.get_all_current_levels()
for row in locList.items():
if row[1] == name:
return survey_getQuestionFromName(row[0],series_id)
question = {}
sq = record.survey_question
question["qstn_id"] = sq.id
question["code"] = sq.code
question["name"] = sq.name
question["type"] = sq.type
question["posn"] = record.survey_question_list.posn
return question
# =============================================================================
def survey_updateMetaData(record, type, metadata):
    """
        Insert the metadata for a question into the
        survey_question_metadata table, one record per descriptor/value
        pair; for Grid questions also create the child questions
    """
metatable = current.s3db.survey_question_metadata
id = record.id
# the metadata can either be passed in as a JSON string
# or as a parsed map. If it is a string load the map.
if isinstance(metadata, str):
metadataList = json2py(metadata)
else:
metadataList = metadata
for (desc, value) in metadataList.items():
desc = desc.strip()
if not isinstance(value, str):
# web2py stomps all over a list so convert back to a string
# before inserting it on the database
value = json.dumps(value)
value = value.strip()
metatable.insert(question_id = id,
descriptor = desc,
value = value
)
if type == "Grid":
widgetObj = survey_question_type["Grid"]()
widgetObj.insertChildren(record, metadataList)
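# For example (illustrative only), the metadata for an Option question with
# three options might be passed in as:
#   {"count": 3, "1": "Female", "2": "Male", "3": "Not Specified"}
# which creates four survey_question_metadata records for that question.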
# =============================================================================
class S3SurveyFormatterModel(S3Model):
"""
The survey_formatter table defines the order in which the questions
will be laid out when a formatted presentation is used.
The idea is to be able to present the questions in a format that
best uses the available space and is familiar to those using the
tool.
Examples of formatted presentation are the spreadsheet and the web
form. This may be extended to PDF documents.
The rules are held as a JSON record and describe where each question
within the section should appear in terms of rows and columns. Each
question is referenced by the question code.
For example assume a section with the following eight questions:
QSTN_1, QSTN_2, QSTN_3, QSTN_4, QSTN_5, QSTN_6, QSTN_7, QSTN_8
Then to display them in three rows:
[[QSTN_1, QSTN_2, QSTN_3], [QSTN_4, QSTN_5, QSTN_6], [QSTN_7, QSTN_8]]
would present it as follows:
QSTN_1, QSTN_2, QSTN_3,
QSTN_4, QSTN_5, QSTN_6,
QSTN_7, QSTN_8
The order of the questions does not need to be preserved, thus:
[[QSTN_1, QSTN_2], [QSTN_4, QSTN_5, QSTN_3], [QSTN_7, QSTN_8, QSTN_6]]
would be valid, and give:
QSTN_1, QSTN_2,
QSTN_4, QSTN_5, QSTN_3,
QSTN_7, QSTN_8, QSTN_6,
***NOTE***
When importing this record with a CSV file the question code will be
single quoted, rather than double quoted which JSON requires.
This is because the whole rule needs to be double quoted. Code that
extracts the records from the table will then need to change all
single quotes to double quotes. This can be done as follows:
rowList = json2py(rules)
"""
names = ("survey_formatter",)
def model(self):
T = current.T
survey_formatter_methods = {
1: T("Default"),
2: T("Web Form"),
3: T("Spreadsheet"),
4: T("PDF"),
}
# ---------------------------------------------------------------------
tablename = "survey_formatter"
self.define_table(tablename,
self.survey_template_id(),
self.survey_section_id(),
Field("method", "integer",
default = 1,
requires = IS_IN_SET(survey_formatter_methods,
zero=None),
represent = lambda index: \
survey_formatter_methods[index],
#readable = True,
writable = False,
),
Field("rules", "text", default=""),
*s3_meta_fields()
)
self.configure(tablename,
deduplicate = self.survey_formatter_duplicate,
onaccept = self.formatter_onaccept,
)
# ---------------------------------------------------------------------
return dict()
# -------------------------------------------------------------------------
@staticmethod
def formatter_onaccept(form):
"""
If this is the formatter rules for the Background Information
section then add the standard questions to the layout
"""
s3db = current.s3db
section_id = form.vars.section_id
sectionTbl = s3db.survey_section
section_name = sectionTbl[section_id].name
if section_name == "Background Information":
col1 = []
# Add the default layout
templateTbl = s3db.survey_template
template = templateTbl[form.vars.template_id]
if template.competion_qstn != "":
col1.append("STD-WHO")
if template.date_qstn != "":
col1.append("STD-DATE")
if template.time_qstn != "":
col1.append("STD-TIME")
if "location_detail" in template:
col2 = ["STD-P-Code"]
locationList = json2py(template.location_detail)
for loc in locationList:
col2.append("STD-%s" % loc)
col = [col1, col2]
rule = [{"columns":col}]
ruleList = json2py(form.vars.rules)
            ruleList[:0] = rule
rules = json.dumps(ruleList)
db = current.db
ftable = db.survey_formatter
db(ftable.id == form.vars.id).update(rules = rules)
# -------------------------------------------------------------------------
@staticmethod
def survey_formatter_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same template_id and section_id
"""
if job.tablename == "survey_formatter":
table = job.table
data = job.data
tid = data.get("template_id")
sid = data.get("section_id")
query = (table.template_id == tid) & \
(table.section_id == sid)
return duplicator(job, query)
# =============================================================================
def survey_getQstnLayoutRules(template_id,
section_id,
method = 1
):
"""
This will return the rules for laying out the questions for
the given section within the template.
This is used when generating a formatted layout.
First it will look for a survey_formatter record that matches
the method given. Failing that it will look for a default
survey_formatter record. If no appropriate survey_formatter
record exists for the section then it will use the posn
field found in the survey_question_list record.
The function will return a list of rows. Each row is a list
of question codes.
"""
db = current.db
s3db = current.s3db
# search for layout rules on the survey_formatter table
fmttable = s3db.survey_formatter
query = (fmttable.template_id == template_id) & \
(fmttable.section_id == section_id)
rows = db(query).select(fmttable.method,
fmttable.rules)
rules = None
drules = None # default rules
for row in rows:
if row.method == method:
rules = row.rules
break
elif row.method == 1:
drules = row.rules
if rules == None and drules != None:
rules = drules
rowList = []
if rules is None or rules == "":
# get the rules from survey_question_list
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
query = (q_ltable.template_id == template_id) & \
(q_ltable.section_id == section_id) & \
(q_ltable.question_id == qsntable.id)
rows = db(query).select(qsntable.code,
q_ltable.posn,
orderby=(q_ltable.posn))
append = rowList.append
for qstn in rows:
append([qstn.survey_question.code])
else:
# convert the JSON rules to python
rowList = json2py(rules)
return rowList
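# In the simplest case (no survey_formatter record for the section) the
# result is one question code per row, e.g. (illustrative codes):
#   [["QSTN_1"], ["QSTN_2"], ["QSTN_3"]]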
# =============================================================================
class S3SurveySeriesModel(S3Model):
"""
Series Model
"""
names = ("survey_series",
"survey_series_id",
"survey_series_status",
)
def model(self):
T = current.T
person_id = self.pr_person_id
pr_person_comment = self.pr_person_comment
organisation_id = self.org_organisation_id
s3_date_represent = S3DateTime.date_represent
s3_date_format = current.deployment_settings.get_L10n_date_format()
crud_strings = current.response.s3.crud_strings
set_method = self.set_method
if current.deployment_settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
org_widget = None
# ---------------------------------------------------------------------
# The survey_series table is used to hold all uses of a template
#
# When a series is first created the template status will change from
# Pending to Active and at the stage no further changes to the
# template can be made.
#
# Typically a series will be created for an event, which may be a
# response to a natural disaster, an exercise,
# or regular data collection activity.
#
# The series is a container for all the responses for the event
series_status = {
1: T("Active"),
2: T("Closed"),
}
tablename = "survey_series"
self.define_table(tablename,
Field("name", "string", length=120,
default = "",
requires = IS_NOT_EMPTY(),
),
Field("description", "text", default="", length=500),
Field("status", "integer",
default = 1,
requires = IS_IN_SET(series_status,
zero=None),
represent = lambda index: series_status[index],
#readable = True,
writable = False,
),
self.survey_template_id(empty=False,
ondelete="RESTRICT"),
person_id(),
organisation_id(widget = org_widget),
Field("logo", "string", default="", length=512),
Field("language", "string", default="en", length=8),
Field("start_date", "date",
default = None,
requires = IS_EMPTY_OR(IS_DATE(format = s3_date_format)),
represent = s3_date_represent,
widget = S3DateWidget(),
),
Field("end_date", "date",
default = None,
requires = IS_EMPTY_OR(IS_DATE(format = s3_date_format)),
represent = s3_date_represent,
widget = S3DateWidget(),
),
#self.super_link("source_id", "doc_source_entity"),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Conduct a Disaster Assessment"),
title_display = T("Details of Disaster Assessment"),
title_list = T("Disaster Assessments"),
title_update = T("Edit this Disaster Assessment"),
title_analysis_summary = T("Disaster Assessment Summary"),
title_analysis_chart = T("Disaster Assessment Chart"),
title_map = T("Disaster Assessment Map"),
subtitle_analysis_summary = T("Summary of Completed Assessment Forms"),
help_analysis_summary = T("Click on questions below to select them, then click 'Display Selected Questions' button to view the selected questions for all Completed Assessment Forms"),
subtitle_analysis_chart = T("Select a label question and at least one numeric question to display the chart."),
subtitle_map = T("Disaster Assessment Map"),
label_list_button = T("List Disaster Assessments"),
label_delete_button = T("Delete this Disaster Assessment"),
msg_record_created = T("Disaster Assessment added"),
msg_record_modified = T("Disaster Assessment updated"),
msg_record_deleted = T("Disaster Assessment deleted"),
msg_list_empty = T("No Disaster Assessments"))
self.configure(tablename,
create_next = URL(f="newAssessment",
vars={"viewing":"survey_series.[id]"}),
deduplicate = self.survey_series_duplicate,
onaccept = self.series_onaccept,
)
# Components
self.add_components(tablename,
survey_complete="series_id",
)
series_id = S3ReusableField("series_id", "reference %s" % tablename,
label = T("Series"),
represent = S3Represent(lookup=tablename),
readable = False,
writable = False,
)
# Custom Methods
set_method("survey", "series", method="summary", # NB This conflicts with the global summary method!
action=self.seriesSummary)
set_method("survey", "series", method="graph",
action=self.seriesGraph)
set_method("survey", "series", method="map", # NB This conflicts with the global map method!
action=self.seriesMap)
set_method("survey", "series", method="series_chart_download",
action=self.seriesChartDownload)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(survey_series_status = series_status,
survey_series_id = series_id,
)
# -------------------------------------------------------------------------
@staticmethod
def series_onaccept(form):
"""
Ensure that the template status is set to Active
"""
if form.vars.template_id:
template_id = form.vars.template_id
else:
return
table = current.s3db.survey_template
current.db(table.id == template_id).update(status = 2)
# -------------------------------------------------------------------------
@staticmethod
def survey_series_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with a similar name, ignoring case
"""
if job.tablename == "survey_series":
table = job.table
data = job.data
name = data.get("name")
query = table.name.lower().like('%%%s%%' % name.lower())
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
    def seriesSummary(r, **attr):
        """
            Build the series summary page: either the list of questions
            to choose from, or the details of the selected questions for
            all completed assessment forms
        """
db = current.db
s3db = current.s3db
request = current.request
s3 = current.response.s3
posn_offset = 11
# Retain the rheader
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
output = dict(rheader=rheader)
else:
output = dict()
if request.env.request_method == "POST" \
or "mode" in request.vars:
            # This means that the user has selected the questions and
            # wants to display the details of the selected questions
crud_strings = s3.crud_strings["survey_complete"]
question_ids = []
vars = request.vars
if "mode" in vars:
mode = vars["mode"]
series_id = r.id
if "selected" in vars:
selected = vars["selected"].split(",")
else:
selected = []
q_ltable = s3db.survey_question_list
sertable = s3db.survey_series
query = (sertable.id == series_id) & \
(sertable.template_id == q_ltable.template_id)
questions = db(query).select(q_ltable.posn,
q_ltable.question_id,
orderby = q_ltable.posn)
for question in questions:
qstn_posn = question.posn + posn_offset
if mode == "Inclusive":
if str(qstn_posn) in selected:
question_ids.append(str(question.question_id))
elif mode == "Exclusive":
if str(qstn_posn) not in selected:
question_ids.append(str(question.question_id))
items = buildCompletedList(series_id, question_ids)
if r.representation == "xls":
from ..s3.s3codecs.xls import S3XLS
exporter = S3XLS()
return exporter.encode(items,
title=crud_strings.title_selected,
use_colour=False
)
if r.representation == "html":
table = buildTableFromCompletedList(items)
#exporter = S3Exporter()
#table = exporter.html(items)
output["items"] = table
output["sortby"] = [[0, "asc"]]
url_pdf = URL(c="survey", f="series",
args=[series_id, "summary.pdf"],
vars = {"mode": mode,
"selected": vars["selected"]}
)
url_xls = URL(c="survey", f="series",
args=[series_id, "summary.xls"],
vars = {"mode": mode,
"selected": vars["selected"]}
)
s3.formats["pdf"] = url_pdf
s3.formats["xls"] = url_xls
else:
output["items"] = None
output["title"] = crud_strings.title_selected
output["subtitle"] = crud_strings.subtitle_selected
output["help"] = ""
else:
crud_strings = s3.crud_strings["survey_series"]
viewing = request.get_vars.get("viewing", None)
if viewing:
dummy, series_id = viewing.split(".")
else:
series_id = request.get_vars.get("series", None)
if not series_id:
series_id = r.id
form = buildSeriesSummary(series_id, posn_offset)
output["items"] = form
output["sortby"] = [[0, "asc"]]
output["title"] = crud_strings.title_analysis_summary
output["subtitle"] = crud_strings.subtitle_analysis_summary
output["help"] = crud_strings.help_analysis_summary
s3.dataTableBulkActionPosn = "top"
s3.actions = None
current.response.view = "survey/series_summary.html"
return output
# -------------------------------------------------------------------------
@staticmethod
def getChartName():
"""
Create a Name for a Chart
"""
import hashlib
vars = current.request.vars
end_part = "%s_%s" % (vars.numericQuestion,
vars.labelQuestion)
h = hashlib.sha256()
h.update(end_part)
encoded_part = h.hexdigest()
chartName = "survey_series_%s_%s" % (vars.series, encoded_part)
return chartName
# -------------------------------------------------------------------------
@staticmethod
def seriesChartDownload(r, **attr):
"""
"""
from gluon.contenttype import contenttype
series_id = r.id
seriesName = survey_getSeriesName(series_id)
filename = "%s_chart.png" % seriesName
response = current.response
response.headers["Content-Type"] = contenttype(".png")
response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
chartFile = S3SurveySeriesModel.getChartName()
cached = S3Chart.getCachedFile(chartFile)
if cached:
return cached
# The cached version doesn't exist so regenerate it
output = dict()
        vars = current.request.get_vars
        # Initialise to None so the check below cannot raise a NameError
        # when either request variable is missing
        labelQuestion = None
        numQstnList = None
        if "labelQuestion" in vars:
            labelQuestion = vars.labelQuestion
        if "numericQuestion" in vars:
            numQstnList = vars.numericQuestion
            if not isinstance(numQstnList, (list, tuple)):
                numQstnList = [numQstnList]
        if (numQstnList != None) and (labelQuestion != None):
S3SurveySeriesModel.drawChart(output, series_id, numQstnList,
labelQuestion, outputFormat="png")
return output["chart"]
# -------------------------------------------------------------------------
@staticmethod
def seriesGraph(r, **attr):
"""
Allows the user to select one string question and multiple numeric
questions. The string question is used to group the numeric data,
with the result displayed as a bar chart.
For example:
The string question can be Geographic area, and the numeric
questions could be people injured and families displaced.
Then the results will be grouped by each geographical area.
"""
T = current.T
request = current.request
s3 = current.response.s3
output = dict()
# Draw the chart
vars = request.vars
if "viewing" in vars:
dummy, series_id = vars.viewing.split(".")
elif "series" in vars:
series_id = vars.series
else:
series_id = r.id
chartFile = S3SurveySeriesModel.getChartName()
cachePath = S3Chart.getCachedPath(chartFile)
if cachePath and request.ajax:
return IMG(_src=cachePath)
else:
numQstnList = None
labelQuestion = None
post_vars = request.post_vars
if post_vars is not None:
if "labelQuestion" in post_vars:
labelQuestion = post_vars.labelQuestion
if "numericQuestion" in post_vars:
numQstnList = post_vars.numericQuestion
if not isinstance(numQstnList, (list, tuple)):
numQstnList = [numQstnList]
if (numQstnList != None) and (labelQuestion != None):
S3SurveySeriesModel.drawChart(output, series_id, numQstnList,
labelQuestion)
if request.ajax == True and "chart" in output:
return output["chart"]
# retain the rheader
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
output["rheader"] = rheader
# ---------------------------------------------------------------------
def addQstnChkboxToTR(numQstnList, qstn):
"""
Build the form
"""
tr = TR()
if numQstnList != None and qstn["code"] in numQstnList:
tr.append(INPUT(_type="checkbox",
_name="numericQuestion",
_value=qstn["code"],
value=True,
)
)
else:
tr.append(INPUT(_type="checkbox",
_name="numericQuestion",
_value=qstn["code"],
)
)
tr.append(LABEL(qstn["name"]))
return tr
if series_id == None:
return output
allQuestions = survey_getAllQuestionsForSeries(series_id)
labelTypeList = ("String",
"Option",
"YesNo",
"YesNoDontKnow",
"Location",
)
        labelQuestions = survey_get_series_questions_of_type(allQuestions, labelTypeList)
lblQstns = []
for question in labelQuestions:
lblQstns.append(question["name"])
        numericTypeList = ("Numeric",)
form = FORM(_id="mapGraphForm")
table = TABLE()
labelQstn = SELECT(lblQstns, _name="labelQuestion", value=labelQuestion)
table.append(TR(TH("%s:" % T("Select Label Question")), _class="survey_question"))
table.append(labelQstn)
table.append(TR(TH(T("Select Numeric Questions (one or more):")), _class="survey_question"))
# First add the special questions
specialQuestions = [{"code":"Count", "name" : T("Number of Completed Assessment Forms")}]
innerTable = TABLE()
for qstn in specialQuestions:
tr = addQstnChkboxToTR(numQstnList, qstn)
innerTable.append(tr)
table.append(innerTable)
# Now add the numeric questions
        numericQuestions = survey_get_series_questions_of_type(allQuestions, numericTypeList)
innerTable = TABLE()
for qstn in numericQuestions:
tr = addQstnChkboxToTR(numQstnList, qstn)
innerTable.append(tr)
table.append(innerTable)
form.append(table)
series = INPUT(_type="hidden",
_id="selectSeriesID",
_name="series",
_value="%s" % series_id
)
button = INPUT(_type="button", _id="chart_btn", _name="Chart", _value=T("Display Chart"))
form.append(series)
form.append(button)
# Set up the javascript code for ajax interaction
jurl = URL(r=request, c=r.prefix, f=r.function, args=request.args)
s3.jquery_ready.append('''
$('#chart_btn').click(function(){
var data=$('#mapGraphForm').serialize()
var url='<a class="action-btn" href=series_chart_download?' + data + '>Download Chart</a>'
$.post('%s',data,function(data){
$('#survey_chart').empty();
$('#survey_chart').append(data);
$('#survey_chart_download').empty();
$('#survey_chart_download').append(url);
});
});
''' % jurl)
output["showForm"] = P(T("Click on the chart to show/hide the form."))
output["form"] = form
output["title"] = s3.crud_strings["survey_series"].title_analysis_chart
current.response.view = "survey/series_analysis.html"
return output
# -------------------------------------------------------------------------
@staticmethod
def drawChart(output, series_id, numQstnList, labelQuestion, outputFormat=None):
"""
"""
T = current.T
getAnswers = survey_getAllAnswersForQuestionInSeries
gqstn = survey_getQuestionFromName(labelQuestion, series_id)
gqstn_id = gqstn["qstn_id"]
ganswers = getAnswers(gqstn_id, series_id)
dataList = []
legendLabels = []
for numericQuestion in numQstnList:
if numericQuestion == "Count":
# get the count of replies for the label question
gqstn_type = gqstn["type"]
analysisTool = survey_analysis_type[gqstn_type](gqstn_id, ganswers)
map = analysisTool.uniqueCount()
label = map.keys()
data = map.values()
legendLabels.append(T("Count of Question"))
else:
qstn = survey_getQuestionFromCode(numericQuestion, series_id)
qstn_id = qstn["qstn_id"]
qstn_type = qstn["type"]
answers = getAnswers(qstn_id, series_id)
analysisTool = survey_analysis_type[qstn_type](qstn_id, answers)
label = analysisTool.qstnWidget.fullName()
if len(label) > 20:
label = "%s..." % label[0:20]
legendLabels.append(label)
grouped = analysisTool.groupData(ganswers)
aggregate = "Sum"
filtered = analysisTool.filter(aggregate, grouped)
(label, data) = analysisTool.splitGroupedData(filtered)
if data != []:
dataList.append(data)
if dataList == []:
output["chart"] = H4(T("There is insufficient data to draw a chart from the questions selected"))
else:
chartFile = S3SurveySeriesModel.getChartName()
chart = S3Chart(path=chartFile, width=7.2)
chart.asInt = True
chart.survey_bar(labelQuestion,
dataList,
label,
legendLabels)
if outputFormat == None:
image = chart.draw()
else:
image = chart.draw(output=outputFormat)
output["chart"] = image
request = current.request
chartLink = A(T("Download"),
_href=URL(c="survey",
f="series",
args=request.args,
vars=request.vars
)
)
output["chartDownload"] = chartLink
# -------------------------------------------------------------------------
@staticmethod
def seriesMap(r, **attr):
"""
"""
import math
from s3survey import S3AnalysisPriority
T = current.T
response = current.response
s3 = response.s3
request = current.request
gis = current.gis
# retain the rheader
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
output = dict(rheader=rheader)
else:
output = dict()
crud_strings = s3.crud_strings["survey_series"]
viewing = request.get_vars.get("viewing", None)
if viewing:
dummy, series_id = viewing.split(".")
else:
series_id = request.get_vars.get("series", None)
if not series_id:
series_id = r.id
if series_id == None:
seriesList = []
append = seriesList.append
records = survey_getAllSeries()
for row in records:
append(row.id)
else:
seriesList = [series_id]
pqstn = {}
pqstn_name = request.post_vars.get("pqstn_name", None)
if pqstn_name is None:
pqstn = survey_getPriorityQuestionForSeries(series_id)
if "name" in pqstn:
pqstn_name = pqstn["name"]
feature_queries = []
bounds = {}
# Build the drop down list of priority questions
allQuestions = survey_getAllQuestionsForSeries(series_id)
        numericTypeList = ("Numeric",)
numericQuestions = survey_get_series_questions_of_type(allQuestions,
numericTypeList)
numQstns = []
for question in numericQuestions:
numQstns.append(question["name"])
form = FORM(_id="mapQstnForm")
table = TABLE()
if pqstn:
priorityQstn = SELECT(numQstns, _name="pqstn_name",
value=pqstn_name)
else:
priorityQstn = None
# Set up the legend
priorityObj = S3AnalysisPriority(range=[-.66, .66],
colour={-1:"#888888", # grey
0:"#008000", # green
1:"#FFFF00", # yellow
2:"#FF0000", # red
},
# Make Higher-priority show up more clearly
opacity={-1:0.5,
0:0.6,
1:0.7,
2:0.8,
},
image={-1:"grey",
0:"green",
1:"yellow",
2:"red",
},
desc={-1:"No Data",
0:"Low",
1:"Average",
2:"High",
},
zero = True)
for series_id in seriesList:
series_name = survey_getSeriesName(series_id)
response_locations = getLocationList(series_id)
if pqstn == {} and pqstn_name:
for question in numericQuestions:
if pqstn_name == question["name"]:
pqstn = question
if pqstn != {}:
pqstn_id = pqstn["qstn_id"]
answers = survey_getAllAnswersForQuestionInSeries(pqstn_id,
series_id)
analysisTool = survey_analysis_type["Numeric"](pqstn_id,
answers)
analysisTool.advancedResults()
else:
analysisTool = None
if analysisTool != None and not math.isnan(analysisTool.mean):
pBand = analysisTool.priorityBand(priorityObj)
legend = TABLE(
TR (TH(T("Marker Levels"), _colspan=3),
_class= "survey_question"),
)
for key in priorityObj.image.keys():
tr = TR(TD(priorityObj.imageURL(request.application,
key)),
TD(priorityObj.desc(key)),
TD(priorityObj.rangeText(key, pBand)),
)
legend.append(tr)
output["legend"] = legend
if len(response_locations) > 0:
                for i in range(len(response_locations)):
location = response_locations[i]
complete_id = location.complete_id
# Insert how we want this to appear on the map
url = URL(c="survey",
f="series",
args=[series_id,
"complete",
complete_id,
"read"
]
)
location.shape = "circle"
location.size = 5
if analysisTool is None:
priority = -1
else:
priority = analysisTool.priority(complete_id,
priorityObj)
location.colour = priorityObj.colour[priority]
location.opacity = priorityObj.opacity[priority]
location.popup_url = url
location.popup_label = response_locations[i].name
feature_queries.append({ "name": "%s: Assessments" % series_name,
"query": response_locations,
"active": True })
if bounds == {}:
bounds = (gis.get_bounds(features=response_locations))
else:
new_bounds = gis.get_bounds(features=response_locations)
# Where is merge_bounds defined!?
bounds = merge_bounds([bounds, new_bounds])
if bounds == {}:
bounds = gis.get_bounds()
map = gis.show_map(feature_queries = feature_queries,
#height = 600,
#width = 720,
bbox = bounds,
#collapsed = True,
catalogue_layers = True,
)
series = INPUT(_type="hidden",
_id="selectSeriesID",
_name="series",
_value="%s" % series_id
)
table.append(TR(TH("%s:" % T("Display Question on Map")),
_class="survey_question"))
table.append(priorityQstn)
table.append(series)
form.append(table)
button = INPUT(_type="submit", _name="Chart",
_value=T("Update Map"))
# REMOVED until we have dynamic loading of maps.
#button = INPUT(_type="button", _id="map_btn", _name="Map_Btn", _value=T("Select the Question"))
#jurl = URL(r=request, c=r.prefix, f=r.function, args=request.args)
#s3.jquery_ready.append('''
#$('#map_btn').click(function(){
# $.post('%s',$('#mapQstnForm').serialize(),function(data){
# obj = jQuery.parseJSON(data);
# $('#survey_map-legend').empty();
# $('#survey_map-legend').append(obj.legend);
# $('#survey_map-container').empty();
# $('#survey_map-container').append(obj.map);
# });
#});
#''' % jurl)
form.append(button)
output["title"] = crud_strings.title_map
output["subtitle"] = crud_strings.subtitle_map
output["instructions"] = T("Click on a marker to see the Completed Assessment Form")
output["form"] = form
output["map"] = map
response.view = "survey/series_map.html"
return output
# =============================================================================
def survey_serieslist_dataTable_post(r):
"""
Replace the Action Buttons
"""
#S3CRUD.action_buttons(r)
current.response.s3.actions = [
dict(label=current.messages.UPDATE,
_class="action-btn edit",
url=URL(c="survey", f="series",
args=["[id]", "summary"]
)
),
]
# =============================================================================
def survey_series_rheader(r):
"""
The series rheader
"""
if r.representation == "html":
tablename, record = s3_rheader_resource(r)
if not record:
series_id = current.request.vars.series
record = survey_getSeries(series_id)
if record != None:
T = current.T
s3db = current.s3db
# Tabs
tabs = [(T("Details"), None),
(T("Completed Assessments"), "complete"),
(T("Summary"), "summary"),
(T("Chart"), "graph"),
(T("Map"), "map"),
]
if current.auth.s3_has_permission("create", "survey_complete"):
tabs.insert(1, (T("Enter Completed Assessment"), "newAssessment/"))
rheader_tabs = s3_rheader_tabs(r, tabs)
completeTable = s3db.survey_complete
qty = current.db(completeTable.series_id == record.id).count()
tsection = TABLE(_class="survey-complete-list")
lblSection = T("Number of Completed Assessment Forms")
rsection = TR(TH(lblSection), TD(qty))
tsection.append(rsection)
urlexport = URL(c="survey", f="series_export_formatted",
args=[record.id])
tranForm = FORM(_action=urlexport)
translationList = survey_getAllTranslationsForSeries(record.id)
if len(translationList) > 0:
tranTable = TABLE()
tr = TR(INPUT(_type='radio',
_name='translationLanguage',
_value="Default",
_checked=True,
),
LABEL("Default"))
colCnt = 1
for translation in translationList:
# include a maximum of 4 translation languages per row
if colCnt == 4:
tranTable.append(tr)
tr = TR()
colCnt = 0
tr.append(INPUT(_type="radio",
_name="translationLanguage",
_value=translation["code"],
))
tr.append(LABEL(translation["language"]))
colCnt += 1
if colCnt != 0:
tranTable.append(tr)
tranForm.append(tranTable)
export_xls_btn = INPUT(_type="submit",
_id="export_xls_btn",
_name="Export_Spreadsheet",
_value=T("Download Assessment Form Spreadsheet"),
_class="action-btn"
)
tranForm.append(export_xls_btn)
try:
                # only add the Export to Word button if PyRTF is installed
from PyRTF import Document
export_rtf_btn = INPUT(_type="submit",
_id="export_rtf_btn",
_name="Export_Word",
_value=T("Download Assessment Form Document"),
_class="action-btn"
)
tranForm.append(export_rtf_btn)
except:
pass
urlimport = URL(c="survey",
f="export_all_responses",
args=[record.id],
)
buttons = DIV(A(T("Export all Completed Assessment Data"),
_href=urlimport,
_id="All_resposnes",
_class="action-btn"
),
)
rheader = DIV(TABLE(
TR(TH("%s: " % T("Template")),
survey_template_represent(record.template_id),
TH("%s: " % T("Name")),
record.name,
TH("%s: " % T("Status")),
s3db.survey_series_status[record.status],
),
),
tsection,
tranForm,
buttons,
rheader_tabs)
return rheader
return None
# =============================================================================
def survey_getSeries(series_id):
"""
function to return the series from a series id
"""
table = current.s3db.survey_series
row = current.db(table.id == series_id).select(limitby=(0, 1)).first()
return row
# =============================================================================
def survey_getSeriesName(series_id):
"""
function to return the Series Name from the id
"""
table = current.s3db.survey_series
row = current.db(table.id == series_id).select(table.name,
limitby=(0, 1)).first()
try:
return row.name
except:
return ""
# =============================================================================
def survey_getAllSeries():
"""
function to return all the series on the database
"""
table = current.s3db.survey_series
row = current.db(table.id > 0).select()
return row
# =============================================================================
def survey_buildQuestionnaireFromSeries(series_id, complete_id=None):
"""
build a form displaying all the questions for a given series_id
If the complete_id is also provided then the responses to each
completed question will also be displayed
"""
questions = survey_getAllQuestionsForSeries(series_id)
return buildQuestionsForm(questions, complete_id)
# =============================================================================
def survey_save_answers_for_series(series_id, complete_id, vars):
"""
function to save the list of answers for a completed series
"""
questions = survey_getAllQuestionsForSeries(series_id)
return saveAnswers(questions, series_id, complete_id, vars)
# =============================================================================
def saveAnswers(questions, series_id, complete_id, vars):
"""
"""
text = ""
table = current.s3db.survey_complete
for question in questions:
code = question["code"]
if (code in vars) and vars[code] != "":
line = '"%s","%s"\n' % (code, vars[code])
text += line
if complete_id == None:
# Insert into database
id = table.insert(series_id = series_id, answer_list = text)
S3SurveyCompleteModel.completeOnAccept(id)
return id
else:
# Update the complete_id record
current.db(table.id == complete_id).update(answer_list = text)
S3SurveyCompleteModel.completeOnAccept(complete_id)
return complete_id
# =============================================================================
def survey_getPriorityQuestionForSeries(series_id):
"""
"""
templateRec = survey_getTemplateFromSeries(series_id)
if templateRec != None:
priorityQstnCode = templateRec["priority_qstn"]
question = survey_getQuestionFromCode(priorityQstnCode, series_id)
return question
else:
return None
# =============================================================================
def buildSeriesSummary(series_id, posn_offset):
"""
"""
from s3.s3data import S3DataTable
T = current.T
table = TABLE(_id="series_summary",
_class="dataTable display")
hr = TR(TH(""), # Bulk action column
TH(T("Position")),
TH(T("Question")),
TH(T("Type")),
TH(T("Summary"))
)
header = THEAD(hr)
questions = survey_getAllQuestionsForSeries(series_id)
line = []
body = TBODY()
for question in questions:
if question["type"] == "Grid":
continue
question_id = question["qstn_id"]
widgetObj = survey_getWidgetFromQuestion(question_id)
br = TR()
posn = int(question["posn"])+posn_offset
br.append(TD(INPUT(_id="select%s" % posn,
_type="checkbox",
_class="bulkcheckbox",
)))
br.append(posn) # add an offset to make all id's +ve
br.append(widgetObj.fullName())
#br.append(question["name"])
type = widgetObj.type_represent()
answers = survey_getAllAnswersForQuestionInSeries(question_id,
series_id)
analysisTool = survey_analysis_type[question["type"]](question_id,
answers)
chart = analysisTool.chartButton(series_id)
cell = TD()
cell.append(type)
if chart:
cell.append(chart)
br.append(cell)
analysisTool.count()
br.append(analysisTool.summary())
body.append(br)
table.append(header)
table.append(body)
s3 = current.response.s3
# Turn off server side pagination
s3.no_sspag = True
# Turn multi-select on
s3.dataTableBulkActions = [current.T("Display Selected Questions")]
attr = S3DataTable.getConfigData()
form = S3DataTable.htmlConfig(table,
"series_summary",
[[0, 'asc']], # order by
"", # the filter string
None, # the rfields
**attr
)
series = INPUT(_type="hidden", _id="selectSeriesID", _name="series",
_value="%s" % series_id)
form.append(series)
return form
# =============================================================================
class S3SurveyCompleteModel(S3Model):
"""
Completed Surveys Model
"""
names = ("survey_complete",
"survey_complete_id",
"survey_answer",
)
def model(self):
T = current.T
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# The survey_complete table holds all of the answers for a completed
# response. It has a link back to the series this response belongs to.
#
# Whilst this table holds all of the answers in a text field during
# the onaccept each answer is extracted and then stored in the
# survey_answer table. This process of moving the answers to a
# separate table makes it easier to analyse the answers
# for a given question across all responses.
tablename = "survey_complete"
define_table(tablename,
self.survey_series_id(),
Field("answer_list", "text",
represent = survey_answer_list_represent,
),
Field("location", "text",
readable = False,
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Enter Completed Assessment Form"),
title_display = T("Completed Assessment Form Details"),
title_list = T("Completed Assessment Forms"),
title_update = T("Edit Completed Assessment Form"),
title_selected = T("Selected Questions for all Completed Assessment Forms"),
subtitle_selected = T("Selected Questions for all Completed Assessment Forms"),
label_list_button = T("List Completed Assessment Forms"),
label_delete_button = T("Delete this Completed Assessment Form"),
msg_record_created = T("Completed Assessment Form entered"),
msg_record_modified = T("Completed Assessment Form updated"),
msg_record_deleted = T("Completed Assessment Form deleted"),
msg_list_empty = T("No Completed Assessment Forms"),
title_upload = T("Upload the Completed Assessment Form")
)
configure(tablename,
deduplicate = self.survey_complete_duplicate,
onaccept = self.complete_onaccept,
onvalidation = self.complete_onvalidate,
)
complete_id = S3ReusableField("complete_id", "reference %s" % tablename,
readable = False,
writable = False,
)
self.add_components(tablename,
survey_complete="series_id",
)
# ---------------------------------------------------------------------
# The survey_answer table holds the answer for a single response
# of a given question.
tablename = "survey_answer"
define_table(tablename,
complete_id(),
self.survey_question_id(),
Field("value", "text"),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Assessment Answer"),
title_display = T("Assessment Answer Details"),
title_list = T("Assessment Answers"),
title_update = T("Edit Assessment Answer"),
label_list_button = T("List Assessment Answers"),
label_delete_button = T("Delete this Assessment Answer"),
msg_record_created = T("Assessment Answer added"),
msg_record_modified = T("Assessment Answer updated"),
msg_record_deleted = T("Assessment Answer deleted"),
msg_list_empty = T("No Assessment Answers"))
configure(tablename,
deduplicate = self.survey_answer_duplicate,
onaccept = self.answer_onaccept,
)
# ---------------------------------------------------------------------
return dict(survey_complete_id = complete_id)
# -------------------------------------------------------------------------
@staticmethod
def extractAnswerFromAnswerList(answerList, qstnCode):
"""
function to extract the answer for the question code
passed in from the list of answers. This is in a CSV
format created by the XSL stylesheet or by the function
saveAnswers()
"""
start = answerList.find(qstnCode)
if start == -1:
return None
start = start + len(qstnCode) + 3
end = answerList.find('"', start)
answer = answerList[start:end]
return answer
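    # A minimal illustrative sketch (values are made up) of the answer-list
    # format handled above, as produced by saveAnswers() or the XSL stylesheet:
    #   answer_list = '"STD-L0","Kenya"\n"STD-Lat","1.28"\n'
    #   S3SurveyCompleteModel.extractAnswerFromAnswerList(answer_list, "STD-L0")
    #   # -> "Kenya"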
# -------------------------------------------------------------------------
@staticmethod
def complete_onvalidate(form):
"""
"""
T = current.T
vars = form.vars
if "series_id" not in vars or vars.series_id == None:
form.errors.series_id = T("Series details missing")
return False
if "answer_list" not in vars or vars.answer_list == None:
form.errors.answer_list = T("The answers are missing")
return False
series_id = vars.series_id
answer_list = vars.answer_list
qstn_list = survey_getAllQuestionsForSeries(series_id)
qstns = []
for qstn in qstn_list:
qstns.append(qstn["code"])
answerList = answer_list.splitlines(True)
for answer in answerList:
qstn_code = answer[1:answer.find('","')]
if qstn_code not in qstns:
msg = "%s: %s" % (T("Unknown question code"), qstn_code)
                if "answer_list" not in form.errors:
form.errors.answer_list = msg
else:
form.errors.answer_list += msg
return True
# -------------------------------------------------------------------------
@staticmethod
def complete_onaccept(form):
"""
All of the answers will be stored in the answer_list in the
format "code","answer"
They will then be inserted into the survey_answer table
each item will be a record on that table.
This will also extract the default location question as
defined by the template and store this in the location field
"""
if form.vars.id:
S3SurveyCompleteModel.completeOnAccept(form.vars.id)
# -------------------------------------------------------------------------
@staticmethod
def completeOnAccept(complete_id):
"""
"""
# Get the basic data that is needed
s3db = current.s3db
rtable = s3db.survey_complete
atable = s3db.survey_answer
record = rtable[complete_id]
series_id = record.series_id
purgePrefix = "survey_series_%s" % series_id
S3Chart.purgeCache(purgePrefix)
if series_id == None:
return
# Save all the answers from answerList in the survey_answer table
answerList = record.answer_list
S3SurveyCompleteModel.importAnswers(complete_id, answerList)
# Extract the default template location question and save the
# answer in the location field
templateRec = survey_getTemplateFromSeries(series_id)
locDetails = templateRec["location_detail"]
if not locDetails:
return
widgetObj = get_default_location(complete_id)
if widgetObj:
current.db(rtable.id == complete_id).update(location = widgetObj.repr())
locations = get_location_details(complete_id)
S3SurveyCompleteModel.importLocations(locations)
# -------------------------------------------------------------------------
@staticmethod
def importAnswers(id, list):
"""
private function used to save the answer_list stored in
survey_complete into answer records held in survey_answer
"""
import csv
import os
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
strio = StringIO()
strio.write(list)
strio.seek(0)
answer = []
append = answer.append
reader = csv.reader(strio)
for row in reader:
if row != None:
row.insert(0, id)
append(row)
from tempfile import TemporaryFile
csvfile = TemporaryFile()
writer = csv.writer(csvfile)
writerow = writer.writerow
writerow(["complete_id", "question_code", "value"])
for row in answer:
writerow(row)
csvfile.seek(0)
xsl = os.path.join("applications",
current.request.application,
"static",
"formats",
"s3csv",
"survey",
"answer.xsl")
resource = current.s3db.resource("survey_answer")
resource.import_xml(csvfile, stylesheet = xsl, format="csv",)
# -------------------------------------------------------------------------
@staticmethod
def importLocations(location_dict):
"""
private function used to save the locations to gis.location
"""
import csv
import os
lastLocWidget = None
codeList = ["STD-L0","STD-L1","STD-L2","STD-L3","STD-L4"]
headingList = ["Country",
"ADM1_NAME",
"ADM2_NAME",
"ADM3_NAME",
"ADM4_NAME"
]
cnt = 0
answer = []
headings = []
aappend = answer.append
happend = headings.append
for loc in codeList:
if loc in location_dict:
aappend(location_dict[loc].repr())
lastLocWidget = location_dict[loc]
happend(headingList[cnt])
cnt += 1
# Check that we have at least one location question answered
if lastLocWidget == None:
return
codeList = ["STD-P-Code","STD-Lat","STD-Lon"]
for loc in codeList:
if loc in location_dict:
aappend(location_dict[loc].repr())
else:
aappend("")
from tempfile import TemporaryFile
csvfile = TemporaryFile()
writer = csv.writer(csvfile)
headings += ["Code2", "Lat", "Lon"]
writer.writerow(headings)
writer.writerow(answer)
csvfile.seek(0)
xsl = os.path.join("applications",
current.request.application,
"static",
"formats",
"s3csv",
"gis",
"location.xsl")
resource = current.s3db.resource("gis_location")
resource.import_xml(csvfile, stylesheet = xsl, format="csv",)
# -------------------------------------------------------------------------
@staticmethod
def survey_complete_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same name, answer_list
"""
if job.tablename == "survey_complete":
table = job.table
data = job.data
answers = data.get("answer_list")
query = (table.answer_list == answers)
try:
return duplicator(job, query)
except:
# if this is part of an import then the select will throw an error
# if the question code doesn't exist.
# This can happen during an import if the wrong file is used.
return
# -------------------------------------------------------------------------
@staticmethod
def answer_onaccept(form):
"""
Some question types may require additional processing
"""
vars = form.vars
if vars.complete_id and vars.question_id:
atable = current.s3db.survey_answer
complete_id = vars.complete_id
question_id = vars.question_id
value = vars.value
widgetObj = survey_getWidgetFromQuestion(question_id)
newValue = widgetObj.onaccept(value)
if newValue != value:
query = (atable.question_id == question_id) & \
(atable.complete_id == complete_id)
current.db(query).update(value = newValue)
# -------------------------------------------------------------------------
@staticmethod
def survey_answer_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same complete_id and question_id
"""
if job.tablename == "survey_answer":
table = job.table
data = job.data
qid = data.get("question_id")
rid = data.get("complete_id")
query = (table.question_id == qid) & \
(table.complete_id == rid)
return duplicator(job, query)
# =============================================================================
def survey_answerlist_dataTable_pre():
"""
The answer list has been removed for the moment. Currently it
displays all answers for a summary it would be better to
be able to display just a few select answers
"""
list_fields = ["created_on", "series_id", "location", "modified_by"]
current.s3db.configure("survey_complete", list_fields=list_fields)
# =============================================================================
def survey_answerlist_dataTable_post(r):
"""
Replace Action Buttons
"""
#S3CRUD.action_buttons(r)
current.response.s3.actions = [
dict(label=current.messages["UPDATE"],
_class="action-btn edit",
url=URL(c="survey", f="series",
args=[r.id, "complete", "[id]", "update"])
),
]
# =============================================================================
def survey_answer_list_represent(value):
"""
Display the answer list in a formatted table.
Displaying the full question (rather than the code)
and the answer.
"""
db = current.db
qtable = current.s3db.survey_question
answer_text = value
list = answer_text.splitlines()
result = TABLE()
questions = {}
xml_decode = S3Codec.xml_decode
for line in list:
line = xml_decode(line)
(question, answer) = line.split(",",1)
question = question.strip("\" ")
if question in questions:
question = questions[question]
else:
query = (qtable.code == question)
qstn = db(query).select(qtable.name,
limitby=(0, 1)).first()
if not qstn:
continue
questions[question] = qstn.name
question = qstn.name
answer = answer.strip("\" ")
result.append(TR(TD(B(question)), TD(answer)))
return result
# =============================================================================
def get_location_details(complete_id):
"""
It will return a dict of values for all of the standard location
questions that have been answered
"""
db = current.db
s3db = current.s3db
locations = {}
comtable = s3db.survey_complete
qsntable = s3db.survey_question
answtable = s3db.survey_answer
query = (answtable.question_id == qsntable.id) & \
(answtable.complete_id == comtable.id)
codeList = ["STD-P-Code",
"STD-L0", "STD-L1", "STD-L2", "STD-L3", "STD-L4",
"STD-Lat", "STD-Lon"]
for locCode in codeList:
record = db(query & (qsntable.code == locCode)).select(qsntable.id,
limitby=(0, 1)).first()
if record:
widgetObj = survey_getWidgetFromQuestion(record.id)
widgetObj.loadAnswer(complete_id, record.id)
locations[locCode] = widgetObj
return locations
# =============================================================================
def get_default_location(complete_id):
"""
It will check each standard location question in
the hierarchy until either one is found or none are found
"""
db = current.db
s3db = current.s3db
comtable = s3db.survey_complete
qsntable = s3db.survey_question
answtable = s3db.survey_answer
query = (answtable.question_id == qsntable.id) & \
(answtable.complete_id == comtable.id)
codeList = ["STD-L4", "STD-L3", "STD-L2", "STD-L1", "STD-L0"]
for locCode in codeList:
record = db(query & (qsntable.code == locCode)).select(qsntable.id,
limitby=(0, 1)).first()
if record:
widgetObj = survey_getWidgetFromQuestion(record.id)
break
if record:
widgetObj.loadAnswer(complete_id, record.id)
return widgetObj
else:
return None
# =============================================================================
def survey_getAllAnswersForQuestionInSeries(question_id, series_id):
"""
function to return all the answers for a given question
from with a specified series
"""
s3db = current.s3db
ctable = s3db.survey_complete
atable = s3db.survey_answer
query = (atable.question_id == question_id) & \
(atable.complete_id == ctable.id) & \
(ctable.series_id == series_id)
rows = current.db(query).select(atable.id,
atable.value,
atable.complete_id)
answers = []
for row in rows:
answer = {}
answer["answer_id"] = row.id
answer["value"] = row.value
answer["complete_id"] = row.complete_id
answers.append(answer)
return answers
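# Each element of the list returned above has the shape (illustrative values):
#   {"answer_id": 42, "value": "7", "complete_id": 13}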
# =============================================================================
def buildTableFromCompletedList(dataSource):
"""
"""
headers = dataSource[0]
items = dataSource[2:]
table = TABLE(_id="completed_list",
_class="dataTable display")
hr = TR()
for title in headers:
hr.append(TH(title))
header = THEAD(hr)
body = TBODY()
for row in items:
tr = TR()
for answer in row:
tr.append(TD(answer))
body.append(tr)
table.append(header)
table.append(body)
# Turn off server side pagination
current.response.s3.no_sspag = True
attr = S3DataTable.getConfigData()
form = S3DataTable.htmlConfig(table,
"completed_list",
[[0, 'asc']], # order by
"", # the filter string
None, # the rfields
**attr
)
return form
# =============================================================================
def buildCompletedList(series_id, question_id_list):
"""
build a list of completed items for the series including
just the questions in the list passed in
The list will come in three parts.
1) The first row is the header (list of field labels)
2) The seconds row is the type of each column
3) The remaining rows are the data
@param series_id: The id of the series
@param question_id_list: The list of questions to display
"""
db = current.db
qtable = current.s3db.survey_question
headers = []
happend = headers.append
types = []
items = []
qstn_posn = 0
rowLen = len(question_id_list)
complete_lookup = {}
for question_id in question_id_list:
answers = survey_getAllAnswersForQuestionInSeries(question_id,
series_id)
widgetObj = survey_getWidgetFromQuestion(question_id)
question = db(qtable.id == question_id).select(qtable.name,
limitby=(0, 1)).first()
happend(question.name)
types.append(widgetObj.db_type())
for answer in answers:
complete_id = answer["complete_id"]
if complete_id in complete_lookup:
row = complete_lookup[complete_id]
else:
row = len(complete_lookup)
                complete_lookup[complete_id] = row
items.append([''] * rowLen)
items[row][qstn_posn] = widgetObj.repr(answer["value"])
qstn_posn += 1
return [headers] + [types] + items
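# Illustrative shape of the list returned by buildCompletedList() (values are
# made up; the exact type strings depend on each widget's db_type()):
#   [["Affected Area", "People Injured"],   # 1) question names
#    ["string", "integer"],                 # 2) column types
#    ["Nairobi", "12"],                     # 3) one row per completed response
#    ["Mombasa", "4"]]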
# =============================================================================
def getLocationList(series_id):
"""
Get a list of the LatLons for each Response in a Series
"""
response_locations = []
rappend = response_locations.append
codeList = ["STD-L4", "STD-L3", "STD-L2", "STD-L1", "STD-L0"]
table = current.s3db.survey_complete
rows = current.db(table.series_id == series_id).select(table.id,
table.answer_list)
for row in rows:
lat = None
lon = None
name = None
answer_list = row.answer_list.splitlines()
answer_dict = {}
for line in answer_list:
(question, answer) = line.split(",", 1)
question = question.strip('"')
if question in codeList:
# Store to get the name
answer_dict[question] = answer.strip('"')
elif question == "STD-Lat":
try:
lat = float(answer.strip('"'))
except:
pass
else:
if lat < -90.0 or lat > 90.0:
lat = None
elif question == "STD-Lon":
try:
lon = float(answer.strip('"'))
except:
pass
else:
if lon < -180.0 or lon > 180.0:
lon = None
else:
# Not relevant here
continue
for locCode in codeList:
# Retrieve the name of the lowest Lx
if locCode in answer_dict:
name = answer_dict[locCode]
break
if lat and lon:
# We have sufficient data to display on the map
location = Row()
location.lat = lat
location.lon = lon
location.name = name
location.complete_id = row.id
rappend(location)
else:
# The lat & lon were not added to the assessment so try and get one
locWidget = get_default_location(row.id)
if locWidget:
complete_id = locWidget.question["complete_id"]
if "answer" not in locWidget.question:
continue
answer = locWidget.question["answer"]
if locWidget != None:
record = locWidget.getLocationRecord(complete_id, answer)
if len(record.records) == 1:
location = record.records[0].gis_location
location.complete_id = complete_id
rappend(location)
return response_locations
# =============================================================================
class S3SurveyTranslateModel(S3Model):
"""
Translations Model
"""
from gluon.languages import read_dict, write_dict
names = ("survey_translate",)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# The survey_translate table holds the details of the language
# for which the template has been translated into.
LANG_HELP = T("This is the full name of the language and will be displayed to the user when selecting the template language.")
CODE_HELP = T("This is the short code of the language and will be used as the name of the file. This should be the ISO 639 code.")
tablename = "survey_translate"
self.define_table(tablename,
self.survey_template_id(),
Field("language",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Language"),
LANG_HELP))
),
Field("code",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Language Code"),
CODE_HELP))
),
Field("file", "upload",
autodelete=True),
Field("filename",
readable = False,
writable = False,
),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Translation Language"),
)
self.configure(tablename,
onaccept = self.translate_onaccept,
)
# ---------------------------------------------------------------------
return dict()
# -------------------------------------------------------------------------
@staticmethod
def translate_onaccept(form):
"""
If the translation spreadsheet has been uploaded then
it needs to be processed.
The translation strings need to be extracted from
the spreadsheet and inserted into the language file.
"""
if "file" in form.vars:
try:
import xlrd
except ImportError:
print >> sys.stderr, "ERROR: xlrd & xlwt modules are needed for importing spreadsheets"
return None
from gluon.languages import read_dict, write_dict
T = current.T
request = current.request
response = current.response
msgNone = T("No translations exist in spreadsheet")
upload_file = request.post_vars.file
upload_file.file.seek(0)
openFile = upload_file.file.read()
lang = form.record.language
code = form.record.code
try:
workbook = xlrd.open_workbook(file_contents=openFile)
except:
msg = T("Unable to open spreadsheet")
response.error = msg
response.flash = None
return
try:
sheetL = workbook.sheet_by_name(lang)
except:
msg = T("Unable to find sheet %(sheet_name)s in uploaded spreadsheet") % \
dict(sheet_name=lang)
response.error = msg
response.flash = None
return
if sheetL.ncols == 1:
response.warning = msgNone
response.flash = None
return
count = 0
lang_fileName = "applications/%s/uploads/survey/translations/%s.py" % \
(request.application, code)
try:
strings = read_dict(lang_fileName)
except:
strings = dict()
for row in xrange(1, sheetL.nrows):
original = sheetL.cell_value(row, 0)
translation = sheetL.cell_value(row, 1)
if (original not in strings) or translation != "":
strings[original] = translation
count += 1
write_dict(lang_fileName, strings)
if count == 0:
response.warning = msgNone
response.flash = None
else:
response.flash = T("%(count_of)d translations have been imported to the %(language)s language file") % \
dict(count_of=count, language=lang)
# =============================================================================
def survey_getAllTranslationsForTemplate(template_id):
"""
Function to return all the translations for the given template
"""
table = current.s3db.survey_translate
row = current.db(table.template_id == template_id).select()
return row
# =============================================================================
def survey_getAllTranslationsForSeries(series_id):
"""
Function to return all the translations for the given series
"""
table = current.s3db.survey_series
row = current.db(table.id == series_id).select(table.template_id,
limitby=(0, 1)).first()
template_id = row.template_id
return survey_getAllTranslationsForTemplate(template_id)
# =============================================================================
# Generic function called by the duplicator methods to determine if the
# record already exists on the database.
def duplicator(job, query):
"""
This callback will be called when importing records it will look
to see if the record being imported is a duplicate.
@param job: An S3ImportJob object which includes all the details
of the record being imported
If the record is a duplicate then it will set the job method to update
"""
table = job.table
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
job.id = _duplicate.id
job.data.id = _duplicate.id
job.method = job.METHOD.UPDATE
# END =========================================================================
| mit | -4,218,395,409,947,875,300 | 38.298074 | 195 | 0.4751 | false |
QuLogic/meson | mesonbuild/modules/rpm.py | 1 | 8481 | # Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides helper functions for RPM related
functionality such as generating template RPM spec file.'''
from .. import build
from .. import compilers
import datetime
from .. import mlog
from . import GirTarget, TypelibTarget
from . import ModuleReturnValue
from . import ExtensionModule
from ..interpreterbase import noKwargs
import os
class RPMModule(ExtensionModule):
@noKwargs
def generate_spec_template(self, coredata, args, kwargs):
self.coredata = coredata
required_compilers = self.__get_required_compilers()
proj = coredata.project_name.replace(' ', '_').replace('\t', '_')
so_installed = False
devel_subpkg = False
files = set()
files_devel = set()
to_delete = set()
for target in coredata.targets.values():
if isinstance(target, build.Executable) and target.need_install:
files.add('%%{_bindir}/%s' % target.get_filename())
elif isinstance(target, build.SharedLibrary) and target.need_install:
files.add('%%{_libdir}/%s' % target.get_filename())
for alias in target.get_aliases():
if alias.endswith('.so'):
files_devel.add('%%{_libdir}/%s' % alias)
else:
files.add('%%{_libdir}/%s' % alias)
so_installed = True
elif isinstance(target, build.StaticLibrary) and target.need_install:
to_delete.add('%%{buildroot}%%{_libdir}/%s' % target.get_filename())
mlog.warning('removing', mlog.bold(target.get_filename()),
'from package because packaging static libs not recommended')
elif isinstance(target, GirTarget) and target.should_install():
files_devel.add('%%{_datadir}/gir-1.0/%s' % target.get_filename()[0])
elif isinstance(target, TypelibTarget) and target.should_install():
files.add('%%{_libdir}/girepository-1.0/%s' % target.get_filename()[0])
for header in coredata.headers:
if header.get_install_subdir():
files_devel.add('%%{_includedir}/%s/' % header.get_install_subdir())
else:
for hdr_src in header.get_sources():
files_devel.add('%%{_includedir}/%s' % hdr_src)
for man in coredata.man:
for man_file in man.get_sources():
if man.locale:
files.add('%%{_mandir}/%s/man%u/%s.*' % (man.locale, int(man_file.split('.')[-1]), man_file))
else:
files.add('%%{_mandir}/man%u/%s.*' % (int(man_file.split('.')[-1]), man_file))
if files_devel:
devel_subpkg = True
filename = os.path.join(coredata.environment.get_build_dir(),
'%s.spec' % proj)
with open(filename, 'w+') as fn:
fn.write('Name: %s\n' % proj)
fn.write('Version: # FIXME\n')
fn.write('Release: 1%{?dist}\n')
fn.write('Summary: # FIXME\n')
fn.write('License: # FIXME\n')
fn.write('\n')
fn.write('Source0: %{name}-%{version}.tar.xz # FIXME\n')
fn.write('\n')
fn.write('BuildRequires: meson\n')
for compiler in required_compilers:
fn.write('BuildRequires: %s\n' % compiler)
for dep in coredata.environment.coredata.deps.host:
fn.write('BuildRequires: pkgconfig(%s)\n' % dep[0])
# ext_libs and ext_progs have been removed from coredata so the following code
# no longer works. It is kept as a reminder of the idea should anyone wish
# to re-implement it.
#
# for lib in state.environment.coredata.ext_libs.values():
# name = lib.get_name()
# fn.write('BuildRequires: {} # FIXME\n'.format(name))
# mlog.warning('replace', mlog.bold(name), 'with the real package.',
# 'You can use following command to find package which '
# 'contains this lib:',
# mlog.bold("dnf provides '*/lib{}.so'".format(name)))
# for prog in state.environment.coredata.ext_progs.values():
# if not prog.found():
# fn.write('BuildRequires: %%{_bindir}/%s # FIXME\n' %
# prog.get_name())
# else:
# fn.write('BuildRequires: {}\n'.format(prog.get_path()))
fn.write('\n')
fn.write('%description\n')
fn.write('\n')
if devel_subpkg:
fn.write('%package devel\n')
fn.write('Summary: Development files for %{name}\n')
                fn.write('Requires: %{name}%{?_isa} = %{?epoch:%{epoch}:}%{version}-%{release}\n')
fn.write('\n')
fn.write('%description devel\n')
fn.write('Development files for %{name}.\n')
fn.write('\n')
fn.write('%prep\n')
fn.write('%autosetup\n')
fn.write('\n')
fn.write('%build\n')
fn.write('%meson\n')
fn.write('%meson_build\n')
fn.write('\n')
fn.write('%install\n')
fn.write('%meson_install\n')
if to_delete:
fn.write('rm -vf %s\n' % ' '.join(to_delete))
fn.write('\n')
fn.write('%check\n')
fn.write('%meson_test\n')
fn.write('\n')
fn.write('%files\n')
for f in files:
fn.write('%s\n' % f)
fn.write('\n')
if devel_subpkg:
fn.write('%files devel\n')
for f in files_devel:
fn.write('%s\n' % f)
fn.write('\n')
if so_installed:
fn.write('%post -p /sbin/ldconfig\n')
fn.write('%postun -p /sbin/ldconfig\n')
fn.write('\n')
fn.write('%changelog\n')
fn.write('* %s meson <[email protected]> - \n' %
datetime.date.today().strftime('%a %b %d %Y'))
fn.write('- \n')
fn.write('\n')
mlog.log('RPM spec template written to %s.spec.\n' % proj)
return ModuleReturnValue(None, [])
def __get_required_compilers(self):
required_compilers = set()
for compiler in self.coredata.environment.coredata.compilers.host.values():
# Elbrus has one 'lcc' package for every compiler
if isinstance(compiler, compilers.GnuCCompiler):
required_compilers.add('gcc')
elif isinstance(compiler, compilers.GnuCPPCompiler):
required_compilers.add('gcc-c++')
elif isinstance(compiler, compilers.ElbrusCCompiler):
required_compilers.add('lcc')
elif isinstance(compiler, compilers.ElbrusCPPCompiler):
required_compilers.add('lcc')
elif isinstance(compiler, compilers.ElbrusFortranCompiler):
required_compilers.add('lcc')
elif isinstance(compiler, compilers.ValaCompiler):
required_compilers.add('vala')
elif isinstance(compiler, compilers.GnuFortranCompiler):
required_compilers.add('gcc-gfortran')
elif isinstance(compiler, compilers.GnuObjCCompiler):
required_compilers.add('gcc-objc')
            elif isinstance(compiler, compilers.GnuObjCPPCompiler):
required_compilers.add('gcc-objc++')
else:
mlog.log('RPM spec file not created, generation not allowed for:',
mlog.bold(compiler.get_id()))
return required_compilers
def initialize(*args, **kwargs):
return RPMModule(*args, **kwargs)
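# A hedged usage sketch (not part of this module): from a project's meson.build
# the module is loaded and invoked roughly as
#   rpm = import('rpm')
#   rpm.generate_spec_template()
# which writes <project>.spec into the build directory.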
| apache-2.0 | -3,089,279,144,901,001,700 | 45.092391 | 113 | 0.545572 | false |
HazyResearch/dd-genomics | document_classifier/classification/processed/genomics_dump_to_processed.py | 1 | 1446 | #! /usr/bin/env python
import sys
import re
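# Hedged usage sketch (file names are assumptions): the script reads a
# tab-separated genomics dump from argv[1] and writes the processed text to
# argv[2], e.g.
#   python genomics_dump_to_processed.py genomics_dump.tsv processed.tsv
# Each input line carries pmid, journal, MeSH terms, label (sv), text, gene
# mentions and phenotype mentions; mentions are rewritten in the output text as
# ENSEMBLGENE / DETECTEDPHENO tokens.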
if __name__ == "__main__":
no_alnum = re.compile(r'[\W_]+')
with open(sys.argv[2], 'w') as out_file:
with open(sys.argv[1]) as f:
for line in f:
comps = line.strip().split('\t')
pmid = comps[0]
journal = comps[1]
mesh_terms_string = comps[2]
sv = comps[3]
text = comps[4]
gm = comps[5]
pm = comps[6]
sentences = text.split('|~^~|')
gm_sentences = gm.split('|~^~|')
pm_sentences = pm.split('|~^~|')
mesh_terms = mesh_terms_string.split('|^|')
new_text = 'JOURNAL_' + no_alnum.sub('_', journal).strip() + ' ' + ' '.join(['MeSH_' + no_alnum.sub('_', x).strip() for x in mesh_terms]) + ' '
for i, sentence in enumerate(sentences):
words = sentence.split('|^|')
if i >= len(gm_sentences):
print >>sys.stderr, (pmid, i, gm_sentences)
gms_string = gm_sentences[i]
pms_string = pm_sentences[i]
if gms_string != 'N':
gms = gms_string.split('|^+^|')
for gm in [int(x) for x in gms]:
words[gm] = 'ENSEMBLGENE'
if pms_string != 'N':
pms = pms_string.split('|^+^|')
for pm in [int(x) for x in pms]:
words[pm] = 'DETECTEDPHENO'
new_text += ' ' + ' '.join(words)
print >>out_file, "%s\t%s\t%s" % (pmid, new_text, sv)
| apache-2.0 | 7,812,585,201,237,845,000 | 34.268293 | 151 | 0.478562 | false |
widgetOne/league_admin | scheduler/schedule.py | 1 | 29282 | from model import init_value
import random
import copy
from pprint import pprint
import fitness
def list_filter(primary, filter):
both = [team for team in primary if team in filter]
if (len(both) > 0):
return list(both)
else:
return primary
class Schedule(object):
time_string = ['6pm', '7pm', '8pm', '9pm']
rec_first = True
# [{'times': 4, 'courts': 5},
# {'time': 2, 'court': 1, 'team1': (1, 2), 'team2': (1, 12), 'ref': (1, 8)},
# {'time': 2, 'court': 1, 'team1': (1, 2), 'team2': (1, 12)},
# {...]
def __init__(self, league, team_counts, facs):
from model import Division
self.team_counts = team_counts
self.daycount = len(facs)
self.divisions = [Division(count, self.daycount) for count in team_counts]
self.division_count = len(team_counts)
self.max_fitness = 0
self.league = league
self.courts = 5 # todo make dynamic
self.times = 4 # todo make dynamic
self.games_per_team = self.daycount
self.div_max_fitness = [init_value for _ in range(4)]
self.enhance_success = 0
self.days = []
for day_idx in range(self.daycount):
day = self.make_day(facs[day_idx], day_num=day_idx)
self.days.append(day)
self.fitness_structure = sum((day.fitness_str() for day in self.days))
def __repr__(self):
out = []
for day in self.days:
out += day.csv_str()
return "\n".join(out)
def gen_csv(self, loc):
with open(loc, "w") as csv_file:
print(self.__repr__(), file=csv_file)
def fitness(self):
self.fitness_structure = sum((day.fitness_str() for day in self.days))
return self.fitness_structure.value()
def fitness_div(self, div_idx):
return sum((day.fitness_str() for day in self.days)).div_value(div_idx)
def fitness_div_list(self):
sch_fitness = sum((day.fitness_str() for day in self.days))
return [sch_fitness.div_value(idx) for idx in range(self.division_count)]
def fitness_error_breakdown(self, div_idx=None):
sch_fitness = sum((day.fitness_str(div_idx=div_idx) for day in self.days))
return sch_fitness.error_breakdown()
def fitness_by_day(self):
return [day.fitness_str().value() for day in self.days]
def make_audit_structures(self):
from copy import deepcopy
rolling_sum_play = []
rolling_sum_ref = []
total_use = []
for div_idx in range(self.division_count):
div_arr = [0] * self.team_counts[div_idx]
rolling_sum_play.append(deepcopy(div_arr))
rolling_sum_ref.append(deepcopy(div_arr))
total_use.append(deepcopy(div_arr))
return rolling_sum_play, rolling_sum_ref, total_use
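    # Illustrative shape of the three structures returned above, assuming
    # team_counts == [4, 6]: each is [[0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
    # i.e. one running counter per team, grouped by division.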
def write_audit_file(self, out_file_path):
audit_text = self.get_audit_text()
with open(out_file_path, "w") as csv_file:
print("\n".join(audit_text), file=csv_file)
def get_game_div_count_list(self):
div_totals = {}
for day in self.days:
for court in day.courts:
for game in court:
if game.div > -1:
div_totals[game.div] = div_totals.get(game.div, 0) + 1
output = [0] * (max(div_totals.keys()) + 1)
for key, value in div_totals.items():
output[key] = value
return output
def get_audit_text(self):
from copy import deepcopy
# todo: this summation logic could be integrated with fitness.py's
rolling_sum_play, rolling_sum_ref, total_use = self.make_audit_structures()
out = ['\n\nSchedule Audit Report']
out += ['\nCumulative Plays and Refs by Team']
        out += ['This section displays the schedule alongside running totals of the play/ref for each team']
        out += ['in the league. It is generated programmatically, so you can audit its accuracy for specific']
out += ['teams and areas, and then infer the overall behavior. The final line of this section states']
out += ['the play and ref totals for each team over the season']
for day in self.days:
out += day.audit_view(rolling_sum_play, rolling_sum_ref)
out += ['\n\n\nCumulative Play / Ref Totals']
play_str = ''
ref_str = ''
for div_idx in range(len(self.team_counts)):
play_str += ",,PLAY DATA," + ",".join([(str(num)) for num in rolling_sum_play[div_idx]])
ref_str += ",,REF DATA," + ",".join([(str(num)) for num in rolling_sum_ref[div_idx]])
out += [play_str]
out += [ref_str]
out += ['\n\nTotal Use Audit']
out += ['This report displays the total use (play and ref) for each team in each time']
out += ['slot for each day. This is useful for checking that no one is double-booked']
        out += ['(playing on two courts at a time, playing and reffing at the same time, etc.)']
for day in self.days:
out += day.audit_total_use_view(total_use)
out += ['\n\n\nVs View']
out += ["Number of times a team has played another team: rec, In, Cmp, Pw, P+"]
out += ["These are the total times each team will play another team in their own division"]
out += ["This data is symmetric for obvious reasons. The '1000' values are esentially filler"]
out += ["for the spaces for a team playing itself."]
for div_idx in range(len(self.team_counts)):
for team_idx in range(self.team_counts[div_idx]):
team = self.divisions[div_idx].teams[team_idx]
row = ",".join([str(num) for num in team.times_team_played])
out += [row]
out += []
out += ['\n\n\nBye View']
out += ['Below are the counts of the number of games each team has in a given week']
for div_idx in range(len(self.team_counts)):
out += ["division %s" % div_idx]
for team_idx in range(self.team_counts[div_idx]):
team = self.divisions[div_idx].teams[team_idx]
row = ",".join([str(num) for num in team.games_per_day]) + \
' total: {}'.format(sum((1 for num in team.games_per_day if num == 0)))
out += [row]
out += ['\n\n\nRepeated Play View\n']
out += [self.get_sequencial_vs_play_report()]
out += ['\n\n\nOpen Play Report\n']
out += [self.get_open_play_report()]
out += ['\n\n\nDouble Ref Report\n']
out += [self.get_double_ref_report()]
out += ['\n\n\nSolution Error']
out += ['This section reports what is checked for each division, and displays the total number']
out += ['of errors for each category, for each division. This is basically always all 0.']
out += [self.solution_debug_data()]
out += ['\n\n\n\n']
return '\n'.join(out)
def get_team_round_robin_audit(self):
day = self.days[0]
time_slots = len(day.courts[0])
div_histories = [[[0] * time_slots for _ in range(count)] for count in self.team_counts]
for court in day.courts:
for game_time, game in enumerate(court):
if game.team1 > -1:
div_histories[game.div][game.team1][game_time] += 1
div_histories[game.div][game.team2][game_time] += 1
        report = '\n\n'.join('\n'.join((','.join((str(game) for game in team_hist)) for team_hist in div_hist)) for div_hist in div_histories)
        return report
def get_sequencial_vs_play_report(self):
"""Collection all the vs games in order and assess how much they repeat.
This is only really an issue for the smaller leagues (rec and P+)"""
vs_play = [[[] for _ in range(count)] for count in self.team_counts]
for day in self.days:
for court in day.courts:
for game in court:
if game.div not in (init_value, -1):
vs_play[game.div][game.team1].append(game.team2)
vs_play[game.div][game.team2].append(game.team1)
vs_repeat_period = [[[] for _ in range(count)] for count in self.team_counts]
output = '\nDebug of repeats of the same vs match over successive games'
output += '\nEach line is the readout for one team'
output += '\nThe first list in a line are the opposing teams for each game and '
output += '\nthe second list are the successive games before that team is played again.'
output += '\n(the default for this is 0 if the team is not played again in a season)'
for div_idx, div_vs_play in enumerate(vs_play):
output += '\nOutput for division: {}\n'.format(div_idx)
for team_idx, team_vs_play in enumerate(div_vs_play):
for idx1 in range(len(team_vs_play)):
for idx2 in range(idx1 + 1, len(team_vs_play)):
if team_vs_play[idx1] == team_vs_play[idx2]:
vs_repeat_period[div_idx][team_idx].append(idx2 - idx1)
break
else:
vs_repeat_period[div_idx][team_idx].append(0)
output += ','.join([str(num) for num in vs_play[div_idx][team_idx]]) + ' '
output += ','.join([str(num) for num in vs_repeat_period[div_idx][team_idx]]) + '\n'
return output
def get_open_play_report(self):
"""Generate a report of teams with open play opertunities vs day for debugging"""
output = '\nDebug of open play opertunties in the schedule.'
output += '\nFirst, there is a report of what the facilities objects view as open play.'
output += '\nThen there is the day-by-day running total of the number'
        output += '\nof opportunities a team has to play open play, i.e. when a team has'
output += '\na game before or after an open play / skills clinic slot'
output += '\nOpen play slots'
for idx, day in enumerate(self.days):
open_play_times = day.facility_day.get_open_play_times()
            output += '\nThe open play opportunities for day {} are {}'.format(idx, open_play_times)
        output += '\nOpen opportunities by team'
total_fit = 0
for idx, day in enumerate(self.days):
day_fit = day.fitness_str()
total_fit += day_fit
output += '\nOpen play data after day {} is {}'.format(idx, total_fit.open_play_lists())
return output
def get_double_ref_report(self):
"""Generate a report of teams with open play opertunities vs day for debugging"""
output = '\nDebug of double ref metrics. This means one team getting forced'
output += '\nto ref twice in the same day'
for idx, day in enumerate(self.days):
day_fitness = day.fitness_str()
output += '\ndouble refs for day {}: {}'.format(idx, day_fitness.get_double_ref_lists())
return output
def remake_worst_day(self, count):
days_fitness = [(idx, day.fitness(self.divisions)) for idx, day in enumerate(self.days)]
days_fitness.sort(key=lambda x: x[1])
worst_days = [days_fitness[idx][0] for idx in range(count)]
fitness = self.try_remake_days(worst_days)
return fitness
def try_remake_a_few_random_days(self, div_idx, day_remake_count):
all_day_indexes = range(len(self.days))
days_to_remake = random.sample(all_day_indexes, day_remake_count)
self.try_remake_div_days(div_idx, days_to_remake)
    ### not currently used
def try_remake_div_days(self, div_idx, day_indexes):
from copy import deepcopy
original_days = deepcopy(self.days)
original_division = deepcopy(self.divisions)
original_fitness = self.fitness()
for day_idx in day_indexes:
self.subtract_day_from_division_history(self.days[day_idx])
for day_idx in day_indexes:
new_day = self.make_day(self.days[day_idx].facility_day,
old_day=self.days[day_idx], target_div_idx=div_idx)
self.days[day_idx] = new_day
fitness = self.fitness()
# fudge factor to allow more drift in solution, to avoid local stability issues
local_stability_avoidance_fudge_factor = 0
if original_fitness > fitness + local_stability_avoidance_fudge_factor:
self.days = original_days
self.divisions = original_division
fitness = original_fitness
return fitness
def try_remake_days(self, day_indexes):
from copy import deepcopy
original_days = deepcopy(self.days)
original_division = deepcopy(self.divisions)
original_fitness = self.fitness()
for day_idx in day_indexes:
self.subtract_day_from_division_history(self.days[day_idx])
for day_idx in day_indexes:
new_day = self.make_day(self.days[day_idx].facilities,
old_day=self.days[day_idx])
self.days[day_idx] = new_day
fitness = self.fitness()
if original_fitness > fitness:
self.days = original_days
self.divisions = original_division
fitness = original_fitness
return fitness
# todo: not currently used. use or delete
def try_remake_days_new_method(self, day_indexs):
from copy import deepcopy
original_days = deepcopy(self.days)
original_division = deepcopy(self.divisions)
original_fitness = self.fitness(self.league.games_per_div)
for day_idx in day_indexs:
self.subtract_day_from_division_history(self.days[day_idx])
for day_idx in day_indexs:
new_day = self.make_day(self.days[day_idx].facilities,
old_day=self.days[day_idx])
self.add_day_to_division_history(new_day)
self.days[day_idx] = new_day
fitness = self.fitness(self.league.games_per_div)
if original_fitness > fitness:
self.days = original_days
self.divisions = original_division
fitness = original_fitness
return fitness
def make_day(self, fac, day_num=None, old_day=None, target_div_idx=None):
from model import Day
        if day_num is None:
day_num = old_day.num
day = Day(fac, day_num)
if target_div_idx is not None:
division_list = [(target_div_idx, self.divisions[target_div_idx])]
else:
            division_list = list(enumerate(self.divisions)) # todo: use or delete this code. Would probably need a deep copy up above
for div_idx, div in enumerate(self.divisions):
try:
if self.fitness_structure.div_value(div_idx) == 0 or (target_div_idx is not None and target_div_idx != div_idx):
                    if old_day is not None:
day.import_div_games(div_idx, old_day)
self.add_day_to_division_history(day, div_idx=div_idx)
continue
except:
pass
day.schedule_div_players_without_refs(fac, div_idx, div)
return day
def get_sitting_counts(self):
from copy import deepcopy
from fitness import add_lists
self.div_team_times = []
list_of_times = [0] * self.league.ntimes
total_sits = [0] * 8
for div_idx in range(len(self.team_counts)):
team_count = self.team_counts[div_idx]
div_times = [deepcopy(list_of_times) for _ in range(team_count)]
self.div_team_times.append(div_times)
for court in self.days[0].courts:
for time, game in enumerate(court):
if game.div >= 0:
self.div_team_times[game.div][game.team1][time] += 1
self.div_team_times[game.div][game.team2][time] += 1
#for div_idx in range(len(self.team_counts)):
# div_sits = [sitting_counts for _ in range(len(self.team_counts[div_idx))]]
#league_sits = [div_sits for _ in range(len(self.team_counts))]
team_sits = []
for div_idx in range(len(self.team_counts)):
for team_idx in range(self.divisions[div_idx].team_count):
last_play = init_value
play_v_time = self.div_team_times[div_idx][team_idx]
temp_sits = [0] * 8
team_total_sit = 0
for time, plays in enumerate(play_v_time):
if plays:
if last_play == init_value:
last_play = time
else:
sit_time = time - last_play - 1
temp_sits[sit_time] += 1
last_play = time
team_total_sit += sit_time * 15
team_sits.append(temp_sits)
total_sits = add_lists(total_sits, temp_sits)
return total_sits, team_sits
def sitting_fitness(self):
'''returns the total number of games sat by all teams'''
sit_fitness = 0
long_sit = 0
sitting_counts, team_sits = self.get_sitting_counts()
bad = -999999
# todo: refactor to a team centric fitness
        # todo: calculate a division-wise fitness
        # todo: have the selection logic pick the best divisional schedule,
        # not the best schedules that coincide.
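        # Each penalty vector below appears to map the number of consecutive
        # game slots a team sits between plays (index 0 through 6) to a penalty
        # value, with 'bad' marking sit lengths that should never be allowed.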
fitness_func = [
('sitting is sitting', [0, -15, -30, -45, -60, -75, -90]),
('sitting is sitting <h', [0, -15, -30, -45, bad, bad, bad]),
('sitting is sitting <45', [0, -1, -2, bad, bad, bad, bad]),
('longer is worse quad', [0, -5, -20, -45, -80, -125, -180]),
('long sits worse quad', [0, 0, -1, -4, -9, -16, -25]),
('min 45 minutes', [0, 0, -2, -200, bad, bad, bad]),
('min hour and no-sit', [-180, 0, -5, -600, bad, bad, bad]),
('min hour and small-sit', [-200, 0, 0, -200, bad, bad, bad]),
]
game_length = 20
time_cypher = [game_length * idx for idx in range(10)]
        team_wise_penalty = [-5000,0,0,0, -100,bad,bad,bad, bad,bad]
count = sum(sitting_counts)
sum_prod = sum(time * count for time, count in enumerate(sitting_counts))
average = sum_prod / count * 4
results = {}
# calc team-wise sitting function
team_penalty_total = 0
for team_sit in team_sits:
team_sit_total = sum((a * b for a, b in zip(team_sit, time_cypher)))
            team_penalty_total += team_wise_penalty[int(team_sit_total / 20)]
teamwise_fitness = team_penalty_total / len(team_sits)
# calc the total sit fitness for various functions
for name, func in fitness_func:
raw_fitness = sum((a * b for a, b in zip(func, sitting_counts)))
fitness = raw_fitness - teamwise_fitness
ave = fitness / sum(self.team_counts)
result = {'fitness': fitness, 'sits': sitting_counts, 'func': func,
'ave': ave, 'team_sits': team_sits,
'teamwise_fitness': teamwise_fitness,
'raw_fitness': raw_fitness}
results[name] = result
return results
def add_day_to_division_history(self, day, div_idx=None, sign=1):
for court_idx, court in enumerate(day.courts):
for game in court:
if (game.div == init_value):
continue
                if div_idx is not None and div_idx != game.div:
continue
fake_values = [init_value, -1]
if game.team1 not in fake_values and game.team2 not in fake_values:
self.divisions[game.div].teams[game.team1].times_team_played[game.team2] += sign
self.divisions[game.div].teams[game.team2].times_team_played[game.team1] += sign
                    if (self.divisions[game.div].teams[game.team1].times_team_played[game.team2] < 0 or
                        self.divisions[game.div].teams[game.team2].times_team_played[game.team1] < 0):
                        raise(Exception('times_team_played went negative while updating division history'))
if game.ref != init_value:
self.divisions[game.div].teams[game.ref].refs += sign
self.divisions[game.div].teams[game.team1].games_per_day[day.num] += sign
self.divisions[game.div].teams[game.team2].games_per_day[day.num] += sign
def subtract_day_from_division_history(self, day, div_idx=None):
self.add_day_to_division_history(day, div_idx=div_idx, sign=-1)
def skillz_clinic_count(self):
# The number of skillz clinics is the number of open game slots
total_slots = self.daycount * self.courts * self.times
total_games = self.games_per_team * sum(self.team_counts) / 2 # 2 teams per game
self.skillz_clinics = total_slots - total_games
print("There will be %s skillz clinics in this schedule"
% self.skillz_clinics)
def create_json_schedule(self):
import json
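        # The serialized structure is nested as [day][court][time] -> game dict,
        # where each game dict comes from the game object's gen_dict() method.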
sch_obj = []
for day in self.days:
court_list = []
for court in range(len(day.courts)):
time_list = []
for time in range(len(day.courts[0])):
game_dict = day.courts[court][time].gen_dict()
time_list.append(game_dict)
court_list.append(time_list)
sch_obj.append(court_list)
out = json.dumps(sch_obj)
return out
def clear_all_reffing_for_division(self, division_idx):
for day in self.days:
court_list = day.courts
for court in court_list:
for game in court:
if game.div == division_idx:
game.ref = init_value
for team in self.divisions[division_idx].teams:
team.refs = 0
def try_transfer_reffing(self, div, div_idx):
"""
The purpose of this method is to transfer ref responsibilities from a team with too
many to a team w too few. This routine ends once either list is empty
:param from_list: List of teams w the most reffings
:param to_list: List of teams w the least
:return: the division structure with the revised reffing history
"""
reffings = [team.refs for team in div.teams]
from_list = [idx for idx, ref_count in enumerate(reffings) if ref_count == max(reffings)]
to_list = [idx for idx, ref_count in enumerate(reffings) if ref_count == min(reffings)]
for day in self.days:
from_list, to_list, div = day.try_transfer_reffing(from_list, to_list, div, div_idx)
if not (from_list and to_list):
break
return div
def ref_transfering_is_neeeded(self, div):
reffings = [team.refs for team in div.teams]
if max(reffings) - min(reffings) > 1:
return True
else:
return False
def add_reffing(self, debug=False):
max_number_of_attempts = 1000 # not expected to happen
for div_idx, div in enumerate(self.divisions):
print('Adding Reffing for division {}'.format(div_idx))
for idx in range(max_number_of_attempts):
self.clear_all_reffing_for_division(div_idx)
for day_idx in range(len(self.days)):
self.days[day_idx].add_reffing(div_idx, self.divisions[div_idx])
if self.ref_transfering_is_neeeded(div):
div = self.try_transfer_reffing(div, div_idx)
if debug:
print(self.solution_debug_data(idx))
if self.fitness() == 0:
break
else:
print(self.solution_debug_data(1))
print('\n'.join(self.get_audit_text()))
raise(Exception('Could not find ref solution for div_idx {}'.format(div_idx)))
def switch_teams(self, div_idx, team1, team2):
teams = [team1, team2]
otherer = create_get_other(teams)
for day_idx, day in enumerate(self.days):
for court_idx, court in enumerate(day.courts):
for time_idx, game in enumerate(court):
if game.div == div_idx:
if game.team1 in teams:
game.team1 = otherer(game.team1)
if game.team2 in teams:
game.team2 = otherer(game.team2)
if game.ref in teams:
game.ref = otherer(game.ref)
self.days[day_idx].courts[court_idx][time_idx] = game
def solution_debug_data(self, mut_idx=0, div_idx=None):
fitness = self.fitness()
def get_sorted_breakdown(div_idx):
error_dict = self.fitness_error_breakdown(div_idx=div_idx)
return str(sorted(list(error_dict.items())))
if div_idx == None:
breakdown = '\n'.join(['division {} breakdown: {}'.format(div_idx, [get_sorted_breakdown(div_idx) for div_idx in range(5)])])
else:
breakdown = 'division {} breakdown: {}'.format(div_idx, get_sorted_breakdown(div_idx))
return "value = {} while on mutation step {}: division fitness = {}\n{}".format(
fitness, mut_idx, self.fitness_div_list(), breakdown)
def try_move_game_from_court(self, day_idx, target_court_idx, time):
div_idx = self.days[day_idx].courts[target_court_idx][time].div
for alternate_court_idx in range(len(self.days[day_idx].courts)):
alternate_game = self.days[day_idx].courts[alternate_court_idx][time]
if alternate_court_idx != target_court_idx and div_idx == alternate_game.div:
(self.days[day_idx].courts[target_court_idx][time],
self.days[day_idx].courts[alternate_court_idx][time]) = \
(self.days[day_idx].courts[alternate_court_idx][time],
self.days[day_idx].courts[target_court_idx][time])
break
def try_shift_team_out_of_court(self, div_teams, court_idx_to_avoid):
for div_idx, team_idx in div_teams:
for day_idx, day in enumerate(self.days):
for time, game in enumerate(day.courts[court_idx_to_avoid]):
if game.div == div_idx and team_idx in [game.team1, game.team2]:
self.try_move_game_from_court(day_idx, court_idx_to_avoid, time)
def switch_specific_games(self, game_data1, game_data2):
game1 = copy.deepcopy(self.days[game_data1['day']].courts[game_data1['court']][game_data1['time']])
game2 = copy.deepcopy(self.days[game_data2['day']].courts[game_data2['court']][game_data2['time']])
if game1.div != game2.div:
raise(Exception('Tried to switch games at {} and {} but they are not the same division:\n'.format(game_data1, game_data2) +
'game1 = {}, game2 = {}'.format(game1, game2)))
self.days[game_data1['day']].courts[game_data1['court']][game_data1['time']] = game2
self.days[game_data2['day']].courts[game_data2['court']][game_data2['time']] = game1
def switch_specific_refs(self, game_data1, game_data2):
game1 = copy.deepcopy(self.days[game_data1['day']].courts[game_data1['court']][game_data1['time']])
game2 = copy.deepcopy(self.days[game_data2['day']].courts[game_data2['court']][game_data2['time']])
if game1.div != game2.div:
raise(Exception('Tried to switch games at {} and {} but they are not the same division:\n'.format(game_data1, game_data2) +
'game1 = {}, game2 = {}'.format(game1, game2)))
self.days[game_data1['day']].courts[game_data1['court']][game_data1['time']].ref = game2.ref
self.days[game_data2['day']].courts[game_data2['court']][game_data2['time']].ref = game1.ref
def switch_days(self, day1, day2):
(self.days[day1], self.days[day2]) = (self.days[day2], self.days[day1])
self.days[day1].num = day1
self.days[day2].num = day2
self.days[day1].facilities.day_idx = day1
self.days[day2].facilities.day_idx = day2
def create_get_other(pair):
    def other(current):
        if pair[0] == current:
            return pair[1]
        else:
            return pair[0]
    return other
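# Illustrative usage sketch (not part of the original scheduler): the closure
# returned by create_get_other maps each element of a two-item list to the
# other one, e.g.
#
#   swap = create_get_other([3, 7])
#   swap(3)  # -> 7
#   swap(7)  # -> 3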
def gen_schedule_from_json(json_input):
import json
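    # Note: deserializing JSON back into a Schedule object is not implemented
    # here; as written the function falls through and returns None.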
#out = Schedule()
def load_reg_schedule():
import pickle
path = '/Users/coulter/Desktop/life_notes/2016_q1/scvl/'
tag = '2016-01-22a_'
sch_py_obj = path + tag + 'python_file_obj.pickle'
with open(sch_py_obj, 'rb') as sch_file:
schedule = pickle.load(sch_file)
return schedule
if __name__ == '__main__':
sch = load_reg_schedule()
json_sch = sch.create_json_schedule()
print(json_sch)
sch2 = gen_schedule_from_json(json_sch) | mit | 7,339,886,801,671,712,000 | 47.723794 | 140 | 0.571307 | false |
pudo/aleph | aleph/tests/test_search_query.py | 1 | 3559 | from unittest import TestCase
from aleph.search.parser import SearchQueryParser
from aleph.search.query import Query
def query(args):
return Query(SearchQueryParser(args, None))
class QueryTestCase(TestCase):
def setUp(self):
# Allow list elements to be in any order
self.addTypeEqualityFunc(list, self.assertItemsEqual)
# The standard assertDictEqual doesn't compare values
# using assertEquals, so it fails to allow lists to be
# in any order
def assertDictEqual(self, d1, d2, msg=None):
for k, v1 in d1.items():
self.assertIn(k, d2, msg)
v2 = d2[k]
self.assertEqual(v1, v2, msg)
# The standard assertItemsEqual doesn't use assertEquals
# so fails to correctly compare complex data types
def assertItemsEqual(self, items1, items2, msg=None):
for item1 in items1:
has_equal = False
for item2 in items2:
try:
self.assertEqual(item1, item2)
has_equal = True
break
except Exception:
pass
if not has_equal:
self.fail('Item %r missing' % item1)
def test_no_text(self):
q = query([])
self.assertEqual(q.get_text_query(), [{'match_all': {}}])
def test_has_text(self):
q = query([('q', 'search text')])
text_q = q.get_text_query()
self.assertEqual(text_q[0]['query_string']['query'],
'search text')
def test_has_prefix(self):
q = query([('prefix', 'tex')])
text_q = q.get_text_query()
self.assertEqual(text_q[0]['match_phrase_prefix']['name'], 'tex')
def test_id_filter(self):
q = query([
('filter:id', '5'),
('filter:id', '8'),
('filter:id', '2'),
('filter:_id', '3')
])
self.assertEqual(q.get_filters(), [{
'ids': {
'values': ['8', '5', '2', '3']}
}
])
def test_filters(self):
q = query([
('filter:key1', 'foo'),
('filter:key1', 'bar'),
('filter:key2', 'blah'),
('filter:key2', 'blahblah')
])
self.assertEqual(q.get_filters(), [
{
'terms': {
'key1': ['foo', 'bar']
}
},
{
'terms': {
'key2': ['blah', 'blahblah']
}
}
])
def test_offset(self):
q = query([('offset', 10), ('limit', 100)])
body = q.get_body()
self.assertDictContainsSubset({'from': 10, 'size': 100}, body)
def test_post_filters(self):
q = query([
('filter:key1', 'foo'),
('post_filter:key2', 'foo'),
('post_filter:key2', 'bar'),
('post_filter:key3', 'blah'),
('post_filter:key3', 'blahblah')
])
self.assertEqual(q.get_filters(), [{
'term': {'key1': 'foo'}
}])
self.assertEqual(q.get_post_filters(), {
'bool': {
'filter': [
{
'terms': {
'key2': ['foo', 'bar']
}
},
{
'terms': {
'key3': ['blah', 'blahblah']
}
}
]
}
})
| mit | 531,886,608,683,313,000 | 28.658333 | 73 | 0.437763 | false |
printedheart/opennars | nars_gui/src/main/python/nef_theano/ensemble.py | 1 | 12468 | from theano.tensor.shared_randomstreams import RandomStreams
from theano import tensor as TT
import theano
import numpy
import neuron
import origin
def make_encoders(neurons, dimensions, srng, encoders=None):
"""Generates a set of encoders
:param int neurons: number of neurons
:param int dimensions: number of dimensions
    :param theano.tensor.shared_randomstreams srng: theano random number generator function
:param list encoders: set of possible preferred directions of neurons
"""
if encoders is None: # if no encoders specified
encoders = srng.normal((neurons, dimensions)) # generate randomly
else:
encoders = numpy.array(encoders) # if encoders were specified, cast list as array
# repeat array until 'encoders' is the same length as number of neurons in population
encoders = numpy.tile(encoders, (neurons / len(encoders) + 1, 1))[:neurons, :dimensions]
# normalize encoders across represented dimensions
norm = TT.sum(encoders * encoders, axis=[1], keepdims=True)
encoders = encoders / TT.sqrt(norm)
return theano.function([], encoders)()
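# Illustrative usage sketch (not part of the original module): make_encoders
# returns a (neurons x dimensions) numpy array whose rows are unit-length
# preferred-direction vectors, e.g.
#
#   srng = RandomStreams(seed=42)
#   enc = make_encoders(100, 3, srng)                    # random unit vectors
#   enc = make_encoders(4, 2, srng,
#                       encoders=[[1, 0], [-1, 0]])      # tiled and normalized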
class Accumulator:
def __init__(self, ensemble, pstc):
"""A collection of terminations in the same population, all sharing the same time constant
Stores the decoded_input accumulated across these terminations, i.e. their summed contribution to the represented signal
Also stores the direct_input value, which is direct current input when connections are added with a weight matrix specified
:param Ensemble ensemble: the ensemble this set of terminations is attached to
:param float pstc: post-synaptic time constant on filter
"""
self.ensemble = ensemble
# decoded_input should be dimensions * array_size because we account for the transform matrix here, so different array networks get different input
self.decoded_input = theano.shared(numpy.zeros(self.ensemble.dimensions * self.ensemble.array_size).astype('float32')) # the initial filtered decoded input
# encoded_input, however, is the same for all networks in the arrays, connecting directly to the neurons, so only needs to be size neurons_num
self.encoded_input = theano.shared(numpy.zeros(self.ensemble.neurons_num).astype('float32')) # the initial filtered encoded input
self.decay = numpy.exp(-self.ensemble.neurons.dt / pstc) # time constant for filter
self.decoded_total = None # the theano object representing the sum of the decoded inputs to this filter
self.encoded_total = None # the theano object representing the sum of the encoded inputs to this filter
def add_decoded_input(self, decoded_input):
"""Add to the current set of decoded inputs (with the same post-synaptic time constant pstc) an additional input
self.new_decoded_input is the calculation of the contribution of all of the decoded input with the same filtering
time constant to the ensemble, input current then calculated as the sum of all decoded_input x ensemble.encoders
:param decoded_input: theano object representing the output of the pre population multiplied by this termination's transform matrix
"""
if self.decoded_total is None: self.decoded_total = decoded_input # initialize internal value storing decoded input value to neurons
else: self.decoded_total = self.decoded_total + decoded_input # add to the decoded input to neurons
self.new_decoded_input = self.decay * self.decoded_input + (1 - self.decay) * self.decoded_total # the theano object representing the filtering operation
def add_encoded_input(self, encoded_input):
"""Add to the current set of encoded input (with the same post-synaptic time constant pstc) an additional input
self.new_encoded_input is the calculation of the contribution of all the encoded input with the same filtering
time constant to the ensemble, where the encoded_input is exactly the input current to each neuron in the ensemble
:param encoded_input: theano object representing the current output of every neuron of the pre population x a connection weight matrix
"""
if self.encoded_total is None: self.encoded_total = encoded_input # initialize internal value storing encoded input (current) to neurons
else: self.encoded_total = self.encoded_total + encoded_input # add input encoded input (current) to neurons
# flatten because a col + a vec gives a matrix type, but it's actually just a vector still
self.new_encoded_input = TT.flatten(self.decay * self.encoded_input + (1 - self.decay) * self.encoded_total) # the theano object representing the filtering operation
class Ensemble:
def __init__(self, neurons, dimensions, tau_ref=0.002, tau_rc=0.02, max_rate=(200,300), intercept=(-1.0,1.0),
radius=1.0, encoders=None, seed=None, neuron_type='lif', dt=0.001, array_size=1, eval_points=None):
"""Create an population of neurons with NEF parameters on top
:param int neurons: number of neurons in this population
:param int dimensions: number of dimensions in signal these neurons represent
:param float tau_ref: refractory period of neurons in this population
:param float tau_rc: RC constant
:param tuple max_rate: lower and upper bounds on randomly generate firing rates for neurons in this population
:param tuple intercept: lower and upper bounds on randomly generated x offset
:param float radius: the range of input values (-radius:radius) this population is sensitive to
:param list encoders: set of possible preferred directions of neurons
:param int seed: seed value for random number generator
:param string neuron_type: type of neuron model to use, options = {'lif'}
:param float dt: time step of neurons during update step
:param int array_size: number of sub-populations - for network arrays
:param list eval_points: specific set of points to optimize decoders over by default for this ensemble
"""
self.seed = seed
self.neurons_num = neurons
self.dimensions = dimensions
self.array_size = array_size
self.radius = radius
self.eval_points = eval_points
# create the neurons
# TODO: handle different neuron types, which may have different parameters to pass in
self.neurons = neuron.names[neuron_type]((array_size, self.neurons_num), tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
# compute alpha and bias
srng = RandomStreams(seed=seed) # set up theano random number generator
max_rates = srng.uniform([self.neurons_num], low=max_rate[0], high=max_rate[1])
threshold = srng.uniform([self.neurons_num], low=intercept[0], high=intercept[1])
alpha, self.bias = theano.function([], self.neurons.make_alpha_bias(max_rates, threshold))()
self.bias = self.bias.astype('float32') # force to 32 bit for consistency / speed
# compute encoders
self.encoders = make_encoders(self.neurons_num, dimensions, srng, encoders=encoders)
self.encoders = (self.encoders.T * alpha).T # combine encoders and gain for simplification
self.origin = {} # make origin dictionary
self.add_origin('X', func=None, eval_points=self.eval_points) # make default origin
self.accumulators = {} # dictionary of accumulators tracking terminations with different pstc values
def add_filtered_input(self, pstc, decoded_input=None, encoded_input=None):
"""Create a new termination that takes the given input (a theano object) and filters it with the given pstc
Adds its contributions to the set of decoded or encoded input with the same pstc
Decoded inputs are represented signals, encoded inputs are neuron activities * weight matrix
Can only have decoded OR encoded input != None
:param float pstc: post-synaptic time constant
:param decoded_input: theano object representing the decoded output of the pre population multiplied by this termination's transform matrix
:param encoded_input: theano object representing the encoded output of the pre population multiplied by a connection weight matrix
"""
# make sure one and only one of (decoded_input, encoded_input) is specified
assert (decoded_input is None or encoded_input is None)
assert (decoded_input is not None or encoded_input is not None)
if pstc not in self.accumulators: # make sure there's an accumulator for given pstc
self.accumulators[pstc] = Accumulator(self, pstc)
# add this termination's contribution to the set of terminations with the same pstc
if decoded_input is not None:
# rescale decoded_input by this neurons radius to put us in the right range
self.accumulators[pstc].add_decoded_input(TT.true_div(decoded_input, self.radius))
else:
self.accumulators[pstc].add_encoded_input(encoded_input)
def add_origin(self, name, func, eval_points=None):
"""Create a new origin to perform a given function over the represented signal
:param string name: name of origin
:param function func: desired transformation to perform over represented signal
:param list eval_points: specific set of points to optimize decoders over for this origin
"""
        if eval_points is None: eval_points = self.eval_points
self.origin[name] = origin.Origin(self, func, eval_points=eval_points)
def update(self):
"""Compute the set of theano updates needed for this ensemble
Returns dictionary with new neuron state, termination, and origin values
"""
# find the total input current to this population of neurons
input_current = numpy.tile(self.bias, (self.array_size, 1)) # apply respective biases to neurons in the population
X = numpy.zeros(self.dimensions * self.array_size) # set up matrix to store accumulated decoded input, same size as decoded_input
for a in self.accumulators.values():
if hasattr(a, 'new_decoded_input'): # if there's a decoded input in this accumulator,
X += a.new_decoded_input # add its values to the total decoded input
if hasattr(a, 'new_encoded_input'): # if there's an encoded input in this accumulator
# encoded input is the same to every array network
input_current += a.new_encoded_input # add its values directly to the input current
#TODO: optimize for when nothing is added to X (ie there are no decoded inputs)
X = X.reshape((self.array_size, self.dimensions)) # reshape decoded input for network arrays
# find input current caused by decoded input signals
input_current += TT.dot(X, self.encoders.T) # calculate input_current for each neuron as represented input signal x preferred direction
# pass that total into the neuron model to produce the main theano computation
updates = self.neurons.update(input_current) # updates is an ordered dictionary of theano internal variables to update
for a in self.accumulators.values():
# also update the filtered decoded and encoded internal theano variables for the accumulators
if hasattr(a, 'new_decoded_input'): # if there's a decoded input in this accumulator,
updates[a.decoded_input] = a.new_decoded_input.astype('float32') # add accumulated decoded inputs to theano internal variable updates
if hasattr(a, 'new_encoded_input'): # if there's an encoded input in this accumulator,
updates[a.encoded_input] = a.new_encoded_input.astype('float32') # add accumulated encoded inputs to theano internal variable updates
# and compute the decoded origin decoded_input from the neuron output
for o in self.origin.values():
# in the dictionary updates, set each origin's output decoded_input equal to the self.neuron.output() we just calculated
updates.update(o.update(updates[self.neurons.output]))
return updates
| agpl-3.0 | 7,203,607,654,707,447,000 | 64.621053 | 181 | 0.69538 | false |
lastralab/Statistics | Specialization/Personal/RotationToEuler.py | 1 | 2753 | #!/usr/bin/env python
# Copyright (c) 2016 Satya Mallick <[email protected]>
# All rights reserved. No warranty, explicit or implicit, provided.
import cv2
import numpy as np
import math
import random
R = np.matrix([[0.6927,-0.7146,0.0978],[0.7165,0.6973,0.0198],[-0.0824,0.0564,0.995]])
# Checks if a matrix is a valid rotation matrix.
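# A proper rotation matrix is orthogonal, so R^T * R should equal the identity;
# the check accepts R when the norm of the deviation is below a small tolerance.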
def isRotationMatrix(R) :
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype = R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
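# The decomposition assumes the Z-Y-X composition R = Rz(z) * Ry(y) * Rx(x)
# (matching eulerAnglesToRotationMatrix below) and falls back to a special case
# near the gimbal-lock singularity, where cos(y) is close to zero.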
def rotationMatrixToEulerAngles(R) :
# assert(isRotationMatrix(R))
sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
singular = sy < 1e-6
if not singular :
x = math.atan2(R[2,1] , R[2,2])
y = math.atan2(-R[2,0], sy)
z = math.atan2(R[1,0], R[0,0])
else :
x = math.atan2(-R[1,2], R[1,1])
y = math.atan2(-R[2,0], sy)
z = 0
return np.array([x, y, z])
# Calculates Rotation Matrix given euler angles.
def eulerAnglesToRotationMatrix(theta) :
R_x = np.array([[1, 0, 0 ],
[0, math.cos(theta[0]), -math.sin(theta[0]) ],
[0, math.sin(theta[0]), math.cos(theta[0]) ]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1]) ],
[0, 1, 0 ],
[-math.sin(theta[1]), 0, math.cos(theta[1]) ]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
RR = np.dot(R_z, np.dot( R_y, R_x ))
return RR
if __name__ == '__main__' :
# Randomly generate Euler angles
# e = np.random.rand(3) * math.pi * 2 - math.pi
# Calculate rotation matrix
# RR = eulerAnglesToRotationMatrix(e)
# Calculate Euler angles from rotation matrix
e1 = rotationMatrixToEulerAngles(R)
# Calculate rotation matrix
R1 = eulerAnglesToRotationMatrix(e1)
# Note e and e1 will be the same a lot of times
# but not always. R and R1 should be the same always.
# print "\nInput Euler angles :\n{0}".format(e)
print "\nR :\n{0}".format(R)
print "\nOutput Euler angles :\n{0}".format(e1)
# print "\nR1 :\n{0}".format(R1)
| mit | 2,462,491,476,373,613,000 | 29.588889 | 86 | 0.505993 | false |
pmghalvorsen/gramps_branch | gramps/plugins/lib/libgedcom.py | 1 | 291897 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009-2010 Gary Burton
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Import from GEDCOM
The GEDCOM file format is defined by the GEDCOM 5.5 Specification, found
at http://www.familysearch.org/GEDCOM/GEDCOM55.EXE
The basic structure is a line with three attributes:
<LEVEL> <TOKEN> <DATA>
Because of this structure, it does not lend itself to more traditional
parsing techniques, such as LALR. The LEVEL token implies too much to be
useful in this context. While this makes parsing more difficult, it
does provide one very beneficial feature for GEDCOM: Error recoverability.
GEDCOM is a poorly implemented standard, primarily because it is a poor
standard to begin with.
Most commercial applications that implement GEDCOM output add custom
extensions, and feel free to violate the existing structure. If one were
cynical, one might believe that the commercial programs were trying to
make it difficult to transfer your data to another application.
This parser takes a different approach to parsing a GEDCOM file. The first
stage, the Lexer, reads lines from the file and does some basic lexical
analysis on each line (actually several lines, since it automatically
combines CONT and CONC tagged lines). Each logical line returned to this
parser contains:
Level, Token, Token text, Data, and line number.
The Data field is typically text, but in some cases, it may be a integer
value representing an enumerated type or a GRAMPS object (in the case of
dates).
The parser works on the current level. Each context and level has an
associated table (dictionary) of functions indexed by the corresponding
TOKEN. When a token is found, we index into the table to find the function
associated with the token. If no function is defined for the token, a default
function skips the line and all subordinate lines (those with a higher level
number). If a function is found, then we call that function, which in turn
processes the line and all tokens at the subordinate levels.
For example:
1 BIRT
2 DATE 1 JAN 2000
2 UKNOWN TAG
3 NOTE DATA
The function parsing the individual at level 1 would encounter the BIRT tag.
It would look up the BIRT token in the table to see if a function is defined
for this TOKEN, and pass control to this function. This function would then
start parsing level 2. It would encounter the DATE tag, look up the
corresponding function in the level 2 table, and pass control to its
associated function. This function would terminate, and return control back to
the level 2 parser, which would then encounter the "UKNOWN" tag. Since this is
not a valid token, it would not be in the table, so a default function is
called that skips all lines until the next level 2 token is found (in this
case, skipping the "3 NOTE DATA" line).
"""
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
from __future__ import print_function, unicode_literals
import os
import sys
import re
import time
import codecs
from xml.parsers.expat import ParserCreate
from collections import defaultdict
import string
if sys.version_info[0] < 3:
from cStringIO import StringIO
else:
from io import StringIO
if sys.version_info[0] < 3:
from urlparse import urlparse
else:
from urllib.parse import urlparse
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".libgedcom")
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.errors import GedcomError
from gramps.gen.const import DATA_DIR
from gramps.gen.lib import (Address, Attribute, AttributeType, ChildRef,
ChildRefType, Citation, Date, Event, EventRef, EventRoleType,
EventType, Family, FamilyRelType, LdsOrd, Location, MediaObject,
MediaRef, Name, NameType, Note, NoteType, Person, PersonRef, Place,
RepoRef, Repository, RepositoryType, Researcher,
Source, SourceMediaType, SrcAttribute, SrcAttributeType,
Surname, Tag, Url, UrlType, PlaceType, PlaceRef)
from gramps.gen.db import DbTxn
from gramps.gen.updatecallback import UpdateCallback
from gramps.gen.mime import get_type
from gramps.gen.utils.id import create_id
from gramps.gen.utils.lds import TEMPLES
from gramps.gen.utils.unknown import make_unknown, create_explanation_note
from gramps.gen.datehandler._dateparser import DateParser
from gramps.gen.db.dbconst import EVENT_KEY
from gramps.gui.dialog import WarningDialog
from gramps.gen.lib.const import IDENTICAL, DIFFERENT
from gramps.gen.lib import (StyledText, StyledTextTag, StyledTextTagType)
from gramps.gen.constfunc import cuni, conv_to_unicode, STRTYPE, UNITYPE, win
from gramps.plugins.lib.libplaceimport import PlaceImport
# string.whitespace in some configuration is changed if it is imported
# after setting locale (adding '0xa0')
if win() and sys.version_info[0] < 3:
string.whitespace = ' \t\n\r\v\f'
#-------------------------------------------------------------------------
#
# constants
#
#-------------------------------------------------------------------------
TOKEN_UNKNOWN = 0
TOKEN_ABBR = 1
TOKEN_ADDR = 2
TOKEN_ADOP = 3
TOKEN_ADR1 = 4
TOKEN_ADR2 = 5
TOKEN_AFN = 6
TOKEN_IGNORE = 7
TOKEN_REFN = 8
TOKEN__AKA = 9
TOKEN_ALIA = 11
TOKEN_ANCI = 12
TOKEN_ASSO = 13
TOKEN_AUTH = 14
TOKEN_BAPL = 15
TOKEN_BIRT = 16
TOKEN__CAT = 17
TOKEN_CAUS = 18
TOKEN_CHAN = 19
TOKEN_CHAR = 20
TOKEN_CHIL = 21
TOKEN_CITY = 22
TOKEN__COMM = 23
TOKEN_CONC = 24
TOKEN_CONT = 25
TOKEN_COPR = 26
TOKEN_CORP = 27
TOKEN_CTRY = 28
TOKEN_DATA = 29
TOKEN_DATE = 30
TOKEN_DEAT = 32
TOKEN_DESI = 33
TOKEN_DEST = 34
TOKEN_ENDL = 35
TOKEN_EVEN = 36
TOKEN_FAM = 38
TOKEN_FAMC = 39
TOKEN_FAMS = 40
TOKEN_FILE = 41
TOKEN_FORM = 42
TOKEN__FREL = 43
TOKEN_GEDC = 44
TOKEN_GIVN = 45
TOKEN__GODP = 46
TOKEN_HUSB = 47
TOKEN_INDI = 48
TOKEN_LABL = 49
TOKEN_LANG = 50
TOKEN__LOC = 51
TOKEN__MARNM = 52
TOKEN__MREL = 53
TOKEN__NAME = 54
TOKEN_NAME = 55
TOKEN_NCHI = 56
TOKEN_NICK = 57
TOKEN_NOTE = 58
TOKEN_NPFX = 59
TOKEN_NSFX = 60
TOKEN_OBJE = 61
TOKEN_OFFI = 62
TOKEN_PAGE = 63
TOKEN_PEDI = 64
TOKEN_PERI = 65
TOKEN_PHON = 66
TOKEN_PLAC = 67
TOKEN_POST = 68
TOKEN__PRIMARY = 69
TOKEN__PRIV = 70
TOKEN_PUBL = 71
TOKEN_QUAY = 72
TOKEN_RELI = 74
TOKEN_REPO = 75
TOKEN_RESI = 76
TOKEN_RFN = 77
TOKEN_RIN = 78
TOKEN__SCHEMA = 79
TOKEN_SEX = 80
TOKEN_SLGC = 81
TOKEN_SLGS = 82
TOKEN_SOUR = 83
TOKEN_SPFX = 84
TOKEN_STAE = 85
TOKEN__STAT = 86
TOKEN_STAT = 87
TOKEN_SUBM = 88
TOKEN_SUBN = 89
TOKEN_SURN = 90
TOKEN_TAXT = 91
TOKEN_TEMP = 92
TOKEN_TEXT = 93
TOKEN_TIME = 94
TOKEN_TITL = 95
TOKEN__TODO = 96
TOKEN_TRLR = 97
TOKEN_TYPE = 98
TOKEN__UID = 99
TOKEN_VERS = 100
TOKEN_WIFE = 101
TOKEN__WITN = 102
TOKEN__WTN = 103
TOKEN_AGNC = 104
TOKEN_HEAD = 105
TOKEN_CALN = 106
TOKEN_MEDI = 107
TOKEN_RELA = 108
TOKEN__LKD = 109
TOKEN_BLOB = 110
TOKEN_CONL = 111
TOKEN_AGE = 112
TOKEN_RESN = 114
TOKEN_ID = 115
TOKEN_GEVENT = 116
TOKEN_RNOTE = 117
TOKEN_GATTR = 118
TOKEN_ATTR = 119
TOKEN_MAP = 120
TOKEN_LATI = 121
TOKEN_LONG = 122
TOKEN_FACT = 123
TOKEN_EMAIL = 124
TOKEN_WWW = 125
TOKEN_URL = 126
TOKEN_ROLE = 127
TOKEN__MAR = 128
TOKEN__MARN = 129
TOKEN__ADPN = 130
TOKEN__FSFTID = 131
TOKENS = {
"HEAD" : TOKEN_HEAD, "MEDI" : TOKEN_MEDI,
"HEADER" : TOKEN_HEAD, "TRAILER" : TOKEN_TRLR,
"CALL_NUMBER" : TOKEN_CALN, "MEDIA" : TOKEN_MEDI,
"CALN" : TOKEN_CALN, "ABBR" : TOKEN_ABBR,
"ABBREVIATION" : TOKEN_ABBR, "ADDR" : TOKEN_ADDR,
"ADDRESS" : TOKEN_ADDR, "ADOP" : TOKEN_ADOP,
"ADOPT" : TOKEN_ADOP, "ADR1" : TOKEN_ADR1,
"ADDRESS1" : TOKEN_ADR1, "ADR2" : TOKEN_ADR2,
"ADDRESS2" : TOKEN_ADR2, "AFN" : TOKEN_AFN,
"AGE" : TOKEN_AGE, "AGNC" : TOKEN_AGNC,
"AGENCY" : TOKEN_IGNORE, "_AKA" : TOKEN__AKA,
"_AKAN" : TOKEN__AKA, "AKA" : TOKEN__AKA,
"_ALIA" : TOKEN_ALIA, "ALIA" : TOKEN_ALIA,
"ALIAS" : TOKEN_ALIA, "ANCI" : TOKEN_ANCI,
"ASSO" : TOKEN_ASSO, "ASSOCIATES" : TOKEN_ASSO,
"AUTH" : TOKEN_AUTH, "AUTHOR" : TOKEN_AUTH,
"BAPL" : TOKEN_BAPL, "BAPTISM-LDS" : TOKEN_BAPL,
"BIRT" : TOKEN_BIRT, "BIRTH" : TOKEN_BIRT,
"_CAT" : TOKEN_IGNORE, "CAUS" : TOKEN_CAUS,
"CAUSE" : TOKEN_CAUS, "CHAN" : TOKEN_CHAN,
"CHANGE" : TOKEN_CHAN, "CHAR" : TOKEN_CHAR,
"CHARACTER" : TOKEN_CHAR, "CHIL" : TOKEN_CHIL,
"CHILD" : TOKEN_CHIL, "CITY" : TOKEN_CITY,
"_COMM" : TOKEN__COMM, "CONC" : TOKEN_CONC,
"CONCATENTATE" : TOKEN_CONC, "CONT" : TOKEN_CONT,
"CONTINUED" : TOKEN_CONT, "CONCATENATION": TOKEN_CONC,
"CONTINUATION" : TOKEN_CONT, "COPR" : TOKEN_COPR,
"COPYRIGHT" : TOKEN_COPR, "CORP" : TOKEN_CORP,
"CORPORATION" : TOKEN_CORP, "CTRY" : TOKEN_CTRY,
"COUNTRY" : TOKEN_CTRY, "DATA" : TOKEN_DATA,
"DATE" : TOKEN_DATE, "_DATE2" : TOKEN_IGNORE,
"DEAT" : TOKEN_DEAT, "DEATH" : TOKEN_DEAT,
"DESI" : TOKEN_DESI, "DEST" : TOKEN_DEST,
"DESTINATION" : TOKEN_DEST, "ENDL" : TOKEN_ENDL,
"ENDOWMENT" : TOKEN_ENDL, "EVEN" : TOKEN_EVEN,
"EVENT" : TOKEN_EVEN, "_ANCES_ORDRE" : TOKEN_IGNORE,
"FAM" : TOKEN_FAM, "FAMILY" : TOKEN_FAM,
"FAMC" : TOKEN_FAMC, "FAMILY_CHILD" : TOKEN_FAMC,
"FAMS" : TOKEN_FAMS, "FAMILY_SPOUSE" : TOKEN_FAMS,
"FILE" : TOKEN_FILE, "FORM" : TOKEN_FORM,
"_FREL" : TOKEN__FREL, "GEDC" : TOKEN_GEDC,
"GEDCOM" : TOKEN_GEDC, "GIVN" : TOKEN_GIVN,
"GIVEN_NAME" : TOKEN_GIVN, "_GODP" : TOKEN__GODP,
"HUSB" : TOKEN_HUSB, "HUSBAND" : TOKEN_HUSB,
"INDI" : TOKEN_INDI, "INDIVIDUAL" : TOKEN_INDI,
"LABL" : TOKEN_LABL, "LABEL" : TOKEN_LABL,
"LANG" : TOKEN_LANG, "_LOC" : TOKEN__LOC,
"_MARNM" : TOKEN__MARNM, "_MREL" : TOKEN__MREL,
"_NAME" : TOKEN__NAME, "NAME" : TOKEN_NAME,
"NCHI" : TOKEN_NCHI, "CHILDREN_COUNT": TOKEN_NCHI,
"NICK" : TOKEN_NICK, "NICKNAME" : TOKEN_NICK,
"NOTE" : TOKEN_NOTE, "NPFX" : TOKEN_NPFX,
"NAME_PREFIX" : TOKEN_NPFX, "NSFX" : TOKEN_NSFX,
"NAME_SUFFIX" : TOKEN_NSFX, "OBJE" : TOKEN_OBJE,
"OBJECT" : TOKEN_OBJE, "OFFI" : TOKEN_OFFI,
"PAGE" : TOKEN_PAGE, "PEDIGREE" : TOKEN_PEDI,
"PEDI" : TOKEN_PEDI, "PERI" : TOKEN_PERI,
"PHON" : TOKEN_PHON, "PHONE" : TOKEN_PHON,
"PHONE_NUMBER" : TOKEN_PHON, "PLAC" : TOKEN_PLAC,
"PLACE" : TOKEN_PLAC, "POST" : TOKEN_POST,
"POSTAL_CODE" : TOKEN_POST, "_PRIMARY" : TOKEN__PRIMARY,
"_PRIV" : TOKEN__PRIV, "PUBL" : TOKEN_PUBL,
"PUBLICATION" : TOKEN_PUBL, "QUAY" : TOKEN_QUAY,
"QUALITY_OF_DATA": TOKEN_QUAY, "REFN" : TOKEN_REFN,
"REFERENCE" : TOKEN_REFN, "RELI" : TOKEN_RELI,
"RELIGION" : TOKEN_RELI, "REPO" : TOKEN_REPO,
"REPOSITORY" : TOKEN_REPO, "RFN" : TOKEN_RFN,
"RIN" : TOKEN_RIN, "ROLE" : TOKEN_ROLE,
"_SCHEMA" : TOKEN__SCHEMA,
"SEX" : TOKEN_SEX, "SCHEMA" : TOKEN__SCHEMA,
"SLGC" : TOKEN_SLGC, "SLGS" : TOKEN_SLGS,
"SOUR" : TOKEN_SOUR, "SOURCE" : TOKEN_SOUR,
"SPFX" : TOKEN_SPFX, "SURN_PREFIX" : TOKEN_SPFX,
"STAE" : TOKEN_STAE, "STATE" : TOKEN_STAE,
"_STAT" : TOKEN__STAT, "STAT" : TOKEN_STAT,
"STATUS" : TOKEN_STAT, "SUBM" : TOKEN_SUBM,
"SUBMITTER" : TOKEN_SUBM, "SUBN" : TOKEN_SUBN,
"SUBMISSION" : TOKEN_SUBN, "SURN" : TOKEN_SURN,
"SURNAME" : TOKEN_SURN, "TAXT" : TOKEN_TAXT,
"TEMP" : TOKEN_TEMP, "TEMPLE" : TOKEN_TEMP,
"TEXT" : TOKEN_TEXT, "TIME" : TOKEN_TIME,
"TITL" : TOKEN_TITL, "TITLE" : TOKEN_TITL,
"_TODO" : TOKEN__TODO, "TRLR" : TOKEN_TRLR,
"TYPE" : TOKEN_TYPE,
"_UID" : TOKEN__UID, "VERS" : TOKEN_VERS,
"VERSION" : TOKEN_VERS, "WIFE" : TOKEN_WIFE,
"_WITN" : TOKEN__WITN, "_WTN" : TOKEN__WTN,
"_CHUR" : TOKEN_IGNORE,"RELA" : TOKEN_RELA,
"_DETAIL" : TOKEN_IGNORE,"_PREF" : TOKEN__PRIMARY,
"_LKD" : TOKEN__LKD, "_DATE" : TOKEN_IGNORE,
"_SCBK" : TOKEN_IGNORE,"_TYPE" : TOKEN_TYPE,
"_PRIM" : TOKEN_IGNORE,"_SSHOW" : TOKEN_IGNORE,
"_PAREN" : TOKEN_IGNORE,"BLOB" : TOKEN_BLOB,
"CONL" : TOKEN_CONL, "RESN" : TOKEN_RESN,
"_MEDI" : TOKEN_MEDI, "_MASTER" : TOKEN_IGNORE,
"_LEVEL" : TOKEN_IGNORE,"_PUBLISHER" : TOKEN_IGNORE,
"MAP" : TOKEN_MAP, "LATI" : TOKEN_LATI,
"LONG" : TOKEN_LONG, "_ITALIC" : TOKEN_IGNORE,
"_PLACE" : TOKEN_IGNORE,
"FACT" : TOKEN_FACT, "EMAIL" : TOKEN_EMAIL,
"EMAI" : TOKEN_EMAIL, "WWW" : TOKEN_WWW,
"_URL" : TOKEN_URL, "URL" : TOKEN_URL,
"_MAR" : TOKEN__MAR, "_MARN" : TOKEN__MARN,
"_ADPN" : TOKEN__ADPN, "_FSFTID" : TOKEN__FSFTID,
}
ADOPT_NONE = 0
ADOPT_EVENT = 1
ADOPT_FTW = 2
ADOPT_LEGACY = 3
ADOPT_PEDI = 4
ADOPT_STD = 5
CONC_OK = 0
CONC_BROKEN = 1
ALT_NAME_NONE = 0
ALT_NAME_STD = 1
ALT_NAME_ALIAS = 2
ALT_NAME_AKA = 3
ALT_NAME_EVENT_AKA = 4
ALT_NAME_UALIAS = 5
CALENDAR_NO = 0
CALENDAR_YES = 1
OBJE_NO = 0
OBJE_YES = 1
PREFIX_NO = 0
PREFIX_YES = 1
RESIDENCE_ADDR = 0
RESIDENCE_PLAC = 1
SOURCE_REFS_NO = 0
SOURCE_REFS_YES = 1
TYPE_BIRTH = ChildRefType()
TYPE_ADOPT = ChildRefType(ChildRefType.ADOPTED)
TYPE_FOSTER = ChildRefType(ChildRefType.FOSTER)
RELATION_TYPES = (
ChildRefType.BIRTH,
ChildRefType.UNKNOWN,
ChildRefType.NONE,
)
PEDIGREE_TYPES = {
'birth' : ChildRefType(),
'natural': ChildRefType(),
'step' : TYPE_ADOPT,
'adopted': TYPE_ADOPT,
'foster' : TYPE_FOSTER,
}
MIME_MAP = {
'jpeg' : 'image/jpeg', 'jpg' : 'image/jpeg',
'rtf' : 'text/rtf', 'pdf' : 'application/pdf',
'mpeg' : 'video/mpeg', 'mpg' : 'video/mpeg',
'gif' : 'image/gif', 'bmp' : 'image/x-ms-bmp',
'tiff' : 'image/tiff', 'aif' : 'audio/x-aiff',
'text' : 'text/plain', 'w8bn' : 'application/msword',
'wav' : 'audio/x-wav', 'mov' : 'video/quicktime',
}
FTW_BAD_PLACE = [
EventType.OCCUPATION,
EventType.RELIGION,
EventType.DEGREE
]
MEDIA_MAP = {
'audio' : SourceMediaType.AUDIO,
'book' : SourceMediaType.BOOK,
'card' : SourceMediaType.CARD,
'electronic' : SourceMediaType.ELECTRONIC,
'fiche' : SourceMediaType.FICHE,
'microfiche' : SourceMediaType.FICHE,
'microfilm' : SourceMediaType.FICHE,
'film' : SourceMediaType.FILM,
'magazine' : SourceMediaType.MAGAZINE,
'manuscript' : SourceMediaType.MANUSCRIPT,
'map' : SourceMediaType.MAP,
'newspaper' : SourceMediaType.NEWSPAPER,
'photo' : SourceMediaType.PHOTO,
'tombstone' : SourceMediaType.TOMBSTONE,
'grave' : SourceMediaType.TOMBSTONE,
'video' : SourceMediaType.VIDEO,
}
#-------------------------------------------------------------------------
#
# Integer to GEDCOM tag mappings for constants
#
#-------------------------------------------------------------------------
CALENDAR_MAP_GEDCOM2XML = {
"FRENCH R" : Date.CAL_FRENCH,
"JULIAN" : Date.CAL_JULIAN,
"HEBREW" : Date.CAL_HEBREW,
}
QUALITY_MAP = {
'CAL' : Date.QUAL_CALCULATED,
'INT' : Date.QUAL_CALCULATED,
'EST' : Date.QUAL_ESTIMATED,
}
SEX_MAP = {
'F' : Person.FEMALE,
'M' : Person.MALE,
}
FAMILYCONSTANTEVENTS = {
EventType.ANNULMENT : "ANUL",
EventType.DIV_FILING : "DIVF",
EventType.DIVORCE : "DIV",
EventType.CENSUS : "CENS",
EventType.ENGAGEMENT : "ENGA",
EventType.MARR_BANNS : "MARB",
EventType.MARR_CONTR : "MARC",
EventType.MARR_LIC : "MARL",
EventType.MARR_SETTL : "MARS",
EventType.MARRIAGE : "MARR"
}
PERSONALCONSTANTEVENTS = {
EventType.ADOPT : "ADOP",
EventType.ADULT_CHRISTEN : "CHRA",
EventType.BIRTH : "BIRT",
EventType.DEATH : "DEAT",
EventType.BAPTISM : "BAPM",
EventType.BAR_MITZVAH : "BARM",
EventType.BAS_MITZVAH : "BASM",
EventType.BLESS : "BLES",
EventType.BURIAL : "BURI",
EventType.CAUSE_DEATH : "CAUS",
EventType.ORDINATION : "ORDN",
EventType.CENSUS : "CENS",
EventType.CHRISTEN : "CHR" ,
EventType.CONFIRMATION : "CONF",
EventType.CREMATION : "CREM",
EventType.DEGREE : "_DEG",
EventType.DIV_FILING : "DIVF",
EventType.EDUCATION : "EDUC",
EventType.ELECTED : "",
EventType.EMIGRATION : "EMIG",
EventType.FIRST_COMMUN : "FCOM",
EventType.GRADUATION : "GRAD",
EventType.MED_INFO : "_MDCL",
EventType.MILITARY_SERV : "_MILT",
EventType.NATURALIZATION : "NATU",
EventType.NOB_TITLE : "TITL",
EventType.NUM_MARRIAGES : "NMR",
EventType.IMMIGRATION : "IMMI",
EventType.OCCUPATION : "OCCU",
EventType.PROBATE : "PROB",
EventType.PROPERTY : "PROP",
EventType.RELIGION : "RELI",
EventType.RESIDENCE : "RESI",
EventType.RETIREMENT : "RETI",
EventType.WILL : "WILL",
}
FAMILYCONSTANTATTRIBUTES = {
AttributeType.NUM_CHILD : "NCHI",
}
PERSONALCONSTANTATTRIBUTES = {
AttributeType.CASTE : "CAST",
AttributeType.DESCRIPTION : "DSCR",
AttributeType.ID : "IDNO",
AttributeType.NATIONAL : "NATI",
AttributeType.NUM_CHILD : "NCHI",
AttributeType.SSN : "SSN",
}
#-------------------------------------------------------------------------
#
# Gedcom to int constants
#
#-------------------------------------------------------------------------
LDS_STATUS = {
"BIC" : LdsOrd.STATUS_BIC,
"CANCELED" : LdsOrd.STATUS_CANCELED,
"CHILD" : LdsOrd.STATUS_CHILD,
"CLEARED" : LdsOrd.STATUS_CLEARED,
"COMPLETED": LdsOrd.STATUS_COMPLETED,
"DNS" : LdsOrd.STATUS_DNS,
"INFANT" : LdsOrd.STATUS_INFANT,
"PRE-1970" : LdsOrd.STATUS_PRE_1970,
"QUALIFIED": LdsOrd.STATUS_QUALIFIED,
"DNS/CAN" : LdsOrd.STATUS_DNS_CAN,
"STILLBORN": LdsOrd.STATUS_STILLBORN,
"SUBMITTED": LdsOrd.STATUS_SUBMITTED,
"UNCLEARED": LdsOrd.STATUS_UNCLEARED,
}
# table for skipping illegal control chars in GEDCOM import
# Only 09, 0A, 0D are allowed.
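# The mapping sends each disallowed code point to None, so it is suitable for
# use with str.translate to drop those characters from imported text.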
STRIP_DICT = dict.fromkeys(list(range(9))+list(range(11, 13))+list(range(14, 32)))
#-------------------------------------------------------------------------
#
# GEDCOM events to GRAMPS events conversion
#
#-------------------------------------------------------------------------
GED_TO_GRAMPS_EVENT = {}
for __val, __key in PERSONALCONSTANTEVENTS.items():
if __key != "":
GED_TO_GRAMPS_EVENT[__key] = __val
for __val, __key in FAMILYCONSTANTEVENTS.items():
if __key != "":
GED_TO_GRAMPS_EVENT[__key] = __val
GED_TO_GRAMPS_ATTR = {}
for __val, __key in PERSONALCONSTANTATTRIBUTES.items():
if __key != "":
GED_TO_GRAMPS_ATTR[__key] = __val
#-------------------------------------------------------------------------
#
# GEDCOM Date Constants
#
#-------------------------------------------------------------------------
HMONTH = [
"", "ELUL", "TSH", "CSH", "KSL", "TVT", "SHV", "ADR",
"ADS", "NSN", "IYR", "SVN", "TMZ", "AAV", "ELL" ]
FMONTH = [
"", "VEND", "BRUM", "FRIM", "NIVO", "PLUV", "VENT",
"GERM", "FLOR", "PRAI", "MESS", "THER", "FRUC", "COMP"]
MONTH = [
"", "JAN", "FEB", "MAR", "APR", "MAY", "JUN",
"JUL", "AUG", "SEP", "OCT", "NOV", "DEC" ]
CALENDAR_MAP = {
Date.CAL_HEBREW : (HMONTH, '@#DHEBREW@'),
Date.CAL_FRENCH : (FMONTH, '@#DFRENCH R@'),
Date.CAL_JULIAN : (MONTH, '@#DJULIAN@'),
Date.CAL_SWEDISH : (MONTH, '@#DUNKNOWN@'),
}
CALENDAR_MAP_PARSESTRING = {
Date.CAL_HEBREW : ' (h)',
Date.CAL_FRENCH : ' (f)',
Date.CAL_JULIAN : ' (j)',
Date.CAL_SWEDISH : ' (s)',
}
#how wrong calendar use is shown
CALENDAR_MAP_WRONGSTRING = {
Date.CAL_HEBREW : ' <hebrew>',
Date.CAL_FRENCH : ' <french rep>',
Date.CAL_JULIAN : ' <julian>',
Date.CAL_SWEDISH : ' <swedish>',
}
DATE_MODIFIER = {
Date.MOD_ABOUT : "ABT",
Date.MOD_BEFORE : "BEF",
Date.MOD_AFTER : "AFT",
#Date.MOD_INTERPRETED : "INT",
}
DATE_QUALITY = {
Date.QUAL_CALCULATED : "CAL",
Date.QUAL_ESTIMATED : "EST",
}
#-------------------------------------------------------------------------
#
# regular expressions
#
#-------------------------------------------------------------------------
NOTE_RE = re.compile(r"\s*\d+\s+\@(\S+)\@\s+NOTE(.*)$")
CONT_RE = re.compile(r"\s*\d+\s+CONT\s?(.*)$")
CONC_RE = re.compile(r"\s*\d+\s+CONC\s?(.*)$")
PERSON_RE = re.compile(r"\s*\d+\s+\@(\S+)\@\s+INDI(.*)$")
MOD = re.compile(r"\s*(INT|EST|CAL)\s+(.*)$")
CAL = re.compile(r"\s*(ABT|BEF|AFT)?\s*@#D?([^@]+)@\s*(.*)$")
RANGE = re.compile(r"\s*BET\s+@#D?([^@]+)@\s*(.*)\s+AND\s+@#D?([^@]+)@\s*(.*)$")
RANGE1 = re.compile(r"\s*BET\s+\s*(.*)\s+AND\s+@#D?([^@]+)@\s*(.*)$")
RANGE2 = re.compile(r"\s*BET\s+@#D?([^@]+)@\s*(.*)\s+AND\s+\s*(.*)$")
SPAN = re.compile(r"\s*FROM\s+@#D?([^@]+)@\s*(.*)\s+TO\s+@#D?([^@]+)@\s*(.*)$")
SPAN1 = re.compile(r"\s*FROM\s+\s*(.*)\s+TO\s+@#D?([^@]+)@\s*(.*)$")
SPAN2 = re.compile(r"\s*FROM\s+@#D?([^@]+)@\s*(.*)\s+TO\s+\s*(.*)$")
NAME_RE = re.compile(r"/?([^/]*)(/([^/]*)(/([^/]*))?)?")
SURNAME_RE = re.compile(r"/([^/]*)/([^/]*)")
#-----------------------------------------------------------------------
#
# GedcomDateParser
#
#-----------------------------------------------------------------------
class GedcomDateParser(DateParser):
month_to_int = {
'jan' : 1, 'feb' : 2, 'mar' : 3, 'apr' : 4,
'may' : 5, 'jun' : 6, 'jul' : 7, 'aug' : 8,
'sep' : 9, 'oct' : 10, 'nov' : 11, 'dec' : 12,
}
#-------------------------------------------------------------------------
#
# Lexer - serves as the lexical analysis engine
#
#-------------------------------------------------------------------------
class Lexer(object):
def __init__(self, ifile):
self.ifile = ifile
self.current_list = []
self.eof = False
self.cnv = None
self.cnt = 0
self.index = 0
self.func_map = {
TOKEN_CONT : self.__fix_token_cont,
TOKEN_CONC : self.__fix_token_conc,
}
def readline(self):
if len(self.current_list) <= 1 and not self.eof:
self.__readahead()
try:
return GedLine(self.current_list.pop())
except:
LOG.debug('Error in reading Gedcom line', exc_info=True)
return None
def __fix_token_cont(self, data):
line = self.current_list[0]
new_value = line[2] + '\n' + data[2]
self.current_list[0] = (line[0], line[1], new_value, line[3], line[4])
def __fix_token_conc(self, data):
line = self.current_list[0]
if len(line[2]) == 4:
# This deals with lines of the form
# 0 @<XREF:NOTE>@ NOTE
# 1 CONC <SUBMITTER TEXT>
# The previous line contains only a tag and no data so concat a
# space to separate the new line from the tag. This prevents the
# first letter of the new line being lost later
# in _GedcomParse.__parse_record
new_value = line[2] + ' ' + data[2]
else:
new_value = line[2] + data[2]
self.current_list[0] = (line[0], line[1], new_value, line[3], line[4])
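# Illustrative sketch (not part of the original module): given this input,
#     1 NOTE He was born in Spring
#     2 CONC field, Illinois
#     2 CONT Second paragraph
# __fix_token_conc joins the CONC text directly onto the pending NOTE value
# ("...Springfield, Illinois"), while __fix_token_cont appends the CONT text
# after a newline, so the buffered value ends up as
# "He was born in Springfield, Illinois\nSecond paragraph".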
def __readahead(self):
while len(self.current_list) < 5:
line = self.ifile.readline()
self.index += 1
if not line:
self.eof = True
return
try:
# According to the GEDCOM 5.5 standard,
# Chapter 1 subsection Grammar
#"leading whitespace preceeding a GEDCOM line should be ignored"
# We will also strip the terminator which is any combination
# of carriage_return and line_feed
line = line.lstrip(' ').rstrip('\n\r')
# split into level+delim+rest
line = line.partition(' ')
level = int(line[0])
# there should only be one space after the level,
# but we can ignore more,
line = line[2].lstrip(' ')
# then split into tag+delim+line_value
# or xref_id+delim+rest
# the xref_id can have spaces in it
if line.startswith('@'):
line = line.split('@', 2)
# line is now [None, alphanum+pointer_string, rest]
tag = '@' + line[1] + '@'
line_value = line[2].lstrip()
## Ignore meaningless @IDENT@ on CONT or CONC line
## as noted at http://www.tamurajones.net/IdentCONT.xhtml
if (line_value.lstrip().startswith("CONT ") or
line_value.lstrip().startswith("CONC ")):
line = line_value.lstrip().partition(' ')
tag = line[0]
line_value = line[2]
else:
line = line.partition(' ')
tag = line[0]
line_value = line[2]
except:
continue
token = TOKENS.get(tag, TOKEN_UNKNOWN)
data = (level, token, line_value, tag, self.index)
func = self.func_map.get(data[1])
if func:
func(data)
else:
self.current_list.insert(0, data)
def clean_up(self):
"""
Break circular references to parsing methods stored in dictionaries
to aid garbage collection
"""
for key in list(self.func_map.keys()):
del self.func_map[key]
del self.func_map
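# Illustrative sketch (assumption, not from the original source): readline()
# yields GedLine objects built from (level, token, value, tag, line_number)
# tuples, e.g.
#     "1 NAME John /Smith/"  ->  level=1, token=TOKEN_NAME,
#                                value='John /Smith/', tag='NAME'
#     "0 @I1@ INDI"          ->  level=0, tag='@I1@', value='INDI'
# (the second form is turned into TOKEN_ID with token_text 'I1' by
# GedLine.__init__ below).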
#-----------------------------------------------------------------------
#
# GedLine - represents a tokenized version of a GEDCOM line
#
#-----------------------------------------------------------------------
class GedLine(object):
"""
GedLine is a class that represents a GEDCOM line. The form of a GEDCOM line
is:
<LEVEL> <TOKEN> <TEXT>
This gets parsed into
Line Number, Level, Token Value, Token Text, and Data
Data is dependent on the context of the Token Value. For most tokens,
this is just a text string. However, for certain tokens where we know
the context, we can provide some value. The current parsed tokens are:
TOKEN_DATE - Date
TOKEN_SEX - Person gender item
TOKEN_UNKNOWN - Check to see if this is a known event
"""
__DATE_CNV = GedcomDateParser()
@staticmethod
def __extract_date(text):
"""
Converts the specified text to a Date object.
"""
dateobj = Date()
text = text.replace('BET ABT','EST BET') # Horrible hack for importing
# illegal GEDCOM from
# Apple Macintosh Classic
# 'Gene' program
# extract out the MOD line
match = MOD.match(text)
mod = ''
if match:
(mod, text) = match.groups()
qual = QUALITY_MAP.get(mod, Date.QUAL_NONE)
mod += ' '
else:
qual = Date.QUAL_NONE
# parse the range if we match, if so, return
match = RANGE.match(text)
match1 = RANGE1.match(text)
match2 = RANGE2.match(text)
if match or match1 or match2:
if match:
(cal1, data1, cal2, data2) = match.groups()
elif match1:
cal1 = Date.CAL_GREGORIAN
(data1, cal2, data2) = match1.groups()
elif match2:
cal2 = Date.CAL_GREGORIAN
(cal1, data1, data2) = match2.groups()
cal1 = CALENDAR_MAP_GEDCOM2XML.get(cal1, Date.CAL_GREGORIAN)
cal2 = CALENDAR_MAP_GEDCOM2XML.get(cal2, Date.CAL_GREGORIAN)
if cal1 != cal2:
# not supported by GRAMPS, so import as text: we construct a string
# that the parser will not parse as a correct date
return GedLine.__DATE_CNV.parse('%sbetween %s%s and %s%s' %
(mod, data1, CALENDAR_MAP_WRONGSTRING.get(cal1, ''),
CALENDAR_MAP_WRONGSTRING.get(cal2, ''), data2))
#add hebrew, ... calendar so that months are recognized
data1 += CALENDAR_MAP_PARSESTRING.get(cal1, '')
data2 += CALENDAR_MAP_PARSESTRING.get(cal2, '')
start = GedLine.__DATE_CNV.parse(data1)
stop = GedLine.__DATE_CNV.parse(data2)
dateobj.set(Date.QUAL_NONE, Date.MOD_RANGE, cal1,
start.get_start_date() + stop.get_start_date())
dateobj.set_quality(qual)
return dateobj
# parse a span if we match
match = SPAN.match(text)
match1 = SPAN1.match(text)
match2 = SPAN2.match(text)
if match or match1 or match2:
if match:
(cal1, data1, cal2, data2) = match.groups()
elif match1:
cal1 = Date.CAL_GREGORIAN
(data1, cal2, data2) = match1.groups()
elif match2:
cal2 = Date.CAL_GREGORIAN
(cal1, data1, data2) = match2.groups()
cal1 = CALENDAR_MAP_GEDCOM2XML.get(cal1, Date.CAL_GREGORIAN)
cal2 = CALENDAR_MAP_GEDCOM2XML.get(cal2, Date.CAL_GREGORIAN)
if cal1 != cal2:
#not supported by GRAMPS, import as text, we construct a string
# that the parser will not parse as a correct date
return GedLine.__DATE_CNV.parse('%sfrom %s%s to %s%s' %
(mod, data1, CALENDAR_MAP_WRONGSTRING.get(cal1, ''),
CALENDAR_MAP_WRONGSTRING.get(cal2, ''), data2))
#add hebrew, ... calendar so that months are recognized
data1 += CALENDAR_MAP_PARSESTRING.get(cal1, '')
data2 += CALENDAR_MAP_PARSESTRING.get(cal2, '')
start = GedLine.__DATE_CNV.parse(data1)
stop = GedLine.__DATE_CNV.parse(data2)
dateobj.set(Date.QUAL_NONE, Date.MOD_SPAN, cal1,
start.get_start_date() + stop.get_start_date())
dateobj.set_quality(qual)
return dateobj
match = CAL.match(text)
if match:
(abt, call, data) = match.groups()
call = CALENDAR_MAP_GEDCOM2XML.get(call, Date.CAL_GREGORIAN)
data += CALENDAR_MAP_PARSESTRING.get(call, '')
if abt:
dateobj = GedLine.__DATE_CNV.parse("%s %s" % (abt, data))
else:
dateobj = GedLine.__DATE_CNV.parse(data)
dateobj.set_quality(qual)
return dateobj
dateobj = GedLine.__DATE_CNV.parse(text)
dateobj.set_quality(qual)
return dateobj
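# Illustrative examples (assumptions, based on the regexes above and on
# CALENDAR_MAP_GEDCOM2XML defined earlier in this module):
#     "FROM @#DJULIAN@ 1700 TO @#DJULIAN@ 1705" -> a Date with MOD_SPAN in
#         the Julian calendar,
#     "BET @#DHEBREW@ 5640 AND @#DHEBREW@ 5645" -> a Date with MOD_RANGE in
#         the Hebrew calendar,
# while plain values with no calendar escape (e.g. "12 JAN 1850") fall
# through to the generic GedcomDateParser at the end of the method.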
def __init__(self, data):
"""
If the level is 0, then this is a top level instance. In this case,
we may find items in the form of:
<LEVEL> @ID@ <ITEM>
If this is not the top level, we check the _MAP_DATA table to see if
there is a conversion function for the data.
"""
self.line = data[4]
self.level = data[0]
self.token = data[1]
self.token_text = data[3].strip()
self.data = data[2]
if self.level == 0:
if (self.token_text and self.token_text[0] == '@' and
self.token_text[-1] == '@'):
self.token = TOKEN_ID
self.token_text = self.token_text[1:-1]
self.data = self.data.strip()
else:
func = _MAP_DATA.get(self.token)
if func:
func(self)
def calc_sex(self):
"""
Converts the data field to a gen.lib token indicating the gender
"""
try:
self.data = SEX_MAP.get(self.data.strip()[0],
Person.UNKNOWN)
except:
self.data = Person.UNKNOWN
def calc_date(self):
"""
Converts the data field to a Date object
"""
self.data = self.__extract_date(self.data)
def calc_unknown(self):
"""
Checks to see if the token maps to a known GEDCOM event. If so, we
change the type from UNKNOWN to TOKEN_GEVENT (gedcom event), and
the data is assigned to the associated GRAMPS EventType
"""
token = GED_TO_GRAMPS_EVENT.get(self.token_text)
if token:
event = Event()
event.set_description(self.data)
event.set_type(token)
self.token = TOKEN_GEVENT
self.data = event
else:
token = GED_TO_GRAMPS_ATTR.get(self.token_text)
if token:
attr = Attribute()
attr.set_value(self.data)
attr.set_type(token)
self.token = TOKEN_ATTR
self.data = attr
def calc_note(self):
gid = self.data.strip()
if len(gid) > 2 and gid[0] == '@' and gid[-1] == '@':
self.token = TOKEN_RNOTE
self.data = gid[1:-1]
def calc_nchi(self):
attr = Attribute()
attr.set_value(self.data)
attr.set_type(AttributeType.NUM_CHILD)
self.data = attr
self.token = TOKEN_ATTR
def calc_attr(self):
attr = Attribute()
attr.set_value(self.data)
attr.set_type((AttributeType.CUSTOM, self.token_text))
self.data = attr
self.token = TOKEN_ATTR
def __repr__(self):
return "%d: %d (%d:%s) %s" % (self.line, self.level, self.token,
self.token_text, self.data)
_MAP_DATA = {
TOKEN_UNKNOWN : GedLine.calc_unknown,
TOKEN_DATE : GedLine.calc_date,
TOKEN_SEX : GedLine.calc_sex,
TOKEN_NOTE : GedLine.calc_note,
TOKEN_NCHI : GedLine.calc_nchi,
TOKEN__STAT : GedLine.calc_attr,
TOKEN__UID : GedLine.calc_attr,
TOKEN_AFN : GedLine.calc_attr,
TOKEN__FSFTID : GedLine.calc_attr,
}
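# Illustrative sketch (assumption, not from the original source): for a line
# such as "1 SEX M", GedLine.__init__ looks up TOKEN_SEX in _MAP_DATA and
# calls calc_sex(), so the resulting GedLine.data is Person.MALE, provided
# SEX_MAP (defined earlier in this module) maps 'M' accordingly.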
#-------------------------------------------------------------------------
#
# GedcomDescription
#
#-------------------------------------------------------------------------
class GedcomDescription(object):
def __init__(self, name):
self.name = name
self.dest = ""
self.adopt = ADOPT_STD
self.conc = CONC_OK
self.altname = ALT_NAME_STD
self.cal = CALENDAR_YES
self.obje = OBJE_YES
self.resi = RESIDENCE_ADDR
self.source_refs = SOURCE_REFS_YES
self.gramps2tag_map = {}
self.tag2gramps_map = {}
self.prefix = PREFIX_YES
self.endl = "\n"
def set_dest(self, val):
self.dest = val
def get_dest(self):
return self.dest
def set_endl(self, val):
self.endl = val.replace('\\r','\r').replace('\\n','\n')
def get_endl(self):
return self.endl
def set_adopt(self, val):
self.adopt = val
def get_adopt(self):
return self.adopt
def set_prefix(self, val):
self.prefix = val
def get_prefix(self):
return self.prefix
def set_conc(self, val):
self.conc = val
def get_conc(self):
return self.conc
def set_alt_name(self, val):
self.altname = val
def get_alt_name(self):
return self.altname
def set_alt_calendar(self, val):
self.cal = val
def get_alt_calendar(self):
return self.cal
def set_obje(self, val):
self.obje = val
def get_obje(self):
return self.obje
def set_resi(self, val):
self.resi = val
def get_resi(self):
return self.resi
def set_source_refs(self, val):
self.source_refs = val
def get_source_refs(self):
return self.source_refs
def add_tag_value(self, tag, value):
self.gramps2tag_map[value] = tag
self.tag2gramps_map[tag] = value
def gramps2tag(self, key):
if key in self.gramps2tag_map:
return self.gramps2tag_map[key]
return ""
def tag2gramps(self, key):
if key in self.tag2gramps_map:
return self.tag2gramps_map[key]
return key
#-------------------------------------------------------------------------
#
# GedcomInfoDB
#
#-------------------------------------------------------------------------
class GedcomInfoDB(object):
def __init__(self):
self.map = {}
self.standard = GedcomDescription("GEDCOM 5.5 standard")
self.standard.set_dest("GEDCOM 5.5")
if sys.version_info[0] < 3:
try:
filepath = os.path.join(DATA_DIR, "gedcom.xml")
ged_file = open(filepath.encode('iso8859-1'), "r")
except:
return
else:
try:
filepath = os.path.join(DATA_DIR, "gedcom.xml")
ged_file = open(filepath, "rb")
except:
return
parser = GedInfoParser(self)
parser.parse(ged_file)
ged_file.close()
def add_description(self, name, obj):
self.map[name] = obj
def get_description(self, name):
if name in self.map:
return self.map[name]
return self.standard
def get_from_source_tag(self, name):
for k, val in self.map.items():
if val.get_dest() == name:
return val
return self.standard
def get_name_list(self):
return ["GEDCOM 5.5 standard"] + sorted(self.map)
#-------------------------------------------------------------------------
#
# GedInfoParser
#
#-------------------------------------------------------------------------
class GedInfoParser(object):
def __init__(self, parent):
self.parent = parent
self.current = None
def parse(self, ged_file):
p = ParserCreate()
p.StartElementHandler = self.startElement
p.ParseFile(ged_file)
def startElement(self, tag, attrs):
if tag == "target":
name = attrs['name']
self.current = GedcomDescription(name)
self.parent.add_description(name, self.current)
elif tag == "dest":
self.current.set_dest(attrs['val'])
elif tag == "endl":
self.current.set_endl(attrs['val'])
elif tag == "adopt":
val = attrs['val']
if val == 'none':
self.current.set_adopt(ADOPT_NONE)
elif val == 'event':
self.current.set_adopt(ADOPT_EVENT)
elif val == 'ftw':
self.current.set_adopt(ADOPT_FTW)
elif val == 'legacy':
self.current.set_adopt(ADOPT_LEGACY)
elif val == 'pedigree':
self.current.set_adopt(ADOPT_PEDI)
elif tag == "conc":
if attrs['val'] == 'broken':
self.current.set_conc(CONC_BROKEN)
elif tag == "alternate_names":
val = attrs['val']
if val == 'none':
self.current.set_alt_name(ALT_NAME_NONE)
elif val == 'event_aka':
self.current.set_alt_name(ALT_NAME_EVENT_AKA)
elif val == 'alias':
self.current.set_alt_name(ALT_NAME_ALIAS)
elif val == 'aka':
self.current.set_alt_name(ALT_NAME_AKA)
elif val == '_alias':
self.current.set_alt_name(ALT_NAME_UALIAS)
elif tag == "calendars":
if attrs['val'] == 'no':
self.current.set_alt_calendar(CALENDAR_NO)
elif tag == "event":
self.current.add_tag_value(attrs['tag'], attrs['value'])
elif tag == "object_support":
if attrs['val'] == 'no':
self.current.set_obje(OBJE_NO)
elif tag == "prefix":
if attrs['val'] == 'no':
self.current.set_obje(PREFIX_NO)
elif tag == "residence":
if attrs['val'] == 'place':
self.current.set_resi(RESIDENCE_PLAC)
elif tag == "source_refs":
if attrs['val'] == 'no':
self.current.set_source_refs(SOURCE_REFS_NO)
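# Illustrative sketch (hypothetical snippet, not from the shipped gedcom.xml):
# a <target> entry such as
#     <target name="Family Tree Maker">
#       <dest val="FTW"/>
#       <adopt val="ftw"/>
#       <conc val="broken"/>
#     </target>
# would create a GedcomDescription named "Family Tree Maker" with
# ADOPT_FTW and CONC_BROKEN behavior.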
#-------------------------------------------------------------------------
#
# File Readers
#
#-------------------------------------------------------------------------
class BaseReader(object):
def __init__(self, ifile, encoding):
self.ifile = ifile
self.enc = encoding
def reset(self):
self.ifile.seek(0)
def readline(self):
if sys.version_info[0] < 3:
line = unicode(self.ifile.readline(),
encoding=self.enc,
errors='replace')
else:
line = self.ifile.readline()
line = line.decode(self.enc, errors='replace')
return line.translate(STRIP_DICT)
class UTF8Reader(BaseReader):
def __init__(self, ifile):
BaseReader.__init__(self, ifile, 'utf8')
self.reset()
def reset(self):
self.ifile.seek(0)
data = self.ifile.read(3)
if data != b"\xef\xbb\xbf":
self.ifile.seek(0)
def readline(self):
line = self.ifile.readline()
if sys.version_info[0] < 3:
line = unicode(line,
encoding=self.enc,
errors='replace')
else:
line = line.decode(self.enc, errors='replace')
return line.translate(STRIP_DICT)
class UTF16Reader(BaseReader):
def __init__(self, ifile):
new_file = codecs.EncodedFile(ifile, 'utf8', 'utf16')
BaseReader.__init__(self, new_file, 'utf16')
self.reset()
def readline(self):
l = self.ifile.readline()
if l.strip():
return l
else:
return self.ifile.readline()
class AnsiReader(BaseReader):
def __init__(self, ifile):
BaseReader.__init__(self, ifile, 'latin1')
class AnselReader(BaseReader):
"""
ANSEL to Unicode Conversion
ANSEL references:
http://lcweb2.loc.gov/diglib/codetables/45.html
http://www.gymel.com/charsets/ANSEL.html
list of ANSEL codes that replicate ASCII
note that DEL (127=0x7F) is a control char
Note: spec allows control-chars that Gramps probably doesn't use
but 10=0x0A _is_ needed (!)
---
Also: there are two additional control chars 0x98,0x9c (unicode same)
which we also ignore for now (start/end of string, or sort sequence)
---
TODO: should we allow TAB, as a Gramps extension?
"""
__printable_ascii = list(map(chr, list(range(32, 127)))) # note: up thru 126
__use_ASCII = list(map(chr, [10, 27, 29 , 30, 31])) + __printable_ascii
# mappings of single byte ANSEL codes to unicode
__onebyte = {
b'\xA1' : '\u0141', b'\xA2' : '\u00d8', b'\xA3' : '\u0110',
b'\xA4' : '\u00de', b'\xA5' : '\u00c6', b'\xA6' : '\u0152',
b'\xA7' : '\u02b9', b'\xA8' : '\u00b7', b'\xA9' : '\u266d',
b'\xAA' : '\u00ae', b'\xAB' : '\u00b1', b'\xAC' : '\u01a0',
b'\xAD' : '\u01af', b'\xAE' : '\u02bc', b'\xB0' : '\u02bb',
b'\xB1' : '\u0142', b'\xB2' : '\u00f8', b'\xB3' : '\u0111',
b'\xB4' : '\u00fe', b'\xB5' : '\u00e6', b'\xB6' : '\u0153',
b'\xB7' : '\u02ba', b'\xB8' : '\u0131', b'\xB9' : '\u00a3',
b'\xBA' : '\u00f0', b'\xBC' : '\u01a1', b'\xBD' : '\u01b0',
b'\xC0' : '\u00b0', b'\xC1' : '\u2113', b'\xC2' : '\u2117',
b'\xC3' : '\u00a9', b'\xC4' : '\u266f', b'\xC5' : '\u00bf',
b'\xC6' : '\u00a1', b'\xC7' : '\u00df', b'\xC8' : '\u20ac',
}
# combining forms (in ANSEL, they precede the modified ASCII character
# whereas the unicode combining term follows the character modified)
# Note: unicode allows multiple modifiers, but ANSEL may not (TBD?),
# so we ignore multiple combining forms in this module
# 8d & 8e are zero-width joiner (ZWJ), and zero-width non-joiner ZWNJ
# (strange things) probably not commonly needed for our purposes, unless one
# starts writing Persian (or???) poetry in ANSEL
__acombiners = {
b'\x8D' : '\u200d', b'\x8E' : '\u200c', b'\xE0' : '\u0309',
b'\xE1' : '\u0300', b'\xE2' : '\u0301', b'\xE3' : '\u0302',
b'\xE4' : '\u0303', b'\xE5' : '\u0304', b'\xE6' : '\u0306',
b'\xE7' : '\u0307', b'\xE8' : '\u0308', b'\xE9' : '\u030c',
b'\xEA' : '\u030a', b'\xEB' : '\ufe20', b'\xEC' : '\ufe21',
b'\xED' : '\u0315', b'\xEE' : '\u030b', b'\xEF' : '\u0310',
b'\xF0' : '\u0327', b'\xF1' : '\u0328', b'\xF2' : '\u0323',
b'\xF3' : '\u0324', b'\xF4' : '\u0325', b'\xF5' : '\u0333',
b'\xF6' : '\u0332', b'\xF7' : '\u0326', b'\xF8' : '\u031c',
b'\xF9' : '\u032e', b'\xFA' : '\ufe22', b'\xFB' : '\ufe23',
b'\xFE' : '\u0313',
}
# mappings of two byte (precomposed forms) ANSEL codes to unicode
__twobyte = {
b'\xE0\x41' : '\u1ea2', b'\xE0\x45' : '\u1eba',
b'\xE0\x49' : '\u1ec8', b'\xE0\x4F' : '\u1ece',
b'\xE0\x55' : '\u1ee6', b'\xE0\x59' : '\u1ef6',
b'\xE0\x61' : '\u1ea3', b'\xE0\x65' : '\u1ebb',
b'\xE0\x69' : '\u1ec9', b'\xE0\x6F' : '\u1ecf',
b'\xE0\x75' : '\u1ee7', b'\xE0\x79' : '\u1ef7',
b'\xE1\x41' : '\u00c0', b'\xE1\x45' : '\u00c8',
b'\xE1\x49' : '\u00cc', b'\xE1\x4F' : '\u00d2',
b'\xE1\x55' : '\u00d9', b'\xE1\x57' : '\u1e80',
b'\xE1\x59' : '\u1ef2', b'\xE1\x61' : '\u00e0',
b'\xE1\x65' : '\u00e8', b'\xE1\x69' : '\u00ec',
b'\xE1\x6F' : '\u00f2', b'\xE1\x75' : '\u00f9',
b'\xE1\x77' : '\u1e81', b'\xE1\x79' : '\u1ef3',
b'\xE2\x41' : '\u00c1', b'\xE2\x43' : '\u0106',
b'\xE2\x45' : '\u00c9', b'\xE2\x47' : '\u01f4',
b'\xE2\x49' : '\u00cd', b'\xE2\x4B' : '\u1e30',
b'\xE2\x4C' : '\u0139', b'\xE2\x4D' : '\u1e3e',
b'\xE2\x4E' : '\u0143', b'\xE2\x4F' : '\u00d3',
b'\xE2\x50' : '\u1e54', b'\xE2\x52' : '\u0154',
b'\xE2\x53' : '\u015a', b'\xE2\x55' : '\u00da',
b'\xE2\x57' : '\u1e82', b'\xE2\x59' : '\u00dd',
b'\xE2\x5A' : '\u0179', b'\xE2\x61' : '\u00e1',
b'\xE2\x63' : '\u0107', b'\xE2\x65' : '\u00e9',
b'\xE2\x67' : '\u01f5', b'\xE2\x69' : '\u00ed',
b'\xE2\x6B' : '\u1e31', b'\xE2\x6C' : '\u013a',
b'\xE2\x6D' : '\u1e3f', b'\xE2\x6E' : '\u0144',
b'\xE2\x6F' : '\u00f3', b'\xE2\x70' : '\u1e55',
b'\xE2\x72' : '\u0155', b'\xE2\x73' : '\u015b',
b'\xE2\x75' : '\u00fa', b'\xE2\x77' : '\u1e83',
b'\xE2\x79' : '\u00fd', b'\xE2\x7A' : '\u017a',
b'\xE2\xA5' : '\u01fc', b'\xE2\xB5' : '\u01fd',
b'\xE3\x41' : '\u00c2', b'\xE3\x43' : '\u0108',
b'\xE3\x45' : '\u00ca', b'\xE3\x47' : '\u011c',
b'\xE3\x48' : '\u0124', b'\xE3\x49' : '\u00ce',
b'\xE3\x4A' : '\u0134', b'\xE3\x4F' : '\u00d4',
b'\xE3\x53' : '\u015c', b'\xE3\x55' : '\u00db',
b'\xE3\x57' : '\u0174', b'\xE3\x59' : '\u0176',
b'\xE3\x5A' : '\u1e90', b'\xE3\x61' : '\u00e2',
b'\xE3\x63' : '\u0109', b'\xE3\x65' : '\u00ea',
b'\xE3\x67' : '\u011d', b'\xE3\x68' : '\u0125',
b'\xE3\x69' : '\u00ee', b'\xE3\x6A' : '\u0135',
b'\xE3\x6F' : '\u00f4', b'\xE3\x73' : '\u015d',
b'\xE3\x75' : '\u00fb', b'\xE3\x77' : '\u0175',
b'\xE3\x79' : '\u0177', b'\xE3\x7A' : '\u1e91',
b'\xE4\x41' : '\u00c3', b'\xE4\x45' : '\u1ebc',
b'\xE4\x49' : '\u0128', b'\xE4\x4E' : '\u00d1',
b'\xE4\x4F' : '\u00d5', b'\xE4\x55' : '\u0168',
b'\xE4\x56' : '\u1e7c', b'\xE4\x59' : '\u1ef8',
b'\xE4\x61' : '\u00e3', b'\xE4\x65' : '\u1ebd',
b'\xE4\x69' : '\u0129', b'\xE4\x6E' : '\u00f1',
b'\xE4\x6F' : '\u00f5', b'\xE4\x75' : '\u0169',
b'\xE4\x76' : '\u1e7d', b'\xE4\x79' : '\u1ef9',
b'\xE5\x41' : '\u0100', b'\xE5\x45' : '\u0112',
b'\xE5\x47' : '\u1e20', b'\xE5\x49' : '\u012a',
b'\xE5\x4F' : '\u014c', b'\xE5\x55' : '\u016a',
b'\xE5\x61' : '\u0101', b'\xE5\x65' : '\u0113',
b'\xE5\x67' : '\u1e21', b'\xE5\x69' : '\u012b',
b'\xE5\x6F' : '\u014d', b'\xE5\x75' : '\u016b',
b'\xE5\xA5' : '\u01e2', b'\xE5\xB5' : '\u01e3',
b'\xE6\x41' : '\u0102', b'\xE6\x45' : '\u0114',
b'\xE6\x47' : '\u011e', b'\xE6\x49' : '\u012c',
b'\xE6\x4F' : '\u014e', b'\xE6\x55' : '\u016c',
b'\xE6\x61' : '\u0103', b'\xE6\x65' : '\u0115',
b'\xE6\x67' : '\u011f', b'\xE6\x69' : '\u012d',
b'\xE6\x6F' : '\u014f', b'\xE6\x75' : '\u016d',
b'\xE7\x42' : '\u1e02', b'\xE7\x43' : '\u010a',
b'\xE7\x44' : '\u1e0a', b'\xE7\x45' : '\u0116',
b'\xE7\x46' : '\u1e1e', b'\xE7\x47' : '\u0120',
b'\xE7\x48' : '\u1e22', b'\xE7\x49' : '\u0130',
b'\xE7\x4D' : '\u1e40', b'\xE7\x4E' : '\u1e44',
b'\xE7\x50' : '\u1e56', b'\xE7\x52' : '\u1e58',
b'\xE7\x53' : '\u1e60', b'\xE7\x54' : '\u1e6a',
b'\xE7\x57' : '\u1e86', b'\xE7\x58' : '\u1e8a',
b'\xE7\x59' : '\u1e8e', b'\xE7\x5A' : '\u017b',
b'\xE7\x62' : '\u1e03', b'\xE7\x63' : '\u010b',
b'\xE7\x64' : '\u1e0b', b'\xE7\x65' : '\u0117',
b'\xE7\x66' : '\u1e1f', b'\xE7\x67' : '\u0121',
b'\xE7\x68' : '\u1e23', b'\xE7\x6D' : '\u1e41',
b'\xE7\x6E' : '\u1e45', b'\xE7\x70' : '\u1e57',
b'\xE7\x72' : '\u1e59', b'\xE7\x73' : '\u1e61',
b'\xE7\x74' : '\u1e6b', b'\xE7\x77' : '\u1e87',
b'\xE7\x78' : '\u1e8b', b'\xE7\x79' : '\u1e8f',
b'\xE7\x7A' : '\u017c', b'\xE8\x41' : '\u00c4',
b'\xE8\x45' : '\u00cb', b'\xE8\x48' : '\u1e26',
b'\xE8\x49' : '\u00cf', b'\xE8\x4F' : '\u00d6',
b'\xE8\x55' : '\u00dc', b'\xE8\x57' : '\u1e84',
b'\xE8\x58' : '\u1e8c', b'\xE8\x59' : '\u0178',
b'\xE8\x61' : '\u00e4', b'\xE8\x65' : '\u00eb',
b'\xE8\x68' : '\u1e27', b'\xE8\x69' : '\u00ef',
b'\xE8\x6F' : '\u00f6', b'\xE8\x74' : '\u1e97',
b'\xE8\x75' : '\u00fc', b'\xE8\x77' : '\u1e85',
b'\xE8\x78' : '\u1e8d', b'\xE8\x79' : '\u00ff',
b'\xE9\x41' : '\u01cd', b'\xE9\x43' : '\u010c',
b'\xE9\x44' : '\u010e', b'\xE9\x45' : '\u011a',
b'\xE9\x47' : '\u01e6', b'\xE9\x49' : '\u01cf',
b'\xE9\x4B' : '\u01e8', b'\xE9\x4C' : '\u013d',
b'\xE9\x4E' : '\u0147', b'\xE9\x4F' : '\u01d1',
b'\xE9\x52' : '\u0158', b'\xE9\x53' : '\u0160',
b'\xE9\x54' : '\u0164', b'\xE9\x55' : '\u01d3',
b'\xE9\x5A' : '\u017d', b'\xE9\x61' : '\u01ce',
b'\xE9\x63' : '\u010d', b'\xE9\x64' : '\u010f',
b'\xE9\x65' : '\u011b', b'\xE9\x67' : '\u01e7',
b'\xE9\x69' : '\u01d0', b'\xE9\x6A' : '\u01f0',
b'\xE9\x6B' : '\u01e9', b'\xE9\x6C' : '\u013e',
b'\xE9\x6E' : '\u0148', b'\xE9\x6F' : '\u01d2',
b'\xE9\x72' : '\u0159', b'\xE9\x73' : '\u0161',
b'\xE9\x74' : '\u0165', b'\xE9\x75' : '\u01d4',
b'\xE9\x7A' : '\u017e', b'\xEA\x41' : '\u00c5',
b'\xEA\x61' : '\u00e5', b'\xEA\x75' : '\u016f',
b'\xEA\x77' : '\u1e98', b'\xEA\x79' : '\u1e99',
b'\xEA\xAD' : '\u016e', b'\xEE\x4F' : '\u0150',
b'\xEE\x55' : '\u0170', b'\xEE\x6F' : '\u0151',
b'\xEE\x75' : '\u0171', b'\xF0\x20' : '\u00b8',
b'\xF0\x43' : '\u00c7', b'\xF0\x44' : '\u1e10',
b'\xF0\x47' : '\u0122', b'\xF0\x48' : '\u1e28',
b'\xF0\x4B' : '\u0136', b'\xF0\x4C' : '\u013b',
b'\xF0\x4E' : '\u0145', b'\xF0\x52' : '\u0156',
b'\xF0\x53' : '\u015e', b'\xF0\x54' : '\u0162',
b'\xF0\x63' : '\u00e7', b'\xF0\x64' : '\u1e11',
b'\xF0\x67' : '\u0123', b'\xF0\x68' : '\u1e29',
b'\xF0\x6B' : '\u0137', b'\xF0\x6C' : '\u013c',
b'\xF0\x6E' : '\u0146', b'\xF0\x72' : '\u0157',
b'\xF0\x73' : '\u015f', b'\xF0\x74' : '\u0163',
b'\xF1\x41' : '\u0104', b'\xF1\x45' : '\u0118',
b'\xF1\x49' : '\u012e', b'\xF1\x4F' : '\u01ea',
b'\xF1\x55' : '\u0172', b'\xF1\x61' : '\u0105',
b'\xF1\x65' : '\u0119', b'\xF1\x69' : '\u012f',
b'\xF1\x6F' : '\u01eb', b'\xF1\x75' : '\u0173',
b'\xF2\x41' : '\u1ea0', b'\xF2\x42' : '\u1e04',
b'\xF2\x44' : '\u1e0c', b'\xF2\x45' : '\u1eb8',
b'\xF2\x48' : '\u1e24', b'\xF2\x49' : '\u1eca',
b'\xF2\x4B' : '\u1e32', b'\xF2\x4C' : '\u1e36',
b'\xF2\x4D' : '\u1e42', b'\xF2\x4E' : '\u1e46',
b'\xF2\x4F' : '\u1ecc', b'\xF2\x52' : '\u1e5a',
b'\xF2\x53' : '\u1e62', b'\xF2\x54' : '\u1e6c',
b'\xF2\x55' : '\u1ee4', b'\xF2\x56' : '\u1e7e',
b'\xF2\x57' : '\u1e88', b'\xF2\x59' : '\u1ef4',
b'\xF2\x5A' : '\u1e92', b'\xF2\x61' : '\u1ea1',
b'\xF2\x62' : '\u1e05', b'\xF2\x64' : '\u1e0d',
b'\xF2\x65' : '\u1eb9', b'\xF2\x68' : '\u1e25',
b'\xF2\x69' : '\u1ecb', b'\xF2\x6B' : '\u1e33',
b'\xF2\x6C' : '\u1e37', b'\xF2\x6D' : '\u1e43',
b'\xF2\x6E' : '\u1e47', b'\xF2\x6F' : '\u1ecd',
b'\xF2\x72' : '\u1e5b', b'\xF2\x73' : '\u1e63',
b'\xF2\x74' : '\u1e6d', b'\xF2\x75' : '\u1ee5',
b'\xF2\x76' : '\u1e7f', b'\xF2\x77' : '\u1e89',
b'\xF2\x79' : '\u1ef5', b'\xF2\x7A' : '\u1e93',
b'\xF3\x55' : '\u1e72', b'\xF3\x75' : '\u1e73',
b'\xF4\x41' : '\u1e00', b'\xF4\x61' : '\u1e01',
b'\xF9\x48' : '\u1e2a', b'\xF9\x68' : '\u1e2b',
}
@staticmethod
def __ansel_to_unicode(s):
""" Convert an ANSEL encoded string to unicode """
buff = StringIO()
while s:
if ord(s[0]) < 128:
if s[0] in AnselReader.__use_ASCII:
head = s[0]
else:
# substitute space for disallowed (control) chars
head = ' '
s = s[1:]
else:
if s[0:2] in AnselReader.__twobyte:
head = AnselReader.__twobyte[s[0:2]]
s = s[2:]
elif s[0] in AnselReader.__onebyte:
head = AnselReader.__onebyte[s[0]]
s = s[1:]
elif s[0] in AnselReader.__acombiners:
c = AnselReader.__acombiners[s[0]]
# always consume the combiner
s = s[1:]
next = s[0]
if next in AnselReader.__printable_ascii:
# consume next as well
s = s[1:]
# unicode: combiner follows base-char
head = next + c
else:
# just drop the unexpected combiner
continue
else:
head = '\ufffd' # "Replacement Char"
s = s[1:]
buff.write(head.encode("utf-8"))
if sys.version_info[0] < 3:
ans = unicode(buff.getvalue(), "utf-8")
else:
ans = buff.getvalue().decode("utf-8")
buff.close()
return ans
def __init__(self, ifile):
BaseReader.__init__(self, ifile, "")
def readline(self):
return self.__ansel_to_unicode(self.ifile.readline())
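# Illustrative examples (assumptions, taken from the tables above): the
# single byte b'\xB5' decodes to U+00E6 (ae ligature), the pair b'\xE2\x65'
# decodes to the precomposed U+00E9, and an acute combiner b'\xE2' followed
# by a plain ASCII letter not covered by __twobyte is emitted as that letter
# followed by the combining accent U+0301.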
#-------------------------------------------------------------------------
#
# CurrentState
#
#-------------------------------------------------------------------------
class CurrentState(object):
"""
Keep track of the current state variables.
"""
def __init__(self, person=None, level=0, event=None, event_ref=None):
"""
Initialize the object.
"""
self.name_cnt = 0
self.person = person
self.family = None
self.level = level
self.event = event
self.event_ref = event_ref
self.source_ref = None
self.citation = None
self.note = None
self.lds_ord = None
self.msg = ""
self.primary = False # _PRIM tag on an INDI.FAMC tag
self.filename = ""
self.title = ""
self.addr = None
self.res = None
self.source = None
self.ftype = None
self.pf = None # method for parsing places
self.location = None
self.place_fields = None # method for parsing places
self.ref = None # PersonRef
self.handle = None #
self.form = "" # Multimedia format
self.frel = None # Child relation to father
self.mrel = None
self.repo = None
self.attr = None
self.obj = None
self.name = ""
self.ignore = False
self.repo_ref = None
self.place = None
self.media = None
def __getattr__(self, name):
"""
Return the value associated with the specified attribute.
"""
return self.__dict__.get(name)
def __setattr__(self, name, value):
"""
Set the value associated with the specified attribute.
"""
self.__dict__[name] = value
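# Note (illustrative, not from the original source): because of the
# __getattr__ fallback above, reading any attribute that was never assigned
# on a CurrentState instance quietly returns None instead of raising
# AttributeError, which the parsing code below relies on.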
#-------------------------------------------------------------------------
#
# PlaceParser
#
#-------------------------------------------------------------------------
class PlaceParser(object):
"""
Provide the ability to parse GEDCOM FORM statements for places, and
then parse the line of text, mapping the text components to Location
values based on the FORM statement.
"""
__field_map = {
'addr' : Location.set_street,
'subdivision' : Location.set_street,
'addr1' : Location.set_street,
'adr1' : Location.set_street,
'street' : Location.set_street,
'addr2' : Location.set_locality,
'adr2' : Location.set_locality,
'locality' : Location.set_locality,
'neighborhood' : Location.set_locality,
'city' : Location.set_city,
'town' : Location.set_city,
'village' : Location.set_city,
'county' : Location.set_county,
'country' : Location.set_country,
'state' : Location.set_state,
'state/province': Location.set_state,
'region' : Location.set_state,
'province' : Location.set_state,
'area code' : Location.set_postal_code,
'post code' : Location.set_postal_code,
'zip code' : Location.set_postal_code,
}
def __init__(self, line=None):
self.parse_function = []
if line:
self.parse_form(line)
def parse_form(self, line):
"""
Parses the GEDCOM PLAC.FORM into a list of function
pointers (if possible). It does this by mapping the text strings
(separated by commas) to the corresponding Location
method via the __field_map variable
"""
for item in line.data.split(','):
item = item.lower().strip()
fcn = self.__field_map.get(item, lambda x, y: None)
self.parse_function.append(fcn)
def load_place(self, place_import, place, text):
"""
Takes the text string representing a place, splits it into
its subcomponents (comma separated), and calls the appropriate
function based on its position, depending on the parsed value
from the FORM statement.
"""
items = [item.strip() for item in text.split(',')]
if len(items) != len(self.parse_function):
return
index = 0
loc = Location()
for item in items:
self.parse_function[index](loc, item)
index += 1
location = (loc.get_street(),
loc.get_locality(),
loc.get_parish(),
loc.get_city(),
loc.get_county(),
loc.get_state(),
loc.get_country())
place_import.store_location(location, place.handle)
for level, name in enumerate(location):
if name:
break
place.set_name(name)
type_num = 7 - level if name else PlaceType.UNKNOWN
place.set_type(PlaceType(type_num))
code = loc.get_postal_code()
place.set_code(code)
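# Illustrative sketch (assumption, not from the original source): with a
# PLAC.FORM of "City, County, State, Country", load_place() applied to
# "Springfield, Sangamon, Illinois, USA" calls set_city, set_county,
# set_state and set_country in that order; if the number of comma-separated
# items does not match the FORM, the text is silently ignored.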
#-------------------------------------------------------------------------
#
# IdFinder
#
#-------------------------------------------------------------------------
class IdFinder(object):
"""
Provide method of finding the next available ID.
"""
def __init__(self, keys, prefix):
"""
Initialize the object.
"""
self.ids = set(keys)
self.index = 0
self.prefix = prefix
def find_next(self):
"""
Return the next available GRAMPS ID for an Event object based
on the ID prefix passed to the constructor.
@return: Returns the next available index
@rtype: str
"""
index = self.prefix % self.index
while str(index) in self.ids:
self.index += 1
index = self.prefix % self.index
self.ids.add(index)
self.index += 1
return index
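# Illustrative sketch (assuming the usual Gramps event prefix 'E%04d'):
#     finder = IdFinder({'E0000', 'E0002'}, 'E%04d')
#     finder.find_next()   # -> 'E0001' (skips the used 'E0000')
#     finder.find_next()   # -> 'E0003' (skips the used 'E0002')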
#-------------------------------------------------------------------------
#
# IdMapper
#
#-------------------------------------------------------------------------
class IdMapper(object):
def __init__(self, trans, find_next, id2user_format):
self.trans = trans
self.find_next = find_next
self.id2user_format = id2user_format
self.swap = {}
def __getitem__(self, gid):
if gid == "":
# We need to find the next gramps ID provided it is not already
# the target of a swap
new_val = self.find_next()
while new_val in list(self.swap.values()):
new_val = self.find_next()
else:
# remove any @ signs
gid = self.clean(gid)
if gid in self.swap:
return self.swap[gid]
else:
# now standardise the format
formatted_gid = self.id2user_format(gid)
# I1 and I0001 will both format as I0001. If we had already
# encountered I1, it would be in self.swap, so we would already
# have found it. If we had already encountered I0001 and we are
# now looking for I1, it wouldn't be in self.swap, and we now
# find that I0001 is in use, so we have to create a new id.
bformatted_gid = formatted_gid
if isinstance(bformatted_gid, UNITYPE):
bformatted_gid = bformatted_gid.encode('utf-8')
if self.trans.get(bformatted_gid) or \
(formatted_gid in list(self.swap.values())):
new_val = self.find_next()
while new_val in list(self.swap.values()):
new_val = self.find_next()
else:
new_val = formatted_gid
# we need to distinguish between I1 and I0001, so we record the map
# from the original format
self.swap[gid] = new_val
return new_val
def clean(self, gid):
temp = gid.strip()
if len(temp) > 1 and temp[0] == '@' and temp[-1] == '@':
temp = temp[1:-1]
return temp
def map(self):
return self.swap
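# Illustrative sketch (assumption, not from the original source): with the
# default person ID format, pid_map['@I1@'] strips the '@' signs, formats
# 'I1' as 'I0001', and returns it unless 'I0001' is already present in the
# database or is already the target of an earlier swap, in which case a
# fresh ID from find_next() is used; repeated lookups of '@I1@' then return
# the same cached value from self.swap.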
#-------------------------------------------------------------------------
#
# GedcomParser
#
#-------------------------------------------------------------------------
class GedcomParser(UpdateCallback):
"""
Performs the second pass of the GEDCOM parser, which does all the heavy
lifting.
"""
__TRUNC_MSG = _("Your GEDCOM file is corrupted. "
"It appears to have been truncated.")
SyntaxError = "Syntax Error"
BadFile = "Not a GEDCOM file"
@staticmethod
def __find_from_handle(gramps_id, table):
"""
Find a handle corresponding to the specified GRAMPS ID.
The passed table contains the mapping. If the value is found, we return
it, otherwise we create a new handle, store it, and return it.
"""
intid = table.get(gramps_id)
if not intid:
intid = create_id()
table[gramps_id] = intid
return intid
@staticmethod
def __parse_name_personal(text):
"""
Parses a GEDCOM NAME value into a Name structure
"""
name = Name()
match = SURNAME_RE.match(text)
if match:
#/surname/ extra, we assume extra is given name
names = match.groups()
name.set_first_name(names[1].strip())
surn = Surname()
surn.set_surname(names[0].strip())
surn.set_primary()
name.set_surname_list([surn])
else:
try:
names = NAME_RE.match(text).groups()
# given /surname/ extra, we assume extra is suffix
name.set_first_name(names[0].strip())
surn = Surname()
surn.set_surname(names[2].strip())
surn.set_primary()
name.set_surname_list([surn])
name.set_suffix(names[4].strip())
except:
# something strange, set as first name
name.set_first_name(text.strip())
return name
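# Illustrative examples (assumptions, based on the regexes near the top of
# this module):
#     "John /Smith/ Jr" -> first name "John", surname "Smith", suffix "Jr"
#     "/Doe/ Jane"      -> first name "Jane", surname "Doe"
# A value with no '/' delimiters ends up entirely in the first name.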
def __init__(self, dbase, ifile, filename, user, stage_one,
default_source, default_tag_format=None):
UpdateCallback.__init__(self, user.callback)
self.user = user
self.set_total(stage_one.get_line_count())
self.repo2id = {}
self.trans = None
self.errors = []
self.number_of_errors = 0
self.maxpeople = stage_one.get_person_count()
self.dbase = dbase
self.emapper = IdFinder(dbase.get_gramps_ids(EVENT_KEY),
dbase.event_prefix)
self.famc_map = stage_one.get_famc_map()
self.fams_map = stage_one.get_fams_map()
self.place_parser = PlaceParser()
self.inline_srcs = {}
self.media_map = {}
self.genby = ""
self.genvers = ""
self.subm = ""
self.gedmap = GedcomInfoDB()
self.gedsource = self.gedmap.get_from_source_tag('GEDCOM 5.5')
self.use_def_src = default_source
self.func_list = []
if self.use_def_src:
self.def_src = Source()
fname = os.path.basename(filename).split('\\')[-1]
self.def_src.set_title(_("Import from GEDCOM (%s)") % fname)
if default_tag_format:
name = time.strftime(default_tag_format)
tag = self.dbase.get_tag_from_name(name)
if tag:
self.default_tag = tag
else:
self.default_tag = Tag()
self.default_tag.set_name(name)
else:
self.default_tag = None
self.dir_path = os.path.dirname(filename)
self.is_ftw = False
self.groups = None
self.want_parse_warnings = True
self.pid_map = IdMapper(
self.dbase.id_trans,
self.dbase.find_next_person_gramps_id,
self.dbase.id2user_format)
self.fid_map = IdMapper(
self.dbase.fid_trans,
self.dbase.find_next_family_gramps_id,
self.dbase.fid2user_format)
self.sid_map = IdMapper(
self.dbase.sid_trans,
self.dbase.find_next_source_gramps_id,
self.dbase.sid2user_format)
self.oid_map = IdMapper(
self.dbase.oid_trans,
self.dbase.find_next_object_gramps_id,
self.dbase.oid2user_format)
self.rid_map = IdMapper(
self.dbase.rid_trans,
self.dbase.find_next_repository_gramps_id,
self.dbase.rid2user_format)
self.nid_map = IdMapper(
self.dbase.nid_trans,
self.dbase.find_next_note_gramps_id,
self.dbase.nid2user_format)
self.gid2id = {}
self.oid2id = {}
self.sid2id = {}
self.lid2id = {}
self.fid2id = {}
self.rid2id = {}
self.nid2id = {}
self.place_import = PlaceImport(self.dbase)
#
# Parse table for <<SUBMITTER_RECORD>> below the level 0 SUBM tag
#
# n @<XREF:SUBM>@ SUBM {1:1}
# +1 NAME <SUBMITTER_NAME> {1:1}
# +1 <<ADDRESS_STRUCTURE>> {0:1}
# +1 <<MULTIMEDIA_LINK>> {0:M}
# +1 LANG <LANGUAGE_PREFERENCE> {0:3}
# +1 RFN <SUBMITTER_REGISTERED_RFN> {0:1}
# +1 RIN <AUTOMATED_RECORD_ID> {0:1}
# +1 <<CHANGE_DATE>> {0:1}
# (N.B. GEDCOM allows multiple SUBMitter records)
self.subm_parse_tbl = {
# +1 NAME <SUBMITTER_NAME>
TOKEN_NAME : self.__subm_name,
# +1 <<ADDRESS_STRUCTURE>>
TOKEN_ADDR : self.__subm_addr,
TOKEN_PHON : self.__subm_phon,
TOKEN_EMAIL : self.__subm_email,
# +1 <<MULTIMEDIA_LINK>>
# +1 LANG <LANGUAGE_PREFERENCE>
# +1 RFN <SUBMITTER_REGISTERED_RFN>
# +1 RIN <AUTOMATED_RECORD_ID>
# +1 <<CHANGE_DATE>>
TOKEN_CHAN : self.__repo_chan,
}
self.func_list.append(self.subm_parse_tbl)
#
# Parse table for <<INDIVIDUAL_RECORD>> below the level 0 INDI tag
#
# n @<XREF:INDI>@ INDI {1:1}
# +1 RESN <RESTRICTION_NOTICE> {0:1}
# +1 <<PERSONAL_NAME_STRUCTURE>> {0:M}
# +1 SEX <SEX_VALUE> {0:1}
# +1 <<INDIVIDUAL_EVENT_STRUCTURE>> {0:M}
# +1 <<INDIVIDUAL_ATTRIBUTE_STRUCTURE>> {0:M}
# +1 <<LDS_INDIVIDUAL_ORDINANCE>> {0:M}
# +1 <<CHILD_TO_FAMILY_LINK>> {0:M}
# +1 <<SPOUSE_TO_FAMILY_LINK>> {0:M}
# +1 SUBM @<XREF:SUBM>@ {0:M}
# +1 <<ASSOCIATION_STRUCTURE>> {0:M}
# +1 ALIA @<XREF:INDI>@ {0:M}
# +1 ANCI @<XREF:SUBM>@ {0:M}
# +1 DESI @<XREF:SUBM>@ {0:M}
# +1 <<SOURCE_CITATION>> {0:M}
# +1 <<MULTIMEDIA_LINK>> {0:M}
# +1 <<NOTE_STRUCTURE>> {0:M}
# +1 RFN <PERMANENT_RECORD_FILE_NUMBER> {0:1}
# +1 AFN <ANCESTRAL_FILE_NUMBER> {0:1}
# +1 REFN <USER_REFERENCE_NUMBER> {0:M}
# +2 TYPE <USER_REFERENCE_TYPE> {0:1}
# +1 RIN <AUTOMATED_RECORD_ID> {0:1}
# +1 <<CHANGE_DATE>> {0:1}
self.indi_parse_tbl = {
# +1 RESN <RESTRICTION_NOTICE> {0:1}
TOKEN_RESN : self.__person_resn,
# +1 <<PERSONAL_NAME_STRUCTURE>> {0:M}
TOKEN_NAME : self.__person_name,
# +1 SEX <SEX_VALUE> {0:1}
TOKEN_SEX : self.__person_sex,
# +1 <<INDIVIDUAL_EVENT_STRUCTURE>> {0:M}
TOKEN_EVEN : self.__person_even,
TOKEN_GEVENT: self.__person_std_event,
TOKEN_BIRT : self.__person_birt,
TOKEN_RELI : self.__person_reli,
TOKEN_ADOP : self.__person_adop,
TOKEN_DEAT : self.__person_deat,
# +1 <<INDIVIDUAL_ATTRIBUTE_STRUCTURE>> {0:M}
# +1 AFN <ANCESTRAL_FILE_NUMBER> {0:1}
TOKEN_ATTR : self.__person_std_attr,
TOKEN_FACT : self.__person_fact,
#+1 <<LDS_INDIVIDUAL_ORDINANCE>> {0:M}
TOKEN_BAPL : self.__person_bapl,
TOKEN_CONL : self.__person_conl,
TOKEN_ENDL : self.__person_endl,
TOKEN_SLGC : self.__person_slgc,
#+1 <<CHILD_TO_FAMILY_LINK>> {0:M}
TOKEN_FAMC : self.__person_famc,
# +1 <<SPOUSE_TO_FAMILY_LINK>> {0:M}
TOKEN_FAMS : self.__person_fams,
# +1 SUBM @<XREF:SUBM>@ {0:M}
TOKEN_SUBM : self.__skip_record,
# +1 <<ASSOCIATION_STRUCTURE>> {0:M}
TOKEN_ASSO : self.__person_asso,
# +1 ALIA @<XREF:INDI>@ {0:M}
TOKEN_ALIA : self.__person_alt_name,
# +1 ANCI @<XREF:SUBM>@ {0:M}
TOKEN_ANCI : self.__skip_record,
# +1 DESI @<XREF:SUBM>@ {0:M}
TOKEN_DESI : self.__skip_record,
# +1 <<SOURCE_CITATION>> {0:M}
TOKEN_SOUR : self.__person_sour,
# +1 <<MULTIMEDIA_LINK>> {0:M}
TOKEN_OBJE : self.__person_object,
# +1 <<NOTE_STRUCTURE>> {0:M}
TOKEN_NOTE : self.__person_note,
TOKEN_RNOTE : self.__person_note,
TOKEN__COMM : self.__person_note,
# +1 RFN <PERMANENT_RECORD_FILE_NUMBER> {0:1}
TOKEN_RFN : self.__person_attr,
# +1 REFN <USER_REFERENCE_NUMBER> {0:M}
# +2 TYPE <USER_REFERENCE_TYPE> {0:1}
TOKEN_REFN : self.__person_attr,
# TYPE should be below REFN, but will work here anyway
TOKEN_TYPE : self.__person_attr,
# +1 RIN <AUTOMATED_RECORD_ID> {0:1}
TOKEN_RIN : self.__person_attr,
# +1 <<CHANGE_DATE>> {0:1}
TOKEN_CHAN : self.__person_chan,
TOKEN_ADDR : self.__person_addr,
TOKEN_PHON : self.__person_phon,
TOKEN_EMAIL : self.__person_email,
TOKEN_URL : self.__person_url,
TOKEN__TODO : self.__skip_record,
TOKEN_TITL : self.__person_titl,
}
self.func_list.append(self.indi_parse_tbl)
self.name_parse_tbl = {
# +1 NPFX <NAME_PIECE_PREFIX> {0:1}
TOKEN_NPFX : self.__name_npfx,
# +1 GIVN <NAME_PIECE_GIVEN> {0:1}
TOKEN_GIVN : self.__name_givn,
# NICK <NAME_PIECE_NICKNAME> {0:1}
TOKEN_NICK : self.__name_nick,
# +1 SPFX <NAME_PIECE_SURNAME_PREFIX {0:1}
TOKEN_SPFX : self.__name_spfx,
# +1 SURN <NAME_PIECE_SURNAME> {0:1}
TOKEN_SURN : self.__name_surn,
# +1 NSFX <NAME_PIECE_SUFFIX> {0:1}
TOKEN_NSFX : self.__name_nsfx,
# +1 <<SOURCE_CITATION>> {0:M}
TOKEN_SOUR : self.__name_sour,
# +1 <<NOTE_STRUCTURE>> {0:M}
TOKEN_NOTE : self.__name_note,
TOKEN_RNOTE : self.__name_note,
# Extensions
TOKEN_ALIA : self.__name_alia,
TOKEN__MARNM : self.__name_marnm,
TOKEN__MAR : self.__name_marnm, # Generated by geni.com
TOKEN__MARN : self.__name_marnm, # Gen'd by BROSKEEP 6.1.31 WIN
TOKEN__AKA : self.__name_aka, # PAF and AncestQuest
TOKEN_TYPE : self.__name_type, # This is legal GEDCOM 5.5.1
TOKEN_BIRT : self.__ignore,
TOKEN_DATE : self.__name_date,
# This handles date as a subsidiary of "1 ALIA" which might be used
# by Family Tree Maker and Reunion, and by cheating (handling a
# lower level from the current parse table) handles date as
# subsidiary to "2 _MARN", "2 _AKAN" and "2 _ADPN" which has been
# found in Brother's keeper.
TOKEN__ADPN : self.__name_adpn,
}
self.func_list.append(self.name_parse_tbl)
#
# Parse table for <<REPOSITORY_RECORD>> below the level 0 REPO tag
#
# n @<XREF:REPO>@ REPO {1:1}
# +1 NAME <NAME_OF_REPOSITORY> {0:1}
# +1 <<ADDRESS_STRUCTURE>> {0:1}
# +1 <<NOTE_STRUCTURE>> {0:M}
# +1 REFN <USER_REFERENCE_NUMBER> {0:M}
# +2 TYPE <USER_REFERENCE_TYPE> {0:1}
# +1 RIN <AUTOMATED_RECORD_ID> {0:1}
# +1 <<CHANGE_DATE>> {0:1}
self.repo_parse_tbl = {
TOKEN_NAME : self.__repo_name,
TOKEN_ADDR : self.__repo_addr,
TOKEN_RIN : self.__ignore,
TOKEN_NOTE : self.__repo_note,
TOKEN_RNOTE : self.__repo_note,
TOKEN_CHAN : self.__repo_chan,
TOKEN_PHON : self.__repo_phon,
TOKEN_EMAIL : self.__repo_email,
TOKEN_WWW : self.__repo_www,
}
self.func_list.append(self.repo_parse_tbl)
self.event_parse_tbl = {
# n TYPE <EVENT_DESCRIPTOR> {0:1}
TOKEN_TYPE : self.__event_type,
# n DATE <DATE_VALUE> {0:1} p.*/*
TOKEN_DATE : self.__event_date,
# n <<PLACE_STRUCTURE>> {0:1} p.*
TOKEN_PLAC : self.__event_place,
# n <<ADDRESS_STRUCTURE>> {0:1} p.*
TOKEN_ADDR : self.__event_addr,
# n AGE <AGE_AT_EVENT> {0:1} p.*
TOKEN_AGE : self.__event_age,
# n AGNC <RESPONSIBLE_AGENCY> {0:1} p.*
TOKEN_AGNC : self.__event_agnc,
# n CAUS <CAUSE_OF_EVENT> {0:1} p.*
TOKEN_CAUS : self.__event_cause,
# n <<SOURCE_CITATION>> {0:M} p.*
TOKEN_SOUR : self.__event_source,
# n <<MULTIMEDIA_LINK>> {0:M} p.*, *
TOKEN_OBJE : self.__event_object,
# n <<NOTE_STRUCTURE>> {0:M} p.
TOKEN_NOTE : self.__event_inline_note,
TOKEN_RNOTE : self.__event_note,
# Other
TOKEN__PRIV : self.__event_privacy,
TOKEN_OFFI : self.__event_note,
TOKEN_PHON : self.__event_phon,
TOKEN__GODP : self.__event_witness,
TOKEN__WITN : self.__event_witness,
TOKEN__WTN : self.__event_witness,
TOKEN_RELI : self.__ignore,
# Not legal, but inserted by PhpGedView
TOKEN_TIME : self.__event_time,
TOKEN_ASSO : self.__ignore,
TOKEN_IGNORE : self.__ignore,
TOKEN_STAT : self.__ignore,
TOKEN_TEMP : self.__ignore,
TOKEN_HUSB : self.__event_husb,
TOKEN_WIFE : self.__event_wife,
TOKEN_FAMC : self.__person_birth_famc,
# Not legal, but inserted by Ultimate Family Tree
TOKEN_CHAN : self.__ignore,
TOKEN_QUAY : self.__ignore,
# Not legal, but inserted by FamilyTreeBuilder
TOKEN_RIN : self.__event_rin,
TOKEN_ATTR : self.__event_attr, # FTB for _UID
TOKEN_EMAIL : self.__event_email, # FTB for RESI events
TOKEN_WWW : self.__event_www, # FTB for RESI events
}
self.func_list.append(self.event_parse_tbl)
self.adopt_parse_tbl = {
TOKEN_TYPE : self.__event_type,
TOKEN__PRIV : self.__event_privacy,
TOKEN_DATE : self.__event_date,
TOKEN_SOUR : self.__event_source,
TOKEN_PLAC : self.__event_place,
TOKEN_ADDR : self.__event_addr,
TOKEN_PHON : self.__event_phon,
TOKEN_CAUS : self.__event_cause,
TOKEN_AGNC : self.__event_agnc,
TOKEN_AGE : self.__event_age,
TOKEN_NOTE : self.__event_note,
TOKEN_RNOTE : self.__event_note,
TOKEN_OFFI : self.__event_note,
TOKEN__GODP : self.__event_witness,
TOKEN__WITN : self.__event_witness,
TOKEN__WTN : self.__event_witness,
TOKEN_RELI : self.__ignore,
TOKEN_TIME : self.__ignore,
TOKEN_ASSO : self.__ignore,
TOKEN_IGNORE : self.__ignore,
TOKEN_STAT : self.__ignore,
TOKEN_TEMP : self.__ignore,
TOKEN_OBJE : self.__event_object,
TOKEN_FAMC : self.__person_adopt_famc,
# Not legal, but inserted by Ultimate Family Tree
TOKEN_CHAN : self.__ignore,
TOKEN_QUAY : self.__ignore,
}
self.func_list.append(self.adopt_parse_tbl)
self.famc_parse_tbl = {
# n FAMC @<XREF:FAM>@ {1:1}
# +1 PEDI <PEDIGREE_LINKAGE_TYPE> {0:M} p.*
TOKEN_PEDI : self.__person_famc_pedi,
# +1 <<NOTE_STRUCTURE>> {0:M} p.*
TOKEN_NOTE : self.__person_famc_note,
TOKEN_RNOTE : self.__person_famc_note,
# Extras
TOKEN__PRIMARY: self.__person_famc_primary,
TOKEN_SOUR : self.__person_famc_sour,
# GEDit
TOKEN_STAT : self.__ignore,
}
self.func_list.append(self.famc_parse_tbl)
self.person_fact_parse_tbl = {
TOKEN_TYPE : self.__person_fact_type,
TOKEN_SOUR : self.__person_attr_source,
TOKEN_NOTE : self.__person_attr_note,
TOKEN_RNOTE : self.__person_attr_note,
}
self.func_list.append(self.person_fact_parse_tbl)
self.person_attr_parse_tbl = {
TOKEN_TYPE : self.__person_attr_type,
TOKEN_CAUS : self.__ignore,
TOKEN_DATE : self.__ignore,
TOKEN_TIME : self.__ignore,
TOKEN_ADDR : self.__ignore,
TOKEN_IGNORE : self.__ignore,
TOKEN_STAT : self.__ignore,
TOKEN_TEMP : self.__ignore,
TOKEN_OBJE : self.__ignore,
TOKEN_SOUR : self.__person_attr_source,
TOKEN_PLAC : self.__person_attr_place,
TOKEN_NOTE : self.__person_attr_note,
TOKEN_RNOTE : self.__person_attr_note,
}
self.func_list.append(self.person_attr_parse_tbl)
self.lds_parse_tbl = {
TOKEN_TEMP : self.__lds_temple,
TOKEN_DATE : self.__lds_date,
TOKEN_FAMC : self.__lds_famc,
TOKEN_FORM : self.__lds_form,
TOKEN_PLAC : self.__lds_plac,
TOKEN_SOUR : self.__lds_sour,
TOKEN_NOTE : self.__lds_note,
TOKEN_RNOTE : self.__lds_note,
TOKEN_STAT : self.__lds_stat,
}
self.func_list.append(self.lds_parse_tbl)
self.asso_parse_tbl = {
TOKEN_RELA : self.__person_asso_rela,
TOKEN_SOUR : self.__person_asso_sour,
TOKEN_NOTE : self.__person_asso_note,
TOKEN_RNOTE : self.__person_asso_note,
}
self.func_list.append(self.asso_parse_tbl)
self.citation_parse_tbl = {
TOKEN_PAGE : self.__citation_page,
TOKEN_DATE : self.__citation_date,
TOKEN_DATA : self.__citation_data,
TOKEN_OBJE : self.__citation_obje,
TOKEN_REFN : self.__citation_refn,
TOKEN_EVEN : self.__citation_even,
TOKEN_IGNORE : self.__ignore,
TOKEN__LKD : self.__ignore,
TOKEN_QUAY : self.__citation_quay,
TOKEN_NOTE : self.__citation_note,
TOKEN_RNOTE : self.__citation_note,
TOKEN_TEXT : self.__citation_data_text,
}
self.func_list.append(self.citation_parse_tbl)
self.object_parse_tbl = {
TOKEN_FORM : self.__object_ref_form,
TOKEN_TITL : self.__object_ref_titl,
TOKEN_FILE : self.__object_ref_file,
TOKEN_NOTE : self.__object_ref_note,
TOKEN_RNOTE : self.__object_ref_note,
TOKEN_IGNORE : self.__ignore,
}
self.func_list.append(self.object_parse_tbl)
self.parse_loc_tbl = {
TOKEN_ADR1 : self.__location_adr1,
TOKEN_ADR2 : self.__location_adr2,
TOKEN_CITY : self.__location_city,
TOKEN_STAE : self.__location_stae,
TOKEN_POST : self.__location_post,
TOKEN_CTRY : self.__location_ctry,
# Not legal GEDCOM - not clear why these are included at this level
TOKEN_ADDR : self.__ignore,
TOKEN_DATE : self.__location_date,
TOKEN_NOTE : self.__location_note,
TOKEN_RNOTE : self.__location_note,
TOKEN__LOC : self.__ignore,
TOKEN__NAME : self.__ignore,
TOKEN_PHON : self.__ignore,
TOKEN_IGNORE : self.__ignore,
}
self.func_list.append(self.parse_loc_tbl)
#
# Parse table for <<FAM_RECORD>> below the level 0 FAM tag
#
# n @<XREF:FAM>@ FAM {1:1}
# +1 <<FAMILY_EVENT_STRUCTURE>> {0:M}
# +1 HUSB @<XREF:INDI>@ {0:1}
# +1 WIFE @<XREF:INDI>@ {0:1}
# +1 CHIL @<XREF:INDI>@ {0:M}
# +1 NCHI <COUNT_OF_CHILDREN> {0:1}
# +1 SUBM @<XREF:SUBM>@ {0:M}
# +1 <<LDS_SPOUSE_SEALING>> {0:M}
# +1 <<SOURCE_CITATION>> {0:M}
# +1 <<MULTIMEDIA_LINK>> {0:M}
# +1 <<NOTE_STRUCTURE>> {0:M}
# +1 REFN <USER_REFERENCE_NUMBER> {0:M}
# +1 RIN <AUTOMATED_RECORD_ID> {0:1}
# +1 <<CHANGE_DATE>> {0:1}
self.family_func = {
# +1 <<FAMILY_EVENT_STRUCTURE>> {0:M}
TOKEN_GEVENT : self.__family_std_event,
TOKEN_EVEN : self.__fam_even,
# +1 HUSB @<XREF:INDI>@ {0:1}
TOKEN_HUSB : self.__family_husb,
# +1 WIFE @<XREF:INDI>@ {0:1}
TOKEN_WIFE : self.__family_wife,
# +1 CHIL @<XREF:INDI>@ {0:M}
TOKEN_CHIL : self.__family_chil,
# +1 NCHI <COUNT_OF_CHILDREN> {0:1}
# +1 SUBM @<XREF:SUBM>@ {0:M}
# +1 <<LDS_SPOUSE_SEALING>> {0:M}
TOKEN_SLGS : self.__family_slgs,
# +1 <<SOURCE_CITATION>> {0:M}
TOKEN_SOUR : self.__family_source,
# +1 <<MULTIMEDIA_LINK>> {0:M}
TOKEN_OBJE : self.__family_object,
# +1 <<NOTE_STRUCTURE>> {0:M}
TOKEN__COMM : self.__family_comm,
TOKEN_NOTE : self.__family_note,
TOKEN_RNOTE : self.__family_note,
# +1 REFN <USER_REFERENCE_NUMBER> {0:M}
TOKEN_REFN : self.__family_cust_attr,
# TYPE should be below REFN, but will work here anyway
TOKEN_TYPE : self.__family_cust_attr,
# +1 RIN <AUTOMATED_RECORD_ID> {0:1}
# +1 <<CHANGE_DATE>> {0:1}
TOKEN_CHAN : self.__family_chan,
TOKEN_ENDL : self.__ignore,
TOKEN_ADDR : self.__ignore,
TOKEN_RIN : self.__family_cust_attr,
TOKEN_SUBM : self.__ignore,
TOKEN_ATTR : self.__family_attr,
}
self.func_list.append(self.family_func)
self.family_rel_tbl = {
TOKEN__FREL : self.__family_frel,
TOKEN__MREL : self.__family_mrel,
TOKEN_ADOP : self.__family_adopt,
TOKEN__STAT : self.__family_stat,
}
self.func_list.append(self.family_rel_tbl)
#
# Parse table for <<SOURCE_RECORD>> below the level 0 SOUR tag
#
# n @<XREF:SOUR>@ SOUR {1:1}
# +1 DATA {0:1}
# +2 EVEN <EVENTS_RECORDED> {0:M}
# +3 DATE <DATE_PERIOD> {0:1}
# +3 PLAC <SOURCE_JURISDICTION_PLACE> {0:1}
# +2 AGNC <RESPONSIBLE_AGENCY> {0:1}
# +2 <<NOTE_STRUCTURE>> {0:M}
# +1 AUTH <SOURCE_ORIGINATOR> {0:1}
# +2 [CONT|CONC] <SOURCE_ORIGINATOR> {0:M}
# +1 TITL <SOURCE_DESCRIPTIVE_TITLE> {0:1}
# +2 [CONT|CONC] <SOURCE_DESCRIPTIVE_TITLE> {0:M}
# +1 ABBR <SOURCE_FILED_BY_ENTRY> {0:1}
# +1 PUBL <SOURCE_PUBLICATION_FACTS> {0:1}
# +2 [CONT|CONC] <SOURCE_PUBLICATION_FACTS> {0:M}
# +1 TEXT <TEXT_FROM_SOURCE> {0:1}
# +2 [CONT|CONC] <TEXT_FROM_SOURCE> {0:M}
# +1 <<SOURCE_REPOSITORY_CITATION>> {0:1}
# +1 <<MULTIMEDIA_LINK>> {0:M}
# +1 <<NOTE_STRUCTURE>> {0:M}
# +1 REFN <USER_REFERENCE_NUMBER> {0:M}
# +2 TYPE <USER_REFERENCE_TYPE> {0:1}
# +1 RIN <AUTOMATED_RECORD_ID> {0:1}
# +1 <<CHANGE_DATE>> {0:1}
self.source_func = {
TOKEN_TITL : self.__source_title,
TOKEN_TAXT : self.__source_taxt_peri,
TOKEN_PERI : self.__source_taxt_peri,
TOKEN_AUTH : self.__source_auth,
TOKEN_PUBL : self.__source_publ,
TOKEN_NOTE : self.__source_note,
TOKEN_RNOTE : self.__source_note,
TOKEN_TEXT : self.__source_text,
TOKEN_ABBR : self.__source_abbr,
TOKEN_REFN : self.__source_attr,
TOKEN_RIN : self.__source_attr,
TOKEN_REPO : self.__source_repo,
TOKEN_OBJE : self.__source_object,
TOKEN_CHAN : self.__source_chan,
TOKEN_MEDI : self.__source_attr,
TOKEN__NAME : self.__source_attr,
TOKEN_DATA : self.__ignore,
# TYPE should be below REFN, but will work here anyway
TOKEN_TYPE : self.__source_attr,
TOKEN_CALN : self.__ignore,
# not legal, but Ultimate Family Tree does this
TOKEN_DATE : self.__ignore,
TOKEN_IGNORE : self.__ignore,
}
self.func_list.append(self.source_func)
#
# Parse table for <<MULTIMEDIA_RECORD>> below the level 0 OBJE tag
#
# n @<XREF:OBJE>@ OBJE {1:1}
# +1 FORM <MULTIMEDIA_FORMAT> {1:1}
# +1 TITL <DESCRIPTIVE_TITLE> {0:1}
# +1 <<NOTE_STRUCTURE>> {0:M}
# +1 <<SOURCE_CITATION>> {0:M}
# +1 BLOB {1:1}
# +2 CONT <ENCODED_MULTIMEDIA_LINE> {1:M}
# +1 OBJE @<XREF:OBJE>@ /* chain to continued object */ {0:1}
# +1 REFN <USER_REFERENCE_NUMBER> {0:M}
# +2 TYPE <USER_REFERENCE_TYPE> {0:1}
# +1 RIN <AUTOMATED_RECORD_ID> {0:1}
self.obje_func = {
TOKEN_FORM : self.__obje_form,
TOKEN_TITL : self.__obje_title,
TOKEN_FILE : self.__obje_file,
TOKEN_NOTE : self.__obje_note,
TOKEN_RNOTE : self.__obje_note,
TOKEN_BLOB : self.__obje_blob,
TOKEN_REFN : self.__obje_refn,
TOKEN_TYPE : self.__obje_type,
TOKEN_RIN : self.__obje_rin,
TOKEN_CHAN : self.__obje_chan,
}
self.func_list.append(self.obje_func)
self.parse_addr_tbl = {
TOKEN_DATE : self.__address_date,
TOKEN_ADR1 : self.__address_adr1,
TOKEN_ADR2 : self.__address_adr2,
TOKEN_CITY : self.__address_city,
TOKEN_STAE : self.__address_state,
TOKEN_POST : self.__address_post,
TOKEN_CTRY : self.__address_country,
TOKEN_PHON : self.__ignore,
TOKEN_SOUR : self.__address_sour,
TOKEN_NOTE : self.__address_note,
TOKEN_RNOTE : self.__address_note,
TOKEN__LOC : self.__ignore,
TOKEN__NAME : self.__ignore,
TOKEN_IGNORE : self.__ignore,
TOKEN_TYPE : self.__ignore,
TOKEN_CAUS : self.__ignore,
}
self.func_list.append(self.parse_addr_tbl)
self.event_cause_tbl = {
TOKEN_SOUR : self.__event_cause_source,
}
self.func_list.append(self.event_cause_tbl)
self.event_place_map = {
TOKEN_NOTE : self.__event_place_note,
TOKEN_RNOTE : self.__event_place_note,
TOKEN_FORM : self.__event_place_form,
# Not legal.
TOKEN_OBJE : self.__event_place_object,
TOKEN_SOUR : self.__event_place_sour,
TOKEN__LOC : self.__ignore,
TOKEN_MAP : self.__place_map,
# Not legal, but generated by Ultimate Family Tree
TOKEN_QUAY : self.__ignore,
}
self.func_list.append(self.event_place_map)
self.place_map_tbl = {
TOKEN_LATI : self.__place_lati,
TOKEN_LONG : self.__place_long,
}
self.func_list.append(self.place_map_tbl)
self.repo_ref_tbl = {
TOKEN_CALN : self.__repo_ref_call,
TOKEN_NOTE : self.__repo_ref_note,
TOKEN_RNOTE : self.__repo_ref_note,
TOKEN_MEDI : self.__repo_ref_medi,
TOKEN_IGNORE : self.__ignore,
}
self.func_list.append(self.repo_ref_tbl)
self.parse_person_adopt = {
TOKEN_ADOP : self.__person_adopt_famc_adopt,
}
self.func_list.append(self.parse_person_adopt)
self.opt_note_tbl = {
TOKEN_RNOTE : self.__optional_note,
TOKEN_NOTE : self.__optional_note,
}
self.func_list.append(self.opt_note_tbl)
self.citation_data_tbl = {
TOKEN_DATE : self.__citation_data_date,
TOKEN_TEXT : self.__citation_data_text,
TOKEN_RNOTE : self.__citation_data_note,
TOKEN_NOTE : self.__citation_data_note,
}
self.func_list.append(self.citation_data_tbl)
self.citation_even_tbl = {
TOKEN_ROLE : self.__citation_even_role,
}
self.func_list.append(self.citation_even_tbl)
#
# Parse table for <<HEADER>> record below the level 0 HEAD tag
#
# n HEAD {1:1}
# +1 SOUR <APPROVED_SYSTEM_ID> {1:1}
# +2 VERS <VERSION_NUMBER> {0:1}
# +2 NAME <NAME_OF_PRODUCT> {0:1}
# +2 CORP <NAME_OF_BUSINESS> {0:1}
# +3 <<ADDRESS_STRUCTURE>> {0:1}
# +2 DATA <NAME_OF_SOURCE_DATA> {0:1}
# +3 DATE <PUBLICATION_DATE> {0:1}
# +3 COPR <COPYRIGHT_SOURCE_DATA> {0:1}
# +1 DEST <RECEIVING_SYSTEM_NAME> {0:1*}
# +1 DATE <TRANSMISSION_DATE> {0:1}
# +2 TIME <TIME_VALUE> {0:1}
# +1 SUBM @<XREF:SUBM>@ {1:1}
# +1 SUBN @<XREF:SUBN>@ {0:1}
# +1 FILE <FILE_NAME> {0:1}
# +1 COPR <COPYRIGHT_GEDCOM_FILE> {0:1}
# +1 GEDC {1:1}
# +2 VERS <VERSION_NUMBER> {1:1}
# +2 FORM <GEDCOM_FORM> {1:1}
# +1 CHAR <CHARACTER_SET> {1:1}
# +2 VERS <VERSION_NUMBER> {0:1}
# +1 LANG <LANGUAGE_OF_TEXT> {0:1}
# +1 PLAC {0:1}
# +2 FORM <PLACE_HIERARCHY> {1:1}
# +1 NOTE <GEDCOM_CONTENT_DESCRIPTION> {0:1}
# +2 [CONT|CONC] <GEDCOM_CONTENT_DESCRIPTION> {0:M}
# * NOTE: Submissions to the Family History Department for Ancestral
# File submission or for clearing temple ordinances must use a
# DESTination of ANSTFILE or TempleReady.
self.head_parse_tbl = {
TOKEN_SOUR : self.__header_sour,
TOKEN_NAME : self.__header_sour_name, # This should be below SOUR
TOKEN_VERS : self.__header_sour_vers, # This should be below SOUR
TOKEN_FILE : self.__header_file,
TOKEN_COPR : self.__header_copr,
TOKEN_SUBM : self.__header_subm,
TOKEN_CORP : self.__ignore, # This should be below SOUR
TOKEN_DATA : self.__ignore, # This should be below SOUR
TOKEN_SUBN : self.__header_subn,
TOKEN_LANG : self.__header_lang,
TOKEN_TIME : self.__ignore, # This should be below DATE
TOKEN_DEST : self.__header_dest,
TOKEN_CHAR : self.__header_char,
TOKEN_GEDC : self.__header_gedc,
TOKEN__SCHEMA: self.__ignore,
TOKEN_PLAC : self.__header_plac,
TOKEN_DATE : self.__header_date,
TOKEN_NOTE : self.__header_note,
}
self.func_list.append(self.head_parse_tbl)
self.header_sour_parse_tbl = {
TOKEN_VERS : self.__header_sour_vers,
TOKEN_NAME : self.__header_sour_name,
TOKEN_CORP : self.__header_sour_corp,
TOKEN_DATA : self.__header_sour_data,
}
self.func_list.append(self.header_sour_parse_tbl)
self.header_sour_data = {
TOKEN_DATE : self.__header_sour_date,
TOKEN_COPR : self.__header_sour_copr,
}
self.func_list.append(self.header_sour_data)
self.header_corp_addr = {
TOKEN_ADDR : self.__repo_addr,
TOKEN_PHON : self.__repo_phon,
}
self.func_list.append(self.header_corp_addr)
self.header_subm = {
TOKEN_NAME : self.__header_subm_name,
}
self.func_list.append(self.header_subm)
self.place_form = {
TOKEN_FORM : self.__place_form,
}
self.func_list.append(self.place_form)
#
# Parse table for <<NOTE_RECORD>> below the level 0 NOTE tag
#
# n @<XREF:NOTE>@ NOTE <SUBMITTER_TEXT> {1:1}
# +1 [ CONC | CONT] <SUBMITTER_TEXT> {0:M}
# +1 <<SOURCE_CITATION>> {0:M}
# +1 REFN <USER_REFERENCE_NUMBER> {0:M}
# +2 TYPE <USER_REFERENCE_TYPE> {0:1}
# +1 RIN <AUTOMATED_RECORD_ID> {0:1}
# +1 <<CHANGE_DATE>> {0:1}
self.note_parse_tbl = {
TOKEN_SOUR : self.__ignore,
TOKEN_REFN : self.__ignore,
TOKEN_RIN : self.__ignore,
TOKEN_CHAN : self.__note_chan,
}
self.func_list.append(self.note_parse_tbl)
# look for existing place titles, build a map
self.place_names = {}
cursor = dbase.get_place_cursor()
data = next(cursor)
while data:
(handle, val) = data
self.place_names[val[2]] = handle
data = next(cursor)
cursor.close()
enc = stage_one.get_encoding()
if enc == "ANSEL":
rdr = AnselReader(ifile)
elif enc in ("UTF-8", "UTF8"):
rdr = UTF8Reader(ifile)
elif enc in ("UTF-16", "UTF16", "UNICODE"):
rdr = UTF16Reader(ifile)
else:
rdr = AnsiReader(ifile)
self.lexer = Lexer(rdr)
self.filename = filename
self.backoff = False
fullpath = os.path.normpath(os.path.abspath(filename))
self.geddir = os.path.dirname(fullpath)
self.error_count = 0
amap = PERSONALCONSTANTATTRIBUTES
self.attrs = list(amap.values())
self.gedattr = dict([key, val] for val, key in amap.items())
self.search_paths = []
def parse_gedcom_file(self, use_trans=False):
"""
Parses the opened GEDCOM file.
LINEAGE_LINKED_GEDCOM: =
0 <<HEADER>> {1:1}
0 <<SUBMISSION_RECORD>> {0:1}
0 <<RECORD>> {1:M}
0 TRLR {1:1}
"""
no_magic = self.maxpeople < 1000
with DbTxn(_("GEDCOM import"), self.dbase, not use_trans,
no_magic=no_magic) as self.trans:
self.dbase.disable_signals()
self.__parse_header_head()
self.want_parse_warnings = False
self.__parse_header()
self.want_parse_warnings = True
if self.use_def_src:
self.dbase.add_source(self.def_src, self.trans)
if self.default_tag and self.default_tag.handle is None:
self.dbase.add_tag(self.default_tag, self.trans)
self.__parse_record()
self.__parse_trailer()
for title, handle in self.inline_srcs.items():
src = Source()
src.set_handle(handle)
src.set_title(title)
self.dbase.add_source(src, self.trans)
self.__clean_up()
self.place_import.generate_hierarchy(self.trans)
if not self.dbase.get_feature("skip-check-xref"):
self.__check_xref()
self.dbase.enable_signals()
self.dbase.request_rebuild()
if self.number_of_errors == 0:
message = _("GEDCOM import report: No errors detected")
else:
message = _("GEDCOM import report: %s errors detected") % \
self.number_of_errors
self.user.info(message, "".join(self.errors), monospaced=True)
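# A rough sketch of how an import pass is driven (illustrative only; the
# parser class name and constructor arguments shown here are assumptions,
# the authoritative call is in this module's plugin entry point):
#
#     parser = GedcomParser(dbase, ifile, filename, user, stage_one, ...)
#     parser.parse_gedcom_file(use_trans=False)
#
# parse_gedcom_file() wraps the whole run in a single DbTxn, parses the
# header, all level-0 records and the trailer, then reports the error count
# through self.user.info().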
def __clean_up(self):
"""
Break circular references to parsing methods stored in dictionaries
to aid garbage collection
"""
for func_map in self.func_list:
for key in list(func_map.keys()):
del func_map[key]
del func_map
del self.func_list
del self.update
self.lexer.clean_up()
def __find_person_handle(self, gramps_id):
"""
Return the database handle associated with the person's GRAMPS ID
"""
return self.__find_from_handle(gramps_id, self.gid2id)
def __find_family_handle(self, gramps_id):
"""
Return the database handle associated with the family's GRAMPS ID
"""
return self.__find_from_handle(gramps_id, self.fid2id)
def __find_object_handle(self, gramps_id):
"""
Return the database handle associated with the media object's GRAMPS ID
"""
return self.__find_from_handle(gramps_id, self.oid2id)
def __find_note_handle(self, gramps_id):
"""
Return the database handle associated with the note's GRAMPS ID
"""
return self.__find_from_handle(gramps_id, self.nid2id)
def __find_or_create_person(self, gramps_id):
"""
Finds or creates a person based on the GRAMPS ID. If the ID is
already used (is in the db), we return the item in the db. Otherwise,
we create a new person, assign the handle and GRAMPS ID.
"""
person = Person()
intid = self.gid2id.get(gramps_id)
if self.dbase.has_person_handle(intid):
person.unserialize(self.dbase.get_raw_person_data(intid))
else:
intid = self.__find_from_handle(gramps_id, self.gid2id)
person.set_handle(intid)
person.set_gramps_id(gramps_id)
return person
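# Illustrative sketch of the find-or-create pattern used throughout this
# importer (the variable names below are assumptions for the sketch):
#
#     gramps_id = self.pid_map[xref]             # xref text from the GEDCOM line
#     person = self.__find_or_create_person(gramps_id)
#     # ... populate person from subordinate lines ...
#     self.dbase.commit_person(person, self.trans)
#
# The first call for a given ID yields a fresh Person with a new handle and
# the mapped Gramps ID; later calls with the same ID return the same record.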
def __find_or_create_family(self, gramps_id):
"""
Finds or creates a family based on the GRAMPS ID. If the ID is
already used (is in the db), we return the item in the db. Otherwise,
we create a new family, assign the handle and GRAMPS ID.
"""
family = Family()
# Add a counter for reordering the children later:
family.child_ref_count = 0
intid = self.fid2id.get(gramps_id)
if self.dbase.has_family_handle(intid):
family.unserialize(self.dbase.get_raw_family_data(intid))
else:
intid = self.__find_from_handle(gramps_id, self.fid2id)
family.set_handle(intid)
family.set_gramps_id(gramps_id)
return family
def __find_or_create_object(self, gramps_id):
"""
Finds or creates a media object based on the GRAMPS ID. If the ID is
already used (is in the db), we return the item in the db. Otherwise,
we create a new media object, assign the handle and GRAMPS ID.
"""
obj = MediaObject()
intid = self.oid2id.get(gramps_id)
if self.dbase.has_object_handle(intid):
obj.unserialize(self.dbase.get_raw_object_data(intid))
else:
intid = self.__find_from_handle(gramps_id, self.oid2id)
obj.set_handle(intid)
obj.set_gramps_id(gramps_id)
return obj
def __find_or_create_source(self, gramps_id):
"""
Find or create a source based on the GRAMPS ID.
If the ID is already used (is in the db), we return the item in the
db. Otherwise, we create a new source, assign the handle and GRAMPS ID.
"""
obj = Source()
intid = self.sid2id.get(gramps_id)
if self.dbase.has_source_handle(intid):
obj.unserialize(self.dbase.get_raw_source_data(intid))
else:
intid = self.__find_from_handle(gramps_id, self.sid2id)
obj.set_handle(intid)
obj.set_gramps_id(gramps_id)
return obj
def __find_or_create_repository(self, gramps_id):
"""
Finds or creates a repository based on the GRAMPS ID. If the ID is
already used (is in the db), we return the item in the db. Otherwise,
we create a new repository, assign the handle and GRAMPS ID.
Some GEDCOM "flavors" violate the specification and declare the
repository inline instead of in a separate record.
"""
repository = Repository()
intid = self.rid2id.get(gramps_id)
if self.dbase.has_repository_handle(intid):
repository.unserialize(self.dbase.get_raw_repository_data(intid))
else:
intid = self.__find_from_handle(gramps_id, self.rid2id)
repository.set_handle(intid)
repository.set_gramps_id(gramps_id)
return repository
def __find_or_create_note(self, gramps_id):
"""
Finds or creates a note based on the GRAMPS ID. If the ID is
already used (is in the db), we return the item in the db. Otherwise,
we create a new note, assign the handle and GRAMPS ID.
If no GRAMPS ID is supplied (e.g. an inline note), a new one is
generated and the note is committed to the database immediately.
"""
note = Note()
if not gramps_id:
need_commit = True
gramps_id = self.dbase.find_next_note_gramps_id()
else:
need_commit = False
intid = self.nid2id.get(gramps_id)
if self.dbase.has_note_handle(intid):
note.unserialize(self.dbase.get_raw_note_data(intid))
else:
intid = self.__find_from_handle(gramps_id, self.nid2id)
note.set_handle(intid)
note.set_gramps_id(gramps_id)
if need_commit:
self.dbase.add_note(note, self.trans)
return note
def __find_or_create_place(self, title):
"""
Finds or creates a place based on its title. If the title has already
been encountered (or the place is in the db), we return that place.
Otherwise, we create a new place, assign a handle and GRAMPS ID.
"""
place = Place()
# check to see if we've encountered this name before
# if we haven't we need to get a new GRAMPS ID
intid = self.place_names.get(title)
if intid is None:
intid = self.lid2id.get(title)
if intid is None:
new_id = self.dbase.find_next_place_gramps_id()
else:
new_id = None
else:
new_id = None
# if the place already exists in the database, load it;
# otherwise create a new place with the handle and the
# GRAMPS ID generated above (if any)
if self.dbase.has_place_handle(intid):
place.unserialize(self.dbase.get_raw_place_data(intid))
else:
intid = create_id()
place.set_handle(intid)
place.set_title(title)
place.set_gramps_id(new_id)
self.dbase.add_place(place, self.trans)
self.lid2id[title] = intid
return place
def __find_file(self, fullname, altpath):
tries = []
fullname = fullname.replace('\\', os.path.sep)
tries.append(fullname)
try:
if os.path.isfile(fullname):
return (1, fullname)
except UnicodeEncodeError:
# FIXME: problem possibly caused by umlaut/accented character
# in filename
return (0, tries)
other = os.path.join(altpath, fullname)
tries.append(other)
if os.path.isfile(other):
return (1, other)
other = os.path.join(altpath, os.path.basename(fullname))
tries.append(other)
if os.path.isfile(other):
return (1, other)
if len(fullname) > 3:
if fullname[1] == ':':
fullname = fullname[2:]
for path in self.search_paths:
other = os.path.normpath("%s/%s" % (path, fullname))
tries.append(other)
if os.path.isfile(other):
return (1, other)
return (0, tries)
else:
return (0, tries)
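# Example of the lookup order (illustrative), for a media reference of
# "C:\photos\anna.jpg" in a GEDCOM imported alongside /home/user/family.ged:
#   1. the path as given (with backslashes converted to the local separator)
#   2. the same path relative to altpath (the GEDCOM's directory)
#   3. just the basename, relative to altpath
#   4. with any drive letter stripped, relative to each entry in
#      self.search_paths
# The first existing file is returned as (1, path); otherwise (0, tries)
# lists everything that was attempted so the caller can report it.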
def __get_next_line(self):
"""
Get the next line for analysis from the lexical analyzer. Return the
same value again if the backoff flag has been set by _backup().
"""
if not self.backoff:
self.groups = self.lexer.readline()
self.update()
# EOF ?
if not self.groups:
self.backoff = False
# We will add the truncation warning message to the error
# messages report, even though it probably won't be reported
# because the exception below gets raised before the report is
# produced. We do this in case __add_msg is changed in the
# future to do something else
self.__add_msg(self.__TRUNC_MSG)
self.groups = None
raise GedcomError(self.__TRUNC_MSG)
self.backoff = False
return self.groups
def __undefined(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__not_recognized(line, state.level+1, state)
def __ignore(self, line, state):
"""
Ignores an unsupported tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__add_msg(_("Tag recognized but not supported"), line, state)
self.__skip_subordinate_levels(state.level+1, state)
def __not_recognized(self, line, level, state):
"""
Prints a message when an undefined token is found. All subordinate items
to the current item are ignored.
@param level: Current level in the file
@type level: int
"""
self.__add_msg(_("Line ignored as not understood"), line, state)
self.__skip_subordinate_levels(level, state)
def __skip_record(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__skip_subordinate_levels(2, state)
def __skip_subordinate_levels(self, level, state):
"""
Skip all subordinate lines, i.e. lines at the specified level or deeper.
"""
skips = 0
while True:
line = self.__get_next_line()
if self.__level_is_finished(line, level):
if skips:
# This improves formatting when there are long sequences of
# skipped lines
self.__add_msg("", None, None)
return
self.__add_msg(_("Skipped subordinate line"), line, state)
skips += 1
def __level_is_finished(self, text, level):
"""
Check to see if the level has been completed, indicated by finding
a line at a level lower than the passed level value. If the level is
finished, call self._backup to reset the text pointer.
"""
done = text.level < level
if done:
self._backup()
return done
def __add_msg(self, problem, line=None, state=None):
if problem != "":
self.number_of_errors += 1
if line:
prob_width = 66
problem = problem.ljust(prob_width)[0:(prob_width-1)]
text = str(line.data).replace("\n", "\n".ljust(prob_width + 22))
message = "%s Line %5d: %s %s %s\n" % (problem, line.line,
line.level,
line.token_text,
text)
else:
message = problem + "\n"
if state:
state.msg += message
self.errors.append(message)
def __check_msgs(self, record_name, state, obj):
if state.msg == "":
return
message = _("Records not imported into ") + record_name + ":\n\n" + \
state.msg
new_note = Note()
tag = StyledTextTag(StyledTextTagType.FONTFACE, 'Monospace',
[(0, len(message))])
text = StyledText(message, [tag])
new_note.set_styledtext(text)
new_note.set_handle(create_id())
note_type = NoteType()
note_type.set((NoteType.CUSTOM, _("GEDCOM import")))
new_note.set_type(note_type)
self.dbase.add_note(new_note, self.trans)
# If possible, attach the note to the relevant object
if obj:
obj.add_note(new_note.get_handle())
def _backup(self):
"""
Set the backoff flag so that the current line can be re-read by the
next level up.
"""
self.backoff = True
def __check_xref(self):
def __check(map, trans, class_func, commit_func, gramps_id2handle, msg):
for input_id, gramps_id in map.map().items():
# Check whether an object exists for the mapped gramps_id
bgramps_id = gramps_id.encode('utf-8')
if not trans.get(bgramps_id):
handle = self.__find_from_handle(gramps_id,
gramps_id2handle)
if msg == "FAM":
make_unknown(gramps_id, self.explanation.handle,
class_func, commit_func, self.trans,
db=self.dbase)
self.__add_msg(_("Error: %(msg)s '%(gramps_id)s'"
" (input as @%(xref)s@) not in input"
" GEDCOM. Record synthesised") %
{'msg' : msg, 'gramps_id' : gramps_id,
'xref' : input_id})
else:
make_unknown(gramps_id, self.explanation.handle,
class_func, commit_func, self.trans)
self.missing_references +=1
self.__add_msg(_("Error: %(msg)s '%(gramps_id)s'"
" (input as @%(xref)s@) not in input"
" GEDCOM. Record with typifying"
" attribute 'Unknown' created") %
{'msg' : msg, 'gramps_id' : gramps_id,
'xref' : input_id})
self.explanation = create_explanation_note(self.dbase)
self.missing_references = 0
previous_errors = self.number_of_errors
__check(self.pid_map, self.dbase.id_trans, self.__find_or_create_person,
self.dbase.commit_person, self.gid2id, "INDI")
__check(self.fid_map, self.dbase.fid_trans, self.__find_or_create_family,
self.dbase.commit_family, self.fid2id, "FAM")
__check(self.sid_map, self.dbase.sid_trans, self.__find_or_create_source,
self.dbase.commit_source, self.sid2id, "SOUR")
__check(self.oid_map, self.dbase.oid_trans, self.__find_or_create_object,
self.dbase.commit_media_object, self.oid2id, "OBJE")
__check(self.rid_map, self.dbase.rid_trans, self.__find_or_create_repository,
self.dbase.commit_repository, self.rid2id, "REPO")
__check(self.nid_map, self.dbase.nid_trans, self.__find_or_create_note,
self.dbase.commit_note, self.nid2id, "NOTE")
# Check persons membership in referenced families
def __input_fid(gramps_id):
for (k,v) in self.fid_map.map().items():
if v == gramps_id:
return k
for input_id, gramps_id in self.pid_map.map().items():
person_handle = self.__find_from_handle(gramps_id, self.gid2id)
person = self.dbase.get_person_from_handle(person_handle)
for family_handle in person.get_family_handle_list():
family = self.dbase.get_family_from_handle(family_handle)
if family and family.get_father_handle() != person_handle and \
family.get_mother_handle() != person_handle:
person.remove_family_handle(family_handle)
self.dbase.commit_person(person, self.trans)
self.__add_msg(_("Error: family '%(family)s' (input as"
" @%(orig_family)s@) person %(person)s"
" (input as %(orig_person)s) is not a"
" member of the referenced family."
" Family reference removed from person") %
{'family' : family.gramps_id,
'orig_family' :
__input_fid(family.gramps_id),
'person' : person.gramps_id,
'orig_person' : input_id})
def __input_pid(gramps_id):
for (k,v) in self.pid_map.map().items():
if v == gramps_id:
return k
for input_id, gramps_id in self.fid_map.map().items():
family_handle = self.__find_from_handle(gramps_id, self.fid2id)
family = self.dbase.get_family_from_handle(family_handle)
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
if father_handle:
father = self.dbase.get_person_from_handle(father_handle)
if father and \
family_handle not in father.get_family_handle_list():
father.add_family_handle(family_handle)
self.dbase.commit_person(father, self.trans)
self.__add_msg("Error: family '%(family)s' (input as"
" @%(orig_family)s@) father '%(father)s'"
" (input as '%(orig_father)s') does not refer"
" back to the family. Reference added." %
{'family' : family.gramps_id,
'orig_family' : input_id,
'father' : father.gramps_id,
'orig_father' :
__input_pid(father.gramps_id)})
if mother_handle:
mother = self.dbase.get_person_from_handle(mother_handle)
if mother and \
family_handle not in mother.get_family_handle_list():
mother.add_family_handle(family_handle)
self.dbase.commit_person(mother, self.trans)
self.__add_msg("Error: family '%(family)s' (input as"
" @%(orig_family)s@) mother '%(mother)s'"
" (input as '%(orig_mother)s') does not refer"
" back to the family. Reference added." %
{'family' : family.gramps_id,
'orig_family' : input_id,
'mother' : mother.gramps_id,
'orig_mother' :
__input_pid(mother.gramps_id)})
for child_ref in family.get_child_ref_list():
child_handle = child_ref.ref
child = self.dbase.get_person_from_handle(child_handle)
if child:
if family_handle not in \
child.get_parent_family_handle_list():
# The referenced child has no reference to the family.
# There was a link from the FAM record to the child, but
# no FAMC link from the child to the FAM.
child.add_parent_family_handle(family_handle)
self.dbase.commit_person(child, self.trans)
self.__add_msg("Error: family '%(family)s' (input as"
" @%(orig_family)s@) child '%(child)s'"
" (input as '%(orig_child)s') does not "
"refer back to the family. "
"Reference added." %
{'family' : family.gramps_id,
'orig_family' : input_id,
'child' : child.gramps_id,
'orig_child' :
__input_pid(child.gramps_id)})
if self.missing_references:
self.dbase.commit_note(self.explanation, self.trans, time.time())
txt = _("\nThe imported file was not self-contained.\n"
"To correct for that, %(new)d objects were created and\n"
"their typifying attribute was set to 'Unknown'.\n"
"Where possible these 'Unknown' objects are \n"
"referenced by note %(unknown)s.\n"
) % {'new': self.missing_references, 'unknown': self.explanation.gramps_id}
self.__add_msg(txt)
self.number_of_errors -= 1
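# Illustrative example of what this cross-reference check repairs: if the
# input contains
#     0 @F1@ FAM
#     1 CHIL @I99@
# but no "0 @I99@ INDI" record anywhere in the file, a placeholder person is
# synthesised with a typifying 'Unknown' attribute, referenced from the
# explanation note, and an error naming the missing @I99@ record is added to
# the import report.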
def __merge_address(self, free_form_address, addr, line, state):
"""
Merge free-form and structured addresses.
n ADDR <ADDRESS_LINE> {0:1}
+1 CONT <ADDRESS_LINE> {0:M}
+1 ADR1 <ADDRESS_LINE1> {0:1} (Street)
+1 ADR2 <ADDRESS_LINE2> {0:1} (Locality)
+1 CITY <ADDRESS_CITY> {0:1}
+1 STAE <ADDRESS_STATE> {0:1}
+1 POST <ADDRESS_POSTAL_CODE> {0:1}
+1 CTRY <ADDRESS_COUNTRY> {0:1}
This is done along the lines suggested by Tamura Jones in
http://www.tamurajones.net/GEDCOMADDR.xhtml as a result of bug 6382.
"When a GEDCOM reader encounters a double address, it should read the
structured address. ... A GEDCOM reader that does verify that the
addresses are the same should issue an error if they are not".
This is called for SUBMitter addresses (__subm_addr), INDIvidual
addresses (__person_addr), REPO addresses and HEADer corp address
(__repo_address) and EVENt addresses (__event_adr).
The structured address (if any) will have been accumulated into an
object of type LocationBase, which will either be a Location, or an
Address object.
If ADDR is provided, but none of ADR1, ADR2, CITY, STAE, or POST (not
CTRY), then Street is set to the freeform address. N.B. this is a change
for Repository addresses and HEADer Corp address where previously the
free-form address was deconstructed into different structured
components. N.B. PAF provides a free-form address and a country, so this
allows for that case.
If both forms of address are provided, then the structured address is
used, and if the ADDR/CONT contains anything not in the structured
address, a warning is issued.
If just ADR1, ADR2, CITY, STAE, POST or CTRY are provided (this is not
actually legal GEDCOM syntax, but may be possible with GEDCOM extensions)
then just the structured address is used.
"""
if not (addr.get_street() or addr.get_locality() or
addr.get_city() or addr.get_state() or
addr.get_postal_code()):
addr.set_street(free_form_address)
else:
# structured address provided
addr_list = free_form_address.split("\n")
str_list = []
for func in (addr.get_street(), addr.get_locality(),
addr.get_city(), addr.get_state(),
addr.get_postal_code(), addr.get_country()):
str_list += [i.strip(',' + string.whitespace) for i in func.split("\n")]
for elmn in addr_list:
if elmn.strip(',' + string.whitespace) not in str_list:
# the message reports that element %s was ignored; it is worded with
# the element last because long messages are truncated for output
self.__add_msg(_("ADDR element ignored '%s'"
% elmn), line, state)
# The free-form address ADDR is discarded
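# Illustrative example of the merge rules above. Given a structured address
#     2 ADR1 12 Main St
#     2 CITY Springfield
# together with a free-form "1 ADDR ..." line, the structured fields are kept
# and each free-form fragment is checked against them; any fragment not found
# in a structured field produces an "ADDR element ignored" warning. With only
# the free-form ADDR line present, its full text becomes the street.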
def __parse_trailer(self):
"""
Looks for the expected TRLR token
"""
try:
line = self.__get_next_line()
if line and line.token != TOKEN_TRLR:
state = CurrentState()
self.__not_recognized(line, 0, state)
self.__check_msgs(_("TRLR (trailer)"), state, None)
except TypeError:
return
def __parse_submitter(self, line):
"""
Parses the submitter data
n @<XREF:SUBM>@ SUBM
+1 NAME <SUBMITTER_NAME>
+1 <<ADDRESS_STRUCTURE>>
+1 <<MULTIMEDIA_LINK>>
+1 LANG <LANGUAGE_PREFERENCE>
+1 RFN <SUBMITTER_REGISTERED_RFN>
+1 RIN <AUTOMATED_RECORD_ID>
+1 <<CHANGE_DATE>>
"""
researcher = Researcher()
state = CurrentState()
state.res = researcher
state.level = 1
repo = Repository()
state.repo = repo
self.__parse_level(state, self.subm_parse_tbl, self.__undefined)
# If this is the submitter that we were told about in the HEADer, then
# we will need to update the researcher
if line.token_text == self.subm:
self.dbase.set_researcher(state.res)
submitter_name = _("SUBM (Submitter): @%s@") % line.token_text
if self.use_def_src:
repo.set_name(submitter_name)
repo.set_handle(create_id())
repo.set_gramps_id(self.dbase.find_next_repository_gramps_id())
addr = Address()
addr.set_street(state.res.get_address())
addr.set_locality(state.res.get_locality())
addr.set_city(state.res.get_city())
addr.set_state(state.res.get_state())
addr.set_country(state.res.get_country())
addr.set_postal_code(state.res.get_postal_code())
addr.set_county(state.res.get_county())
addr.set_phone(state.res.get_phone())
repo.add_address(addr)
if state.res.get_email():
url = Url()
url.set_path(state.res.get_email())
url.set_type(UrlType(UrlType.EMAIL))
repo.add_url(url)
rtype = RepositoryType()
rtype.set((RepositoryType.CUSTOM, _('GEDCOM data')))
repo.set_type(rtype)
self.__check_msgs(submitter_name, state, repo)
self.dbase.commit_repository(repo, self.trans, state.repo.change)
repo_ref = RepoRef()
repo_ref.set_reference_handle(repo.handle)
mtype = SourceMediaType()
mtype.set((SourceMediaType.UNKNOWN, ''))
repo_ref.set_media_type(mtype)
self.def_src.add_repo_reference(repo_ref)
self.dbase.commit_source(self.def_src, self.trans)
else:
self.__check_msgs(submitter_name, state, None)
def __parse_record(self):
"""
Parse the top level (0 level) instances.
RECORD: =
[
n <<FAM_RECORD>> {1:1}
|
n <<INDIVIDUAL_RECORD>> {1:1}
|
n <<MULTIMEDIA_RECORD>> {1:M}
|
n <<NOTE_RECORD>> {1:1}
|
n <<REPOSITORY_RECORD>> {1:1}
|
n <<SOURCE_RECORD>> {1:1}
|
n <<SUBMITTER_RECORD>> {1:1}
]
This also deals with the SUBN (submission) record, of which there should
be exactly one.
"""
while True:
line = self.__get_next_line()
key = line.data
if not line or line.token == TOKEN_TRLR:
self._backup()
break
if line.token == TOKEN_UNKNOWN:
state = CurrentState()
self.__add_msg(_("Unknown tag"), line, state)
self.__skip_subordinate_levels(1, state)
self.__check_msgs(_("Top Level"), state, None)
elif key in ("FAM", "FAMILY"):
self.__parse_fam(line)
elif key in ("INDI", "INDIVIDUAL"):
self.__parse_indi(line)
elif key in ("OBJE", "OBJECT"):
self.__parse_obje(line)
elif key in ("REPO", "REPOSITORY"):
self.__parse_repo(line)
elif key in ("SUBM", "SUBMITTER"):
self.__parse_submitter(line)
elif key in ("SUBN"):
state = CurrentState()
self.__parse_submission(line, state)
self.__check_msgs(_("Top Level"), state, None)
elif line.token in (TOKEN_SUBM, TOKEN_SUBN, TOKEN_IGNORE):
state = CurrentState()
self.__skip_subordinate_levels(1, state)
self.__check_msgs(_("Top Level"), state, None)
elif key in ("SOUR", "SOURCE"):
self.__parse_source(line.token_text, 1)
elif (line.data.startswith("SOUR ") or
line.data.startswith("SOURCE ")):
# A source formatted in a single line, for example:
# 0 @S62@ SOUR This is the title of the source
source = self.__find_or_create_source(self.sid_map[line.data])
source.set_title(line.data[5:])
self.dbase.commit_source(source, self.trans)
elif key[0:4] == "NOTE":
try:
line.data = line.data[5:]
except:
# don't think this path is ever taken, but if it is..
# ensure a message is emitted & subordinates skipped
line.data = None
self.__parse_inline_note(line, 1)
else:
state = CurrentState()
self.__not_recognized(line, 1, state)
self.__check_msgs(_("Top Level"), state, None)
def __parse_level(self, state, __map, default):
"""
Loop through the current GEDCOM level, calling the appropriate
functions associated with the TOKEN.
If no matching function for the token is found, the default function
is called instead.
"""
while True:
line = self.__get_next_line()
if line.level < state.level:
self.backoff = True
return
else:
func = __map.get(line.token, default)
func(line, state)
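# Illustrative sketch of how the dispatch tables are used. A table maps lexer
# tokens to handler methods, and __parse_level keeps consuming lines until the
# level drops back below the caller's level, e.g.:
#
#     state = CurrentState(person=person, level=1)
#     self.__parse_level(state, self.indi_parse_tbl, self.__person_event)
#
# Each line at level >= 1 is routed through indi_parse_tbl; any token without
# an entry falls back to the default handler (__person_event here), and the
# first line at a lower level is pushed back via the backoff flag so the
# caller can re-read it.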
#----------------------------------------------------------------------
#
# INDI parsing
#
#----------------------------------------------------------------------
def __parse_indi(self, line):
"""
Handling of the GEDCOM INDI tag and all lines subordinate to the current
line.
n @XREF:INDI@ INDI {1:1}
+1 RESN <RESTRICTION_NOTICE> {0:1}
+1 <<PERSONAL_NAME_STRUCTURE>> {0:M}
+1 SEX <SEX_VALUE> {0:1}
+1 <<INDIVIDUAL_EVENT_STRUCTURE>> {0:M}
+1 <<INDIVIDUAL_ATTRIBUTE_STRUCTURE>> {0:M}
+1 <<LDS_INDIVIDUAL_ORDINANCE>> {0:M}
+1 <<CHILD_TO_FAMILY_LINK>> {0:M}
+1 <<SPOUSE_TO_FAMILY_LINK>> {0:M}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<ASSOCIATION_STRUCTURE>> {0:M}
+1 ALIA @<XREF:INDI>@ {0:M}
+1 ANCI @<XREF:SUBM>@ {0:M}
+1 DESI @<XREF:SUBM>@ {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<MULTIMEDIA_LINK>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 RFN <PERMANENT_RECORD_FILE_NUMBER> {0:1}
+1 AFN <ANCESTRAL_FILE_NUMBER> {0:1}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
# find the person
real_id = self.pid_map[line.token_text]
person = self.__find_or_create_person(real_id)
# set up the state for the parsing
state = CurrentState(person=person, level=1)
# do the actual parsing
self.__parse_level(state, self.indi_parse_tbl, self.__person_event)
# Add the default source reference if no source has been found
self.__add_default_source(person)
# Add a default tag if provided
self.__add_default_tag(person)
self.__check_msgs(_("INDI (individual) Gramps ID %s") %
person.get_gramps_id(), state, person)
# commit the person to the database
self.dbase.commit_person(person, self.trans, state.person.change)
def __person_sour(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
citation_handle = self.handle_source(line, state.level, state)
state.person.add_citation(citation_handle)
def __person_attr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type((AttributeType.CUSTOM, line.token_text))
attr.set_value(line.data)
state.person.add_attribute(attr)
def __person_event(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
# We can get here when a tag that is not valid in the indi_parse_tbl
# parse table is encountered. It is remotely possible that this is
# actally a DATE tag, in which case line.data will be a date object, so
# we need to convert it back to a string here.
event_ref = self.__build_event_pair(state, EventType.CUSTOM,
self.event_parse_tbl,
str(line.data))
state.person.add_event_ref(event_ref)
def __fam_even(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event_ref = self.__build_family_event_pair(state,
EventType.CUSTOM,
self.event_parse_tbl,
line.data)
state.family.add_event_ref(event_ref)
def __person_chan(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_change(line, state.person, state.level+1, state)
def __person_resn(self, line, state):
"""
Parses the RESN tag, adding it as an attribute.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type((AttributeType.CUSTOM, 'RESN'))
state.person.add_attribute(attr)
def __person_alt_name(self, line, state):
"""
This parses the standard GEDCOM structure:
n @XREF:INDI@ INDI {1:1}
+1 ALIA @<XREF:INDI>@ {0:M}
The ALIA tag is supposed to cross reference another person. We will
store this in the Association record.
ALIA {ALIAS}: = An indicator to link different record descriptions of a
person who may be the same person.
Some systems use the ALIA tag as an alternate NAME tag, which is not
legal in GEDCOM, but oddly enough, is easy to support. This parses the
illegal (ALIA or ALIAS) or non-standard (_ALIA) GEDCOM. "1 ALIA" is used
by Family Tree Maker and Reunion. "1 ALIAS" and "1 _ALIA" do not appear
to be used.
n @XREF:INDI@ INDI {1:1}
+1 <ALIA> <NAME_PERSONAL> {1:1}
+2 NPFX <NAME_PIECE_PREFIX> {0:1}
+2 GIVN <NAME_PIECE_GIVEN> {0:1}
+2 NICK <NAME_PIECE_NICKNAME> {0:1}
+2 SPFX <NAME_PIECE_SURNAME_PREFIX> {0:1}
+2 SURN <NAME_PIECE_SURNAME> {0:1}
+2 NSFX <NAME_PIECE_SUFFIX> {0:1}
+2 <<SOURCE_CITATION>> {0:M}
+3 <<NOTE_STRUCTURE>> {0:M}
+3 <<MULTIMEDIA_LINK>> {0:M}
+2 <<NOTE_STRUCTURE>> {0:M}
where <ALIA> == ALIA | _ALIA | ALIAS
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data == '':
self.__add_msg(_("Empty Alias <NAME PERSONAL> ignored"), line, state)
self.__skip_subordinate_levels(state.level+1, state)
elif line.data[0] == '@':
handle = self.__find_person_handle(self.pid_map[line.data])
ref = PersonRef()
ref.ref = handle
ref.rel = "Alias"
state.person.add_person_ref(ref)
else:
self.__parse_alias_name(line, state)
def __parse_alias_name(self, line, state):
"""
Parse a level 1 alias name and subsidiary levels when called from
__person_alt_name (when the <NAME_PERSONAL> does not start with @). Also
parses a level 2 alias name and subsidiary levels when called from
__name_alias.
+1 <ALIA> <NAME_PERSONAL> {1:1}
+2 NPFX <NAME_PIECE_PREFIX> {0:1}
+2 GIVN <NAME_PIECE_GIVEN> {0:1}
+2 NICK <NAME_PIECE_NICKNAME> {0:1}
+2 SPFX <NAME_PIECE_SURNAME_PREFIX> {0:1}
+2 SURN <NAME_PIECE_SURNAME> {0:1}
+2 NSFX <NAME_PIECE_SUFFIX> {0:1}
+2 <<SOURCE_CITATION>> {0:M}
+3 <<NOTE_STRUCTURE>> {0:M}
+3 <<MULTIMEDIA_LINK>> {0:M}
+2 <<NOTE_STRUCTURE>> {0:M}
where <ALIA> == ALIA | _ALIA | ALIAS
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
name = self.__parse_name_personal(line.data)
name.set_type(NameType.AKA)
state.person.add_alternate_name(name)
# Create a new state, and parse the remainder of the NAME level
sub_state = CurrentState()
sub_state.person = state.person
sub_state.name = name
sub_state.level = state.level+1
self.__parse_level(sub_state, self.name_parse_tbl, self.__undefined)
state.msg += sub_state.msg
def __person_object(self, line, state):
"""
Embedded form
> n OBJE @<XREF:OBJE>@ {1:1}
Linked form
> n OBJE {1:1}
> +1 FORM <MULTIMEDIA_FORMAT> {1:1}
> +1 TITL <DESCRIPTIVE_TITLE> {0:1}
> +1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
> +1 <<NOTE_STRUCTURE>> {0:M}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data and line.data[0] == '@':
# Reference to a named multimedia object defined elsewhere
gramps_id = self.oid_map[line.data]
handle = self.__find_object_handle(gramps_id)
ref = MediaRef()
ref.set_reference_handle(handle)
state.person.add_media_reference(ref)
else:
(form, filename, title, note) = self.__obje(state.level+1, state)
if filename == "":
self.__add_msg(_("Filename omitted"), line, state)
if form == "":
self.__add_msg(_("Form omitted"), line, state)
self.build_media_object(state.person, form, filename, title, note)
def __person_name(self, line, state):
"""
Parses the NAME token in a GEDCOM file. The text is in the format
of (according to the GEDCOM Spec):
> <TEXT>|/<TEXT>/|<TEXT>/<TEXT>/|/<TEXT>/<TEXT>|<TEXT>/<TEXT>/<TEXT>
We have encountered some variations that use:
> <TEXT>/
The basic Name structure is:
n NAME <NAME_PERSONAL> {1:1}
+1 NPFX <NAME_PIECE_PREFIX> {0:1}
+1 GIVN <NAME_PIECE_GIVEN> {0:1}
+1 NICK <NAME_PIECE_NICKNAME> {0:1}
+1 SPFX <NAME_PIECE_SURNAME_PREFIX {0:1}
+1 SURN <NAME_PIECE_SURNAME> {0:1}
+1 NSFX <NAME_PIECE_SUFFIX> {0:1}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
# build a Name structure from the text
name = self.__parse_name_personal(line.data)
# Add the name as the primary name if this is the first one that
# we have encountered for this person. Assume that if this is the
# first name, it is a birth name. Otherwise, label it as an
# "Also Known As (AKA)". GEDCOM does not seem to have the concept
# of different name types
if state.name_cnt == 0:
name.set_type(NameType.BIRTH)
state.person.set_primary_name(name)
else:
name.set_type(NameType.AKA)
state.person.add_alternate_name(name)
state.name_cnt += 1
# Create a new state, and parse the remainder of the NAME level
sub_state = CurrentState()
sub_state.person = state.person
sub_state.name = name
sub_state.level = state.level+1
self.__parse_level(sub_state, self.name_parse_tbl, self.__undefined)
state.msg += sub_state.msg
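# Illustrative example: for
#     1 NAME John /Doe/
#     2 GIVN John
#     2 SURN Doe
# a Name object is built from the NAME text and then refined by the GIVN/SURN
# sub-tags (handled through name_parse_tbl). The first NAME encountered for a
# person becomes the primary (birth type) name; any later NAME lines are
# added as "Also Known As" alternate names.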
def __person_sex(self, line, state):
"""
Parses the SEX line of a GEDCOM file. It has the format of:
+1 SEX <SEX_VALUE> {0:1}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.person.set_gender(line.data)
def __person_even(self, line, state):
"""
Parses the custom EVEN tag, which has the format of:
n <<EVENT_TYPE>> {1:1}
+1 <<EVENT_DETAIL>> {0:1} p.*
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event_ref = self.__build_event_pair(state, EventType.CUSTOM,
self.event_parse_tbl, line.data)
state.person.add_event_ref(event_ref)
def __person_std_event(self, line, state):
"""
Parses GEDCOM event types that map to a GRAMPS standard type. Additional
parsing is required for the event detail:
+1 <<EVENT_DETAIL>> {0:1} p.*
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event = line.data
event.set_gramps_id(self.emapper.find_next())
event_ref = EventRef()
self.dbase.add_event(event, self.trans)
sub_state = CurrentState()
sub_state.person = state.person
sub_state.level = state.level+1
sub_state.event = event
sub_state.event_ref = event_ref
self.__parse_level(sub_state, self.event_parse_tbl, self.__undefined)
state.msg += sub_state.msg
self.dbase.commit_event(event, self.trans)
event_ref.ref = event.handle
state.person.add_event_ref(event_ref)
def __person_reli(self, line, state):
"""
Parses the RELI tag.
n RELI [Y|<NULL>] {1:1}
+1 <<EVENT_DETAIL>> {0:1} p.*
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event_ref = self.__build_event_pair(state, EventType.RELIGION,
self.event_parse_tbl, line.data)
state.person.add_event_ref(event_ref)
def __person_birt(self, line, state):
"""
Parses the GEDCOM BIRT tag into a GRAMPS birth event. Extra handling is
needed because GRAMPS must also record this event as the person's birth
reference event.
n BIRT [Y|<NULL>] {1:1}
+1 <<EVENT_DETAIL>> {0:1} p.*
+1 FAMC @<XREF:FAM>@ {0:1} p.*
I'm not sure what value the FAMC actually offers here, since
the FAMC record should handle this. Why it is a valid sub value
is beyond me.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event_ref = self.__build_event_pair(state, EventType.BIRTH,
self.event_parse_tbl, line.data)
if state.person.get_birth_ref():
state.person.add_event_ref(event_ref)
else:
state.person.set_birth_ref(event_ref)
def __person_adop(self, line, state):
"""
Parses the GEDCOM ADOP tag, subordinate to the INDI tag. Additional
sub-tags are allowed here, so we pass a different function map.
n ADOP [Y|<NULL>] {1:1}
+1 <<EVENT_DETAIL>> {0:1} p.*
+1 FAMC @<XREF:FAM>@ {0:1} p.*
+2 ADOP <ADOPTED_BY_WHICH_PARENT> {0:1}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event_ref = self.__build_event_pair(state, EventType.ADOPT,
self.adopt_parse_tbl, line.data)
state.person.add_event_ref(event_ref)
def __person_deat(self, line, state):
"""
Parses the GEDCOM DEAT tag into a GRAMPS death event. Extra handling is
needed because GRAMPS must also record this event as the person's death
reference event.
n DEAT [Y|<NULL>] {1:1}
+1 <<EVENT_DETAIL>> {0:1} p.*
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event_ref = self.__build_event_pair(state, EventType.DEATH,
self.event_parse_tbl, line.data)
if state.person.get_death_ref():
state.person.add_event_ref(event_ref)
else:
state.person.set_death_ref(event_ref)
def __person_note(self, line, state):
"""
Parses a note associated with the person
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.person, 1, state)
def __person_rnote(self, line, state):
"""
Parses a note associated with the person
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.person, 1, state)
def __person_addr(self, line, state):
"""
Parses the INDIvidual <ADDRESS_STRUCTURE>
n ADDR <ADDRESS_LINE> {0:1}
+1 CONT <ADDRESS_LINE> {0:M}
+1 ADR1 <ADDRESS_LINE1> {0:1} (Street)
+1 ADR2 <ADDRESS_LINE2> {0:1} (Locality)
+1 CITY <ADDRESS_CITY> {0:1}
+1 STAE <ADDRESS_STATE> {0:1}
+1 POST <ADDRESS_POSTAL_CODE> {0:1}
+1 CTRY <ADDRESS_COUNTRY> {0:1}
n PHON <PHONE_NUMBER> {0:3}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
free_form = line.data
sub_state = CurrentState(level=state.level + 1)
sub_state.addr = Address()
self.__parse_level(sub_state, self.parse_addr_tbl, self.__ignore)
state.msg += sub_state.msg
self.__merge_address(free_form, sub_state.addr, line, state)
state.person.add_address(sub_state.addr)
def __person_phon(self, line, state):
"""
n PHON <PHONE_NUMBER> {0:3}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
addr = Address()
addr.set_street("Unknown")
addr.set_phone(line.data)
state.person.add_address(addr)
self.__skip_subordinate_levels(state.level+1, state)
def __person_email(self, line, state):
"""
0 INDI
1 EMAIL <EMAIL> {0:3}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
url = Url()
url.set_path(line.data)
url.set_type(UrlType(UrlType.EMAIL))
state.person.add_url(url)
def __person_url(self, line, state):
"""
0 INDI
1 URL <URL> {0:3}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
url = Url()
url.set_path(line.data)
url.set_type(UrlType(UrlType.WEB_HOME))
state.person.add_url(url)
def __person_titl(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event = Event()
event_ref = EventRef()
event.set_gramps_id(self.emapper.find_next())
event.set_type(EventType.NOB_TITLE)
event.set_description(line.data)
sub_state = CurrentState()
sub_state.person = state.person
sub_state.level = state.level+1
sub_state.event = event
sub_state.event_ref = event_ref
self.__parse_level(sub_state, self.event_parse_tbl, self.__undefined)
state.msg += sub_state.msg
self.dbase.add_event(event, self.trans)
event_ref.ref = event.handle
state.person.add_event_ref(event_ref)
def __person_attr_plac(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if state.attr.get_value() == "":
state.attr.set_value(line.data)
def __name_type(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data.upper() in ("_OTHN", "_AKA", "AKA", "AKAN"):
state.name.set_type(NameType.AKA)
elif line.data.upper() in ("_MAR", "_MARN", "_MARNM", "MARRIED"):
state.name.set_type(NameType.MARRIED)
else:
state.name.set_type((NameType.CUSTOM, line.data))
def __name_date(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if state.name:
state.name.set_date_object(line.data)
def __name_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.name, state.level+1, state)
def __name_alia(self, line, state):
"""
This parses the illegal (ALIA or ALIAS) or non-standard (_ALIA) GEDCOM
tag as a subsidiary of the NAME tag.
n @XREF:INDI@ INDI {1:1}
+1 NAME <NAME_PERSONAL> {1:1}
+2 NPFX <NAME_PIECE_PREFIX> {0:1}
+2 GIVN <NAME_PIECE_GIVEN> {0:1}
+2 NICK <NAME_PIECE_NICKNAME> {0:1}
+2 SPFX <NAME_PIECE_SURNAME_PREFIX> {0:1}
+2 SURN <NAME_PIECE_SURNAME> {0:1}
+2 NSFX <NAME_PIECE_SUFFIX> {0:1}
+2 <ALIA> <NAME_PERSONAL> {1:1}
+3 NPFX <NAME_PIECE_PREFIX> {0:1}
+3 GIVN <NAME_PIECE_GIVEN> {0:1}
+3 NICK <NAME_PIECE_NICKNAME> {0:1}
+3 SPFX <NAME_PIECE_SURNAME_PREFIX> {0:1}
+3 SURN <NAME_PIECE_SURNAME> {0:1}
+3 NSFX <NAME_PIECE_SUFFIX> {0:1}
+3 <<SOURCE_CITATION>> {0:M}
+4 <<NOTE_STRUCTURE>> {0:M}
+4 <<MULTIMEDIA_LINK>> {0:M}
+3 <<NOTE_STRUCTURE>> {0:M}
+2 <<SOURCE_CITATION>> {0:M}
+3 <<NOTE_STRUCTURE>> {0:M}
+3 <<MULTIMEDIA_LINK>> {0:M}
+2 <<NOTE_STRUCTURE>> {0:M}
Note that the subsidiary name structure detail will overwrite the ALIA
name (if the same elements are provided in both), so the names should
match.
"2 _ALIA" is used for example, by PRO-GEN v 3.0a and "2 ALIA" is used
by GTEdit and Brother's Keeper 5.2 for Windows. It had been supported in
previous versions of Gramps, but was probably incorrectly coded, as
it would only work if the name started with '@'.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_alias_name(line, state)
def __name_npfx(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.name.set_title(line.data.strip())
self.__skip_subordinate_levels(state.level+1, state)
def __name_givn(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.name.set_first_name(line.data.strip())
self.__skip_subordinate_levels(state.level+1, state)
def __name_spfx(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if state.name.get_surname_list():
state.name.get_surname_list()[0].set_prefix(line.data.strip())
else:
surn = Surname()
surn.set_prefix(line.data.strip())
surn.set_primary()
state.name.set_surname_list([surn])
self.__skip_subordinate_levels(state.level+1, state)
def __name_surn(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if state.name.get_surname_list():
state.name.get_surname_list()[0].set_surname(line.data.strip())
else:
surn = Surname()
surn.set_surname(line.data.strip())
surn.set_primary()
state.name.set_surname_list([surn])
self.__skip_subordinate_levels(state.level+1, state)
def __name_marnm(self, line, state):
"""
This is non-standard GEDCOM. _MARNM is reported to be used in Ancestral
Quest and Personal Ancestral File 5. This will also handle a usage which
has been found in Brother's Keeper (BROSKEEP VERS 6.1.31 WINDOWS) as
follows:
0 @I203@ INDI
1 NAME John Richard/Doe/
2 _MARN Some Other Name
3 DATE 27 JUN 1817
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
text = line.data.strip()
data = text.split()
if len(data) == 1:
name = Name(state.person.primary_name)
surn = Surname()
surn.set_surname(data[0].strip())
surn.set_primary()
name.set_surname_list([surn])
name.set_type(NameType.MARRIED)
state.person.add_alternate_name(name)
elif len(data) > 1:
name = self.__parse_name_personal(text)
name.set_type(NameType.MARRIED)
state.person.add_alternate_name(name)
def __name_nsfx(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if state.name.get_suffix() == "" or \
state.name.get_suffix() == line.data:
# suffix might already have been set when parsing the name string
state.name.set_suffix(line.data)
else:
# previously set suffix differs; to avoid losing information, append
state.name.set_suffix(state.name.get_suffix() + ' ' + line.data)
self.__skip_subordinate_levels(state.level+1, state)
def __name_nick(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.name.set_nick_name(line.data.strip())
self.__skip_subordinate_levels(state.level+1, state)
def __name_aka(self, line, state):
"""
This parses the non-standard GEDCOM tags _AKA or _AKAN as a subsidiary
to the NAME tag, which is reported to have been found in Ancestral Quest
and Personal Ancestral File 4 and 5. Note: example AQ and PAF files have
separate 2 NICK and 2 _AKA lines for the same person. The NICK will be
stored by Gramps in the nick_name field of the name structure, while the
_AKA, if it is a single word, will be stored in the NICKNAME attribute.
If more than one word it is stored as an AKA alternate name.
This will also handle a usage which has been found in Brother's
Keeper (BROSKEEP VERS 6.1.31 WINDOWS) as follows:
0 @I203@ INDI
1 NAME John Richard/Doe/
2 _AKAN Some Other Name
3 DATE 27 JUN 1817
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
lname = line.data.split()
name_len = len(lname)
if name_len == 1:
attr = Attribute()
attr.set_type(AttributeType.NICKNAME)
attr.set_value(line.data)
state.person.add_attribute(attr)
else:
name = Name()
surname = Surname()
surname.set_surname(lname[-1].strip())
surname.set_primary()
name.set_surname_list([surname])
name.set_first_name(' '.join(lname[0:name_len-1]))
# name = self.__parse_name_personal(line.data)
name.set_type(NameType.AKA)
state.person.add_alternate_name(name)
def __name_adpn(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
text = line.data.strip()
data = text.split()
if len(data) == 1:
name = Name(state.person.primary_name)
surn = Surname()
surn.set_surname(data[0].strip())
surn.set_primary()
name.set_surname_list([surn])
name.set_type((NameType.CUSTOM, "Adopted"))
state.person.add_alternate_name(name)
elif len(data) > 1:
name = self.__parse_name_personal(text)
name.set_type((NameType.CUSTOM, "Adopted"))
state.person.add_alternate_name(name)
def __name_sour(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
citation_handle = self.handle_source(line, state.level, state)
state.name.add_citation(citation_handle)
def __person_std_attr(self, line, state):
"""
Parses a TOKEN that GRAMPS recognizes as an Attribute
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sub_state = CurrentState()
sub_state.person = state.person
sub_state.attr = line.data
sub_state.level = state.level+1
state.person.add_attribute(sub_state.attr)
self.__parse_level(sub_state, self.person_attr_parse_tbl,
self.__ignore)
state.msg += sub_state.msg
def __person_fact(self, line, state):
"""
Parses a TOKEN that GRAMPS recognizes as an Attribute
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sub_state = CurrentState()
sub_state.person = state.person
sub_state.attr = Attribute()
sub_state.attr.set_value(line.data)
sub_state.level = state.level+1
state.person.add_attribute(sub_state.attr)
self.__parse_level(sub_state, self.person_fact_parse_tbl,
self.__ignore)
state.msg += sub_state.msg
def __person_fact_type(self, line, state):
state.attr.set_type(line.data)
def __person_bapl(self, line, state):
"""
Parses a BAPL TOKEN, producing a GRAMPS LdsOrd instance
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.build_lds_ord(state, LdsOrd.BAPTISM)
def __person_conl(self, line, state):
"""
Parses a CONL TOKEN, producing a GRAMPS LdsOrd instance
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.build_lds_ord(state, LdsOrd.CONFIRMATION)
def __person_endl(self, line, state):
"""
Parses an ENDL TOKEN, producing a GRAMPS LdsOrd instance
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.build_lds_ord(state, LdsOrd.ENDOWMENT)
def __person_slgc(self, line, state):
"""
Parses an SLGC TOKEN, producing a GRAMPS LdsOrd instance
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.build_lds_ord(state, LdsOrd.SEAL_TO_PARENTS)
def build_lds_ord(self, state, lds_type):
"""
Parses an LDS ordinance, using the type passed to the routine
@param state: The current state
@type state: CurrentState
@param lds_type: The type of the LDS ordinance
@type lds_type: LdsOrd type
"""
sub_state = CurrentState()
sub_state.level = state.level + 1
sub_state.lds_ord = LdsOrd()
sub_state.lds_ord.set_type(lds_type)
sub_state.place = None
sub_state.place_fields = PlaceParser()
sub_state.person = state.person
state.person.lds_ord_list.append(sub_state.lds_ord)
self.__parse_level(sub_state, self.lds_parse_tbl, self.__ignore)
state.msg += sub_state.msg
if sub_state.place:
sub_state.place_fields.load_place(self.place_import,
sub_state.place,
sub_state.place.get_title())
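# Illustrative GEDCOM fragment handled by this routine (via __person_bapl
# and friends):
#     1 BAPL
#     2 DATE 12 JAN 1900
#     2 TEMP SLAKE
#     2 PLAC Salt Lake City, Utah
#     2 STAT COMPLETED
# Each sub-tag is dispatched through lds_parse_tbl (__lds_date, __lds_temple,
# __lds_plac, __lds_stat below), and the resulting LdsOrd is appended to the
# person's lds_ord_list.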
def __lds_temple(self, line, state):
"""
Parses the TEMP tag, looking up the code for a match.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
value = self.__extract_temple(line)
if value:
state.lds_ord.set_temple(value)
def __lds_date(self, line, state):
"""
Parses the DATE tag for the LdsOrd
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.lds_ord.set_date_object(line.data)
def __lds_famc(self, line, state):
"""
Parses the FAMC tag attached to the LdsOrd
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
gid = self.fid_map[line.data]
state.lds_ord.set_family_handle(self.__find_family_handle(gid))
def __lds_form(self, line, state):
"""
Parses the FORM tag that defines the place structure for a place.
This tag, if found, will override any global place structure.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.pf = PlaceParser(line)
def __lds_plac(self, line, state):
"""
Parses the PLAC tag attached to the LdsOrd. Create a new place if
needed and set the title.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
try:
state.place = self.__find_or_create_place(line.data)
state.place.set_title(line.data)
state.lds_ord.set_place_handle(state.place.handle)
except NameError:
return
def __lds_sour(self, line, state):
"""
Parses the SOUR tag attached to the LdsOrd.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
citation_handle = self.handle_source(line, state.level, state)
state.lds_ord.add_citation(citation_handle)
def __lds_note(self, line, state):
"""
Parses the NOTE tag attached to the LdsOrd.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.lds_ord, state.level+1, state)
def __lds_stat(self, line, state):
"""
Parses the STAT (status) tag attached to the LdsOrd.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
status = LDS_STATUS.get(line.data, LdsOrd.STATUS_NONE)
state.lds_ord.set_status(status)
def __person_famc(self, line, state):
"""
Handles the parsing of the FAMC line, which indicates which family the
person is a child of.
n FAMC @<XREF:FAM>@ {1:1}
+1 PEDI <PEDIGREE_LINKAGE_TYPE> {0:M} p.*
+1 <<NOTE_STRUCTURE>> {0:M} p.*
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sub_state = CurrentState()
sub_state.person = state.person
sub_state.level = state.level + 1
sub_state.ftype = None
sub_state.primary = False
gid = self.fid_map[line.data]
handle = self.__find_family_handle(gid)
self.__parse_level(sub_state, self.famc_parse_tbl, self.__undefined)
state.msg += sub_state.msg
# if the handle is not already in the person's parent family list, we
# need to add it to this list.
flist = [fam[0] for fam in state.person.get_parent_family_handle_list()]
if not handle in flist:
if sub_state.ftype and int(sub_state.ftype) in RELATION_TYPES:
state.person.add_parent_family_handle(handle)
else:
if state.person.get_main_parents_family_handle() == handle:
state.person.set_main_parent_family_handle(None)
state.person.add_parent_family_handle(handle)
# search childrefs
family, new = self.dbase.find_family_from_handle(handle, self.trans)
family.set_gramps_id(gid)
for ref in family.get_child_ref_list():
if ref.ref == state.person.handle:
if sub_state.ftype:
ref.set_mother_relation(sub_state.ftype)
ref.set_father_relation(sub_state.ftype)
break
else:
ref = ChildRef()
ref.ref = state.person.handle
if sub_state.ftype:
ref.set_mother_relation(sub_state.ftype)
ref.set_father_relation(sub_state.ftype)
family.add_child_ref(ref)
self.dbase.commit_family(family, self.trans)
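# Illustrative example of the FAMC handling above: for
#     0 @I2@ INDI
#     1 FAMC @F1@
#     2 PEDI adopted
# the family @F1@ is added to the person's parent-family list, and a ChildRef
# pointing back at the person is created (or updated) on the family with both
# parent relations set from the PEDI value, so the FAM and INDI sides stay
# consistent even if the FAM record never listed the child.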
def __person_famc_pedi(self, line, state):
"""
Parses the PEDI tag attached to an INDI.FAMC record. No values are set
at this point, because we have to do some post processing. Instead, we
assign the ftype field of the state variable. We convert the text from
the line to an index into the PEDIGREE_TYPES dictionary, which will map
to the correct ChildRefType.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.ftype = PEDIGREE_TYPES.get(line.data.lower(),
ChildRefType.UNKNOWN)
def __person_famc_note(self, line, state):
"""
Parses the INDI.FAMC.NOTE tag.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.person, state.level+1, state)
def __person_famc_primary(self, line, state):
"""
Parses the _PRIM tag on an INDI.FAMC tag. This value is stored in
the state record to be used later.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.primary = True
def __person_famc_sour(self, line, state):
"""
Parses the SOUR tag on an INDI.FAMC tag. GRAMPS has no corresponding
record on its family relationship, so we add the source to the Person
record.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
citation_handle = self.handle_source(line, state.level, state)
state.person.add_citation(citation_handle)
def __person_fams(self, line, state):
"""
Parses the INDI.FAMS record, which indicates the family in which the
person is a spouse.
n FAMS @<XREF:FAM>@ {1:1} p.*
+1 <<NOTE_STRUCTURE>> {0:M} p.*
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
gid = self.fid_map[line.data]
handle = self.__find_family_handle(gid)
state.person.add_family_handle(handle)
sub_state = CurrentState(level=state.level+1)
sub_state.obj = state.person
self.__parse_level(sub_state, self.opt_note_tbl, self.__ignore)
state.msg += sub_state.msg
def __person_asso(self, line, state):
"""
Parse the ASSO tag, add the referenced person to the person we
are currently parsing. The GEDCOM spec indicates that a valid ASSO tag
is:
n ASSO @<XREF:INDI>@ {0:M}
And the sub tags are:
ASSOCIATION_STRUCTURE:=
+1 RELA <RELATION_IS_DESCRIPTOR> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
The Gedcom spec notes that the ASSOCIATION_STRUCTURE
can only link to an INDIVIDUAL_RECORD
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
# find the id and person that we are referencing
handle = self.__find_person_handle(self.pid_map[line.data])
        # create a new PersonRef, assign the handle, and add the
        # PersonRef to the active person
sub_state = CurrentState()
sub_state.person = state.person
sub_state.level = state.level + 1
sub_state.ref = PersonRef()
sub_state.ref.ref = handle
sub_state.ignore = False
self.__parse_level(sub_state, self.asso_parse_tbl, self.__ignore)
state.msg += sub_state.msg
if not sub_state.ignore:
state.person.add_person_ref(sub_state.ref)
def __person_asso_rela(self, line, state):
"""
Parses the INDI.ASSO.RELA tag.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.ref.rel = line.data
def __person_asso_sour(self, line, state):
"""
Parses the INDI.ASSO.SOUR tag.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.ref.add_citation(self.handle_source(line, state.level, state))
def __person_asso_note(self, line, state):
"""
Parses the INDI.ASSO.NOTE tag.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.ref, state.level, state)
#-------------------------------------------------------------------
#
# FAM parsing
#
#-------------------------------------------------------------------
def __parse_fam(self, line):
"""
n @<XREF:FAM>@ FAM {1:1}
+1 <<FAMILY_EVENT_STRUCTURE>> {0:M}
+1 HUSB @<XREF:INDI>@ {0:1}
+1 WIFE @<XREF:INDI>@ {0:1}
+1 CHIL @<XREF:INDI>@ {0:M}
+1 NCHI <COUNT_OF_CHILDREN> {0:1}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<LDS_SPOUSE_SEALING>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<MULTIMEDIA_LINK>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
# create a family
gid = self.fid_map[line.token_text]
family = self.__find_or_create_family(gid)
# parse the family
state = CurrentState(level=1)
state.family = family
self.__parse_level(state, self.family_func, self.__family_even)
# handle addresses attached to families
if state.addr is not None:
father_handle = family.get_father_handle()
father = self.dbase.get_person_from_handle(father_handle)
if father:
father.add_address(state.addr)
self.dbase.commit_person(father, self.trans)
mother_handle = family.get_mother_handle()
mother = self.dbase.get_person_from_handle(mother_handle)
if mother:
mother.add_address(state.addr)
self.dbase.commit_person(mother, self.trans)
for child_ref in family.get_child_ref_list():
child_handle = child_ref.ref
child = self.dbase.get_person_from_handle(child_handle)
if child:
child.add_address(state.addr)
self.dbase.commit_person(child, self.trans)
# add default reference if no reference exists
self.__add_default_source(family)
# Add a default tag if provided
self.__add_default_tag(family)
self.__check_msgs(_("FAM (family) Gramps ID %s") % family.get_gramps_id(),
state, family)
# commit family to database
self.dbase.commit_family(family, self.trans, family.change)
def __family_husb(self, line, state):
"""
Parses the husband line of a family
n HUSB @<XREF:INDI>@ {0:1}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
handle = self.__find_person_handle(self.pid_map[line.data])
state.family.set_father_handle(handle)
def __family_wife(self, line, state):
"""
Parses the wife line of a family
n WIFE @<XREF:INDI>@ {0:1}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
handle = self.__find_person_handle(self.pid_map[line.data])
state.family.set_mother_handle(handle)
def __family_std_event(self, line, state):
"""
        Parses GEDCOM event tags that map to a GRAMPS standard type. Additional
        parsing is required for the event detail:
+1 <<EVENT_DETAIL>> {0:1} p.*
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event = line.data
event.set_gramps_id(self.emapper.find_next())
event_ref = EventRef()
event_ref.set_role(EventRoleType.FAMILY)
self.dbase.add_event(event, self.trans)
sub_state = CurrentState()
sub_state.person = state.person
sub_state.level = state.level+1
sub_state.event = event
sub_state.event_ref = event_ref
self.__parse_level(sub_state, self.event_parse_tbl, self.__undefined)
state.msg += sub_state.msg
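        # some exporters flag civil unions or unmarried partners through the
        # marriage event description; translate that into the family type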
if event.type == EventType.MARRIAGE:
descr = event.get_description()
if descr == "Civil Union":
state.family.type.set(FamilyRelType.CIVIL_UNION)
event.set_description('')
elif descr == "Unmarried":
state.family.type.set(FamilyRelType.UNMARRIED)
event.set_description('')
else:
state.family.type.set(FamilyRelType.MARRIED)
self.dbase.commit_event(event, self.trans)
event_ref.ref = event.handle
state.family.add_event_ref(event_ref)
def __family_even(self, line, state):
"""
        Parses GEDCOM EVEN tags, which are imported as custom event types.
        Additional parsing is required for the event detail:
+1 <<EVENT_DETAIL>> {0:1} p.*
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
event = Event()
event_ref = EventRef()
event_ref.set_role(EventRoleType.FAMILY)
event.set_gramps_id(self.emapper.find_next())
event.set_type(line.data)
self.dbase.add_event(event, self.trans)
sub_state = CurrentState()
sub_state.person = state.person
sub_state.level = state.level+1
sub_state.event = event
sub_state.event_ref = event_ref
self.__parse_level(sub_state, self.event_parse_tbl, self.__undefined)
state.msg += sub_state.msg
self.dbase.commit_event(event, self.trans)
event_ref.ref = event.handle
state.family.add_event_ref(event_ref)
def __family_chil(self, line, state):
"""
Parses the child line of a family
n CHIL @<XREF:INDI>@ {0:1}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sub_state = CurrentState()
sub_state.family = state.family
sub_state.level = state.level + 1
sub_state.mrel = None
sub_state.frel = None
self.__parse_level(sub_state, self.family_rel_tbl, self.__ignore)
state.msg += sub_state.msg
child = self.__find_or_create_person(self.pid_map[line.data])
reflist = [ref for ref in state.family.get_child_ref_list()
if ref.ref == child.handle]
if reflist: # The child has been referenced already
ref = reflist[0]
if sub_state.frel:
ref.set_father_relation(sub_state.frel)
if sub_state.mrel:
ref.set_mother_relation(sub_state.mrel)
            # make sure the child order matches the order in the FAM record:
self.set_child_ref_order(state.family, ref)
else:
ref = ChildRef()
ref.ref = child.handle
if sub_state.frel:
ref.set_father_relation(sub_state.frel)
if sub_state.mrel:
ref.set_mother_relation(sub_state.mrel)
state.family.add_child_ref(ref)
def set_child_ref_order(self, family, child_ref):
"""
Sets the child_ref in family.child_ref_list to be in the position
family.child_ref_count. This reorders the children to be in the
order given in the FAM section.
"""
family.child_ref_list.remove(child_ref)
family.child_ref_list.insert(family.child_ref_count, child_ref)
family.child_ref_count += 1
def __family_slgs(self, line, state):
"""
n SLGS {1:1}
+1 STAT <LDS_SPOUSE_SEALING_DATE_STATUS> {0:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sub_state = CurrentState()
sub_state.level = state.level + 1
sub_state.lds_ord = LdsOrd()
sub_state.lds_ord.set_type(LdsOrd.SEAL_TO_SPOUSE)
sub_state.place = None
sub_state.family = state.family
sub_state.place_fields = PlaceParser()
state.family.lds_ord_list.append(sub_state.lds_ord)
self.__parse_level(sub_state, self.lds_parse_tbl, self.__ignore)
state.msg += sub_state.msg
if sub_state.place:
sub_state.place_fields.load_place(self.place_import,
sub_state.place,
sub_state.place.get_title())
def __family_source(self, line, state):
"""
n SOUR @<XREF:SOUR>@ /* pointer to source record */ {1:1} p.*
+1 PAGE <WHERE_WITHIN_SOURCE> {0:1} p.*
+1 EVEN <EVENT_TYPE_CITED_FROM> {0:1} p.*
+1 DATA {0:1}
+1 QUAY <CERTAINTY_ASSESSMENT> {0:1} p.*
+1 <<MULTIMEDIA_LINK>> {0:M} p.*, *
+1 <<NOTE_STRUCTURE>> {0:M} p.*
| /* Systems not using source records */
n SOUR <SOURCE_DESCRIPTION> {1:1} p.*
+1 [ CONC | CONT ] <SOURCE_DESCRIPTION> {0:M}
+1 TEXT <TEXT_FROM_SOURCE> {0:M} p.*
+1 <<NOTE_STRUCTURE>> {0:M} p.*
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
citation_handle = self.handle_source(line, state.level, state)
state.family.add_citation(citation_handle)
def __family_object(self, line, state):
"""
+1 <<MULTIMEDIA_LINK>> {0:M}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data and line.data[0] == '@':
# Reference to a named multimedia object defined elsewhere
gramps_id = self.oid_map[line.data]
handle = self.__find_object_handle(gramps_id)
ref = MediaRef()
ref.set_reference_handle(handle)
state.family.add_media_reference(ref)
else:
(form, filename, title, note) = self.__obje(state.level + 1, state)
if filename == "":
self.__add_msg(_("Filename omitted"), line, state)
if form == "":
self.__add_msg(_("Form omitted"), line, state)
self.build_media_object(state.family, form, filename, title, note)
def __family_comm(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
note = line.data
state.family.add_note(note)
self.__skip_subordinate_levels(state.level+1, state)
def __family_note(self, line, state):
"""
+1 <<NOTE_STRUCTURE>> {0:M}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.family, state.level, state)
def __family_chan(self, line, state):
"""
+1 <<CHANGE_DATE>> {0:1}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_change(line, state.family, state.level+1, state)
def __family_attr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.family.add_attribute(line.data)
def __family_cust_attr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type(line.token_text)
attr.set_value(line.data)
state.family.add_attribute(attr)
def __obje(self, level, state):
"""
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sub_state = CurrentState()
sub_state.form = ""
sub_state.filename = ""
sub_state.title = ""
sub_state.note = ""
sub_state.level = level
self.__parse_level(sub_state, self.object_parse_tbl, self.__ignore)
state.msg += sub_state.msg
return (sub_state.form, sub_state.filename, sub_state.title,
sub_state.note)
def __object_ref_form(self, line, state):
"""
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.form = line.data
def __object_ref_titl(self, line, state):
"""
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.title = line.data
def __object_ref_file(self, line, state):
"""
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.filename = line.data
def __object_ref_note(self, line, state):
"""
+1 <<NOTE_STRUCTURE>> {0:M}
TODO: Fix this for full reference
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.note = line.data
def __family_adopt(self, line, state):
"""
n ADOP
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.frel = TYPE_ADOPT
state.mrel = TYPE_ADOPT
def __family_frel(self, line, state):
"""
        The _FREL key is an FTW/FTM-specific extension indicating the
        father/child relationship.
n _FREL <type>
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.frel = PEDIGREE_TYPES.get(line.data.strip().lower())
def __family_mrel(self, line, state):
"""
        The _MREL key is an FTW/FTM-specific extension indicating the
        mother/child relationship.
n _MREL <type>
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.mrel = PEDIGREE_TYPES.get(line.data.strip().lower())
def __family_stat(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.mrel = TYPE_BIRTH
state.frel = TYPE_BIRTH
def __event_object(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data and line.data[0] == '@':
# Reference to a named multimedia object defined elsewhere
gramps_id = self.oid_map[line.data]
handle = self.__find_object_handle(gramps_id)
ref = MediaRef()
ref.set_reference_handle(handle)
state.event.add_media_reference(ref)
else:
(form, filename, title, note) = self.__obje(state.level + 1, state)
if filename == "":
self.__add_msg(_("Filename omitted"), line, state)
if form == "":
self.__add_msg(_("Form omitted"), line, state)
self.build_media_object(state.event, form, filename, title, note)
def __event_type(self, line, state):
"""
Parses the TYPE line for an event.
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if state.event.get_type().is_custom():
if line.data in GED_TO_GRAMPS_EVENT:
name = EventType(GED_TO_GRAMPS_EVENT[line.data])
else:
val = self.gedsource.tag2gramps(line.data)
if val:
name = EventType((EventType.CUSTOM, val))
else:
try:
name = EventType((EventType.CUSTOM,
line.data))
except AttributeError:
name = EventType(EventType.UNKNOWN)
state.event.set_type(name)
else:
try:
if line.data not in GED_TO_GRAMPS_EVENT and \
line.data[0] != 'Y':
state.event.set_description(line.data)
except IndexError:
return
def __event_date(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.event.set_date_object(line.data)
def __event_place(self, line, state):
"""
Parse the place portion of a event. A special case has to be made for
Family Tree Maker, which violates the GEDCOM spec. It uses the PLAC
field to store the description or value associated with the event.
n PLAC <PLACE_VALUE> {1:1}
+1 FORM <PLACE_HIERARCHY> {0:1}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.is_ftw and state.event.type in FTW_BAD_PLACE:
state.event.set_description(line.data)
else:
# It is possible that we have already got an address structure
# associated with this event. In that case, we will remember the
# location to re-insert later, and set the place as the place name
# and primary location
place_handle = state.event.get_place_handle()
if place_handle:
place = self.dbase.get_place_from_handle(place_handle)
else:
place = self.__find_or_create_place(line.data)
place.set_title(line.data)
state.event.set_place_handle(place.handle)
sub_state = CurrentState()
sub_state.place = place
sub_state.level = state.level+1
sub_state.pf = PlaceParser()
self.__parse_level(sub_state, self.event_place_map,
self.__undefined)
state.msg += sub_state.msg
sub_state.pf.load_place(self.place_import, place, place.get_title())
self.dbase.commit_place(place, self.trans)
def __event_place_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.place, state.level+1, state)
def __event_place_form(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.pf = PlaceParser(line)
def __event_place_object(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data and line.data[0] == '@':
# Reference to a named multimedia object defined elsewhere
gramps_id = self.oid_map[line.data]
handle = self.__find_object_handle(gramps_id)
ref = MediaRef()
ref.set_reference_handle(handle)
state.place.add_media_reference(ref)
else:
# FIXME this should probably be level+1
(form, filename, title, note) = self.__obje(state.level, state)
if filename == "":
self.__add_msg(_("Filename omitted"), line, state)
if form == "":
self.__add_msg(_("Form omitted"), line, state)
self.build_media_object(state.place, form, filename, title, note)
def __event_place_sour(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.place.add_citation(self.handle_source(line, state.level, state))
def __place_map(self, line, state):
"""
n MAP
n+1 LONG <PLACE_LONGITUDE>
n+1 LATI <PLACE_LATITUDE>
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sub_state = CurrentState()
sub_state.level = state.level + 1
sub_state.place = state.place
self.__parse_level(sub_state, self.place_map_tbl, self.__undefined)
state.msg += sub_state.msg
state.place = sub_state.place
def __place_lati(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
        state.place.set_latitude(line.data)
def __place_long(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
        state.place.set_longitude(line.data)
def __event_addr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
free_form = line.data
sub_state = CurrentState(level=state.level+1)
sub_state.location = Location()
sub_state.note = []
sub_state.event = state.event
self.__parse_level(sub_state, self.parse_loc_tbl, self.__undefined)
state.msg += sub_state.msg
self.__merge_address(free_form, sub_state.location, line, state)
location = sub_state.location
note_list = sub_state.note
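        # reuse the event's existing place if one was already set (e.g. by a
        # PLAC line); otherwise create a place keyed on the raw address text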
place_handle = state.event.get_place_handle()
if place_handle:
place = self.dbase.get_place_from_handle(place_handle)
else:
place = self.__find_or_create_place(line.data)
place.set_title(line.data)
place_handle = place.handle
self.__add_location(place, location)
list(map(place.add_note, note_list))
state.event.set_place_handle(place_handle)
self.dbase.commit_place(place, self.trans)
def __add_location(self, place, location):
"""
@param place: A place object we have found or created
@type place: Place
@param location: A location we want to add to this place
@type location: gen.lib.location
"""
for loc in place.get_alternate_locations():
if loc.is_equivalent(location) == IDENTICAL:
return
place.add_alternate_locations(location)
def __event_phon(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
place_handle = state.event.get_place_handle()
if place_handle:
place = self.dbase.get_place_from_handle(place_handle)
codes = [place.get_code(), line.data]
place.set_code(' '.join(code for code in codes if code))
self.dbase.commit_place(place, self.trans)
def __event_privacy(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.event.set_privacy(True)
def __event_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.event, state.level+1, state)
def __event_inline_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data[0:13] == "Description: ":
state.event.set_description(line.data[13:])
else:
if not line.data:
# empty: discard, with warning and skip subs
# Note: level+2
self.__add_msg(_("Empty event note ignored"), line, state)
self.__skip_subordinate_levels(state.level+2, state)
else:
new_note = Note(line.data)
new_note.set_handle(create_id())
self.dbase.add_note(new_note, self.trans)
self.__skip_subordinate_levels(state.level+2, state)
state.event.add_note(new_note.get_handle())
def __event_source(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.event.add_citation(self.handle_source(line, state.level, state))
def __event_rin(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type(line.token_text)
attr.set_value(line.data)
state.event.add_attribute(attr)
def __event_attr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.event.add_attribute(line.data)
def __event_email(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type(line.token_text)
attr.set_value(line.data)
state.event.add_attribute(attr)
def __event_www(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type(line.token_text)
attr.set_value(line.data)
state.event.add_attribute(attr)
def __event_cause(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type(AttributeType.CAUSE)
attr.set_value(line.data)
state.event.add_attribute(attr)
sub_state = CurrentState()
sub_state.event = state.event
sub_state.level = state.level + 1
sub_state.attr = attr
self.__parse_level(sub_state, self.event_cause_tbl, self.__undefined)
state.msg += sub_state.msg
def __event_cause_source(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.attr.add_citation(self.handle_source(line, state.level, state))
def __event_age(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type(AttributeType.AGE)
attr.set_value(line.data)
state.event_ref.add_attribute(attr)
def __event_husb(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
while True:
line = self.__get_next_line()
if self.__level_is_finished(line, state.level):
break
elif line.token == TOKEN_AGE:
attr = Attribute()
attr.set_type(AttributeType.FATHER_AGE)
attr.set_value(line.data)
state.event_ref.add_attribute(attr)
elif line.token == TOKEN_WIFE:
                # wife event can be on the same level; if so, call it and finish
self.__event_wife(line, state)
break
def __event_wife(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
while True:
line = self.__get_next_line()
if self.__level_is_finished(line, state.level):
break
elif line.token == TOKEN_AGE:
attr = Attribute()
attr.set_type(AttributeType.MOTHER_AGE)
attr.set_value(line.data)
state.event_ref.add_attribute(attr)
elif line.token == TOKEN_HUSB:
                # husband event can be on the same level; if so, call it and finish
self.__event_husb(line, state)
break
def __event_agnc(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type(AttributeType.AGENCY)
attr.set_value(line.data)
state.event.add_attribute(attr)
def __event_time(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if hasattr(state, 'event'):
            # read in the time as an attribute of the event
attr = Attribute()
attr.set_type(AttributeType.TIME)
attr.set_value(line.data)
state.event.add_attribute(attr)
def __event_witness(self, line, state):
"""
Parse the witness of an event
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data and line.data[0] == "@":
# n _WITN @<XREF:INDI>@
# +1 TYPE <TYPE_OF_RELATION>
            assert state.event.handle  # event handle is required to be set
wit = self.__find_or_create_person(self.pid_map[line.data])
event_ref = EventRef()
event_ref.set_reference_handle(state.event.handle)
while True:
line = self.__get_next_line()
if self.__level_is_finished(line, state.level+1):
break
elif line.token == TOKEN_TYPE:
                    if line.data == "WITNESS_OF_MARRIAGE":
role = EventRoleType(
EventRoleType.WITNESS)
else:
role = EventRoleType(
(EventRoleType.CUSTOM, line.data))
event_ref.set_role(role)
wit.add_event_ref(event_ref)
self.dbase.commit_person(wit, self.trans)
else:
# n _WITN <TEXTUAL_LIST_OF_NAMES>
attr = Attribute()
attr.set_type(AttributeType.WITNESS)
attr.set_value(line.data)
state.event.add_attribute(attr)
def __person_adopt_famc(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
gid = self.fid_map[line.data]
handle = self.__find_family_handle(gid)
family = self.__find_or_create_family(gid)
sub_state = CurrentState(level=state.level+1)
sub_state.mrel = TYPE_BIRTH
sub_state.frel = TYPE_BIRTH
self.__parse_level(sub_state, self.parse_person_adopt,
self.__undefined)
state.msg += sub_state.msg
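        # if neither parent relation was qualified, both are still at the
        # BIRTH default; treat that as adoption by both parents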
if (int(sub_state.mrel) == ChildRefType.BIRTH and
int(sub_state.frel) == ChildRefType.BIRTH):
sub_state.mrel = sub_state.frel = TYPE_ADOPT
if state.person.get_main_parents_family_handle() == handle:
state.person.set_main_parent_family_handle(None)
state.person.add_parent_family_handle(handle)
reflist = [ref for ref in family.get_child_ref_list()
if ref.ref == state.person.handle]
if reflist:
ref = reflist[0]
ref.set_father_relation(sub_state.frel)
ref.set_mother_relation(sub_state.mrel)
else:
ref = ChildRef()
ref.ref = state.person.handle
ref.set_father_relation(sub_state.frel)
ref.set_mother_relation(sub_state.mrel)
family.add_child_ref(ref)
self.dbase.commit_family(family, self.trans)
def __person_adopt_famc_adopt(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data.strip() == "HUSB":
state.frel = TYPE_ADOPT
elif line.data.strip() == "WIFE":
state.mrel = TYPE_ADOPT
else:
state.mrel = TYPE_ADOPT
state.frel = TYPE_ADOPT
def __person_birth_famc(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
handle = self.__find_family_handle(self.fid_map[line.data])
if state.person.get_main_parents_family_handle() == handle:
state.person.set_main_parent_family_handle(None)
state.person.add_parent_family_handle(handle)
frel = mrel = ChildRefType.BIRTH
family, new = self.dbase.find_family_from_handle(handle, self.trans)
reflist = [ref for ref in family.get_child_ref_list()
if ref.ref == state.person.handle]
if reflist:
ref = reflist[0]
ref.set_father_relation(frel)
ref.set_mother_relation(mrel)
else:
ref = ChildRef()
ref.ref = state.person.handle
ref.set_father_relation(frel)
ref.set_mother_relation(mrel)
family.add_child_ref(ref)
self.dbase.commit_family(family, self.trans)
def __address_date(self, line, state):
"""
Parses the DATE line of an ADDR tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.addr.set_date_object(line.data)
def __address_adr1(self, line, state):
"""
Parses the ADR1 line of an ADDR tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
        # The ADDR may already have been parsed by the level above; warn
        # before overwriting an existing street value.
        if state.addr.get_street() != "":
self.__add_msg(_("Warn: ADDR overwritten"), line, state)
state.addr.set_street(line.data)
def __address_adr2(self, line, state):
"""
Parses the ADR2 line of an ADDR tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.addr.set_locality(line.data)
def __address_city(self, line, state):
"""
Parses the CITY line of an ADDR tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.addr.set_city(line.data)
def __address_state(self, line, state):
"""
Parses the STAE line of an ADDR tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.addr.set_state(line.data)
def __address_post(self, line, state):
"""
Parses the POST line of an ADDR tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.addr.set_postal_code(line.data)
def __address_country(self, line, state):
"""
Parses the country line of an ADDR tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.addr.set_country(line.data)
def __address_sour(self, line, state):
"""
Parses the SOUR line of an ADDR tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.addr.add_citation(self.handle_source(line, state.level, state))
def __address_note(self, line, state):
"""
Parses the NOTE line of an ADDR tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.addr, state.level+1, state)
def __citation_page(self, line, state):
"""
Parses the PAGE line of an SOUR instance tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.citation.set_page(line.data)
def __citation_date(self, line, state):
"""
Parses the DATE line of an SOUR instance tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.citation.set_date_object(line.data)
def __citation_data(self, line, state):
"""
Parses the DATA line of an SOUR instance tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sub_state = CurrentState(level=state.level+1)
sub_state.citation = state.citation
self.__parse_level(sub_state, self.citation_data_tbl, self.__undefined)
state.msg += sub_state.msg
def __citation_data_date(self, line, state):
state.citation.set_date_object(line.data)
def __source_text(self, line, state):
note = Note()
note.set(line.data)
gramps_id = self.dbase.find_next_note_gramps_id()
note.set_gramps_id(gramps_id)
note.set_type(NoteType.SOURCE_TEXT)
self.dbase.add_note(note, self.trans)
state.source.add_note(note.get_handle())
def __citation_data_text(self, line, state):
note = Note()
note.set(line.data)
gramps_id = self.dbase.find_next_note_gramps_id()
note.set_gramps_id(gramps_id)
note.set_type(NoteType.SOURCE_TEXT)
self.dbase.add_note(note, self.trans)
state.citation.add_note(note.get_handle())
def __citation_data_note(self, line, state):
self.__parse_note(line, state.citation, state.level, state)
def __citation_obje(self, line, state):
"""
Parses the OBJE line of an SOUR instance tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data and line.data[0] == '@':
# Reference to a named multimedia object defined elsewhere
gramps_id = self.oid_map[line.data]
handle = self.__find_object_handle(gramps_id)
ref = MediaRef()
ref.set_reference_handle(handle)
state.citation.add_media_reference(ref)
else:
(form, filename, title, note) = self.__obje(state.level+1, state)
if filename == "":
self.__add_msg(_("Filename omitted"), line, state)
if form == "":
self.__add_msg(_("Form omitted"), line, state)
self.build_media_object(state.citation, form, filename, title, note)
def __citation_refn(self, line, state):
"""
Parses the REFN line of an SOUR instance tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__add_msg(_("REFN ignored"), line, state)
self.__skip_subordinate_levels(state.level+1, state)
def __citation_even(self, line, state):
"""
Parses the EVEN line of an SOUR instance tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sattr = SrcAttribute()
sattr.set_type("EVEN")
sattr.set_value(line.data)
state.citation.add_attribute(sattr)
sub_state = CurrentState(level=state.level+1)
sub_state.citation = state.citation
self.__parse_level(sub_state, self.citation_even_tbl, self.__undefined)
state.msg += sub_state.msg
def __citation_even_role(self, line, state):
"""
Parses the EVEN line of an SOUR instance tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sattr = SrcAttribute()
sattr.set_type("EVEN:ROLE")
sattr.set_value(line.data)
state.citation.add_attribute(sattr)
def __citation_quay(self, line, state):
"""
Parses the QUAY line of an SOUR instance tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
try:
val = int(line.data)
except ValueError:
return
# If value is greater than 3, cap at 3
val = min(val, 3)
if val > 1:
state.citation.set_confidence_level(val+1)
else:
state.citation.set_confidence_level(val)
def __citation_note(self, line, state):
"""
Parses the NOTE line of an SOUR instance tag
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.citation, state.level+1, state)
#----------------------------------------------------------------------
#
# SOUR parsing
#
#----------------------------------------------------------------------
def __parse_source(self, name, level):
"""
n @<XREF:SOUR>@ SOUR {1:1}
+1 DATA {0:1}
+2 EVEN <EVENTS_RECORDED> {0:M}
+3 DATE <DATE_PERIOD> {0:1}
+3 PLAC <SOURCE_JURISDICTION_PLACE> {0:1}
+2 AGNC <RESPONSIBLE_AGENCY> {0:1}
+2 <<NOTE_STRUCTURE>> {0:M}
+1 AUTH <SOURCE_ORIGINATOR> {0:1}
+1 TITL <SOURCE_DESCRIPTIVE_TITLE> {0:1}
+1 ABBR <SOURCE_FILED_BY_ENTRY> {0:1}
+1 PUBL <SOURCE_PUBLICATION_FACTS> {0:1}
+1 TEXT <TEXT_FROM_SOURCE> {0:1}
+1 <<SOURCE_REPOSITORY_CITATION>> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
state = CurrentState()
state.source = self.__find_or_create_source(self.sid_map[name])
        # set a fallback title in case the SOUR record provides no TITL;
        # a real title found while parsing will overwrite it
state.source.set_title(_("No title - ID %s") %
state.source.get_gramps_id())
state.level = level
self.__parse_level(state, self.source_func, self.__undefined)
self.__check_msgs(_("SOUR (source) Gramps ID %s") %
state.source.get_gramps_id(),
state, state.source)
self.dbase.commit_source(state.source, self.trans, state.source.change)
def __source_attr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sattr = SrcAttribute()
sattr.set_type(line.token_text)
sattr.set_value(line.data)
state.source.add_attribute(sattr)
def __source_object(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data and line.data[0] == '@':
# Reference to a named multimedia object defined elsewhere
gramps_id = self.oid_map[line.data]
handle = self.__find_object_handle(gramps_id)
ref = MediaRef()
ref.set_reference_handle(handle)
state.source.add_media_reference(ref)
else:
(form, filename, title, note) = self.__obje(state.level+1, state)
if filename == "":
self.__add_msg(_("Filename omitted"), line, state)
if form == "":
self.__add_msg(_("Form omitted"), line, state)
self.build_media_object(state.source, form, filename, title, note)
def __source_chan(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_change(line, state.source, state.level+1, state)
def __source_undef(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__not_recognized(line, state.level+1, state)
def __source_repo(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if line.data and line.data[0] == '@':
# This deals with the standard GEDCOM
# SOURCE_REPOSITORY_CITATION: =
# n REPO @<XREF:REPO>@ {1:1}
# +1 <<NOTE_STRUCTURE>> {0:M}
# +1 CALN <SOURCE_CALL_NUMBER> {0:M}
# +2 MEDI <SOURCE_MEDIA_TYPE> {0:1}
gid = self.rid_map[line.data]
repo = self.__find_or_create_repository(gid)
elif line.data == '':
# This deals with the non-standard GEDCOM format found in Family
# Tree Maker for Windows, Broderbund Software, Banner Blue
# Division:
# SOURCE_REPOSITORY_CITATION: =
# n REPO {1:1}
# +1 <<NOTE_STRUCTURE>> {0:M}
# +1 CALN <SOURCE_CALL_NUMBER> {0:M}
# +2 MEDI <SOURCE_MEDIA_TYPE> {0:1}
#
# This format has no repository name. See http://west-
# penwith.org.uk/misc/ftmged.htm which points out this is
# incorrect
gid = self.dbase.find_next_repository_gramps_id()
repo = self.__find_or_create_repository(gid)
self.dbase.commit_repository(repo, self.trans)
else:
# This deals with the non-standard GEDCOM
# SOURCE_REPOSITORY_CITATION: =
# n REPO <NAME_OF_REPOSITORY> {1:1}
# +1 <<NOTE_STRUCTURE>> {0:M}
# +1 CALN <SOURCE_CALL_NUMBER> {0:M}
# +2 MEDI <SOURCE_MEDIA_TYPE> {0:1}
# This seems to be used by Heredis 8 PC. Heredis is notorious for
# non-standard GEDCOM.
gid = self.repo2id.get(line.data)
if gid is None:
gid = self.dbase.find_next_repository_gramps_id()
repo = self.__find_or_create_repository(gid)
self.repo2id[line.data] = repo.get_gramps_id()
repo.set_name(line.data)
self.dbase.commit_repository(repo, self.trans)
repo_ref = RepoRef()
repo_ref.set_reference_handle(repo.handle)
sub_state = CurrentState()
sub_state.repo_ref = repo_ref
sub_state.level = state.level + 1
self.__parse_level(sub_state, self.repo_ref_tbl, self.__undefined)
state.msg += sub_state.msg
state.source.add_repo_reference(repo_ref)
def __repo_ref_call(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.repo_ref.set_call_number(line.data)
#self.__skip_subordinate_levels(state.level+1, state)
def __repo_ref_medi(self, line, state):
name = line.data
mtype = MEDIA_MAP.get(name.lower(),
(SourceMediaType.CUSTOM, name))
state.repo_ref.set_media_type(mtype)
def __repo_ref_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.repo_ref, state.level+1, state)
def __repo_chan(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_change(line, state.repo, state.level+1, state)
def __source_abbr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.source.set_abbreviation(line.data)
def __source_agnc(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
attr = Attribute()
attr.set_type(AttributeType.AGENCY)
attr.set_value(line.data)
state.source.add_attribute(attr)
def __source_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.source, state.level+1, state)
def __source_auth(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.source.set_author(line.data)
def __source_publ(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.source.set_publication_info(line.data)
self.__skip_subordinate_levels(state.level+1, state)
def __source_title(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.source.set_title(line.data.replace('\n', ' '))
def __source_taxt_peri(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if state.source.get_title() == "":
state.source.set_title(line.data.replace('\n', ' '))
#----------------------------------------------------------------------
#
# OBJE parsing
#
#----------------------------------------------------------------------
def __parse_obje(self, line):
"""
n @XREF:OBJE@ OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1} p.*
+1 TITL <DESCRIPTIVE_TITLE> {0:1} p.*
+1 <<NOTE_STRUCTURE>> {0:M} p.*
+1 BLOB {1:1}
+2 CONT <ENCODED_MULTIMEDIA_LINE> {1:M} p.*
+1 OBJE @<XREF:OBJE>@ /* chain to continued object */ {0:1} p.*
+1 REFN <USER_REFERENCE_NUMBER> {0:M} p.*
+2 TYPE <USER_REFERENCE_TYPE> {0:1} p.*
+1 RIN <AUTOMATED_RECORD_ID> {0:1} p.*
+1 <<CHANGE_DATE>> {0:1} p.*
"""
gid = line.token_text.strip()
media = self.__find_or_create_object(self.oid_map[gid])
state = CurrentState()
state.media = media
state.level = 1
self.__parse_level(state, self.obje_func, self.__undefined)
        # Add the default source reference if none has been found
self.__add_default_source(media)
# Add a default tag if provided
self.__add_default_tag(media)
self.__check_msgs(_("OBJE (multi-media object) Gramps ID %s") %
media.get_gramps_id(), state, media)
# commit the person to the database
self.dbase.commit_media_object(media, self.trans, media.change)
def __obje_form(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
# TODO: FIX THIS!!!
state.media_form = line.data.strip()
self.__skip_subordinate_levels(state.level+1, state)
def __obje_file(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
res = urlparse(line.data)
if line.data != '' and (res.scheme == '' or res.scheme == 'file'):
(file_ok, filename) = self.__find_file(line.data, self.dir_path)
if state.media != "URL":
if not file_ok:
self.__add_msg(_("Could not import %s") % filename[0], line,
state)
path = filename[0].replace('\\', os.path.sep)
else:
path = line.data
state.media.set_path(path)
state.media.set_mime_type(get_type(path))
if not state.media.get_description():
state.media.set_description(path)
def __obje_title(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.media.set_description(line.data)
def __obje_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.media, state.level+1, state)
def __obje_blob(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__add_msg(_("BLOB ignored"), line, state)
self.__skip_subordinate_levels(state.level+1, state)
def __obje_refn(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__add_msg(_("REFN ignored"), line, state)
self.__skip_subordinate_levels(state.level+1, state)
def __obje_type(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__add_msg(_("Multimedia REFN:TYPE ignored"), line, state)
self.__skip_subordinate_levels(state.level+1, state)
def __obje_rin(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__add_msg(_("Mutimedia RIN ignored"), line, state)
self.__skip_subordinate_levels(state.level+1, state)
def __obje_chan(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_change(line, state.media, state.level+1, state)
def __person_attr_type(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if state.attr.get_type() == "":
if line.data in GED_TO_GRAMPS_EVENT:
name = GED_TO_GRAMPS_EVENT[line.data]
else:
val = self.gedsource.tag2gramps(line.data)
if val:
name = val
else:
name = line.data
state.attr.set_type(name)
else:
self.__ignore(line, state)
def __person_attr_source(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.attr.add_citation(self.handle_source(line, state.level, state))
def __person_attr_place(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
val = line.data
if state.attr.get_value() == "":
state.attr.set_value(val)
self.__skip_subordinate_levels(state.level+1, state)
else:
self.__ignore(line, state)
def __person_attr_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.attr, state.level+1, state)
#----------------------------------------------------------------------
#
# REPO parsing
#
#----------------------------------------------------------------------
def __parse_repo(self, line):
"""
n @<XREF:REPO>@ REPO {1:1}
+1 NAME <NAME_OF_REPOSITORY> {0:1} p.*
+1 <<ADDRESS_STRUCTURE>> {0:1} p.*
+1 <<NOTE_STRUCTURE>> {0:M} p.*
+1 REFN <USER_REFERENCE_NUMBER> {0:M} p.*
+1 RIN <AUTOMATED_RECORD_ID> {0:1} p.*
+1 <<CHANGE_DATE>> {0:1} p.
"""
repo = self.__find_or_create_repository(self.rid_map[line.token_text])
state = CurrentState()
state.repo = repo
state.level = 1
self.__parse_level(state, self.repo_parse_tbl, self.__ignore)
self.__check_msgs(_("REPO (repository) Gramps ID %s") %
repo.get_gramps_id(), state, repo)
self.dbase.commit_repository(repo, self.trans, repo.change)
def __repo_name(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.repo.set_name(line.data)
def __repo_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.repo, state.level+1, state)
def __repo_addr(self, line, state):
"""
Parses the REPOsitory and HEADer COPR <ADDRESS_STRUCTURE>
n ADDR <ADDRESS_LINE> {0:1}
+1 CONT <ADDRESS_LINE> {0:M}
+1 ADR1 <ADDRESS_LINE1> {0:1} (Street)
+1 ADR2 <ADDRESS_LINE2> {0:1} (Locality)
+1 CITY <ADDRESS_CITY> {0:1}
+1 STAE <ADDRESS_STATE> {0:1}
+1 POST <ADDRESS_POSTAL_CODE> {0:1}
+1 CTRY <ADDRESS_COUNTRY> {0:1}
n PHON <PHONE_NUMBER> {0:3}
Some repositories do not try to break up the address,
instead they put everything on a single line. Try to determine
if this happened, and try to fix it.
"""
free_form = line.data
sub_state = CurrentState(level=state.level + 1)
sub_state.addr = Address()
self.__parse_level(sub_state, self.parse_addr_tbl, self.__ignore)
state.msg += sub_state.msg
self.__merge_address(free_form, sub_state.addr, line, state)
state.repo.add_address(sub_state.addr)
def __repo_phon(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
address_list = state.repo.get_address_list()
if address_list:
address_list[0].set_phone(line.data)
def __repo_www(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
url = Url()
url.set_path(line.data)
url.set_type(UrlType(UrlType.WEB_HOME))
state.repo.add_url(url)
def __repo_email(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
url = Url()
url.set_path(line.data)
url.set_type(UrlType(UrlType.EMAIL))
state.repo.add_url(url)
def __location_date(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if not state.location:
state.location = Location()
state.location.set_date_object(line.data)
def __location_adr1(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if not state.location:
state.location = Location()
if state.location.get_street() != "":
self.__add_msg(_("Warn: ADDR overwritten"), line, state)
state.location.set_street(line.data)
def __location_adr2(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if not state.location:
state.location = Location()
state.location.set_locality(line.data)
def __location_city(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if not state.location:
state.location = Location()
state.location.set_city(line.data)
def __location_stae(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if not state.location:
state.location = Location()
state.location.set_state(line.data)
def __location_post(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if not state.location:
state.location = Location()
state.location.set_postal_code(line.data)
def __location_ctry(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if not state.location:
state.location = Location()
state.location.set_country(line.data)
def __location_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if not state.location:
state.location = Location()
if state.event:
self.__parse_note(line, state.event, state.level+1, state)
else:
self.__not_recognized(line, state.level, state)
def __optional_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.__parse_note(line, state.obj, state.level, state)
#----------------------------------------------------------------------
#
# HEAD parsing
#
#----------------------------------------------------------------------
def __parse_header(self):
"""
Handling of the lines subordinate to the HEAD GEDCOM tag
n HEAD {1:1}
+1 SOUR <APPROVED_SYSTEM_ID> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+2 NAME <NAME_OF_PRODUCT> {0:1}
+2 CORP <NAME_OF_BUSINESS> {0:1}
+3 <<ADDRESS_STRUCTURE>> {0:1}
+2 DATA <NAME_OF_SOURCE_DATA> {0:1}
+3 DATE <PUBLICATION_DATE> {0:1}
+3 COPR <COPYRIGHT_SOURCE_DATA> {0:1}
+1 DEST <RECEIVING_SYSTEM_NAME> {0:1*}
+1 DATE <TRANSMISSION_DATE> {0:1}
+2 TIME <TIME_VALUE> {0:1}
+1 SUBM @<XREF:SUBM>@ {1:1}
+1 SUBN @<XREF:SUBN>@ {0:1}
+1 FILE <FILE_NAME> {0:1}
+1 COPR <COPYRIGHT_GEDCOM_FILE> {0:1}
+1 GEDC {1:1}
+2 VERS <VERSION_NUMBER> {1:1}
+2 FORM <GEDCOM_FORM> {1:1}
+1 CHAR <CHARACTER_SET> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+1 LANG <LANGUAGE_OF_TEXT> {0:1}
+1 PLAC {0:1}
+2 FORM <PLACE_HIERARCHY> {1:1}
+1 NOTE <GEDCOM_CONTENT_DESCRIPTION> {0:1}
+2 [CONT|CONC] <GEDCOM_CONTENT_DESCRIPTION> {0:M}
* NOTE: Submissions to the Family History Department for Ancestral
File submission or for clearing temple ordinances must use a
DESTination of ANSTFILE or TempleReady.
"""
state = CurrentState(level=1)
self.__parse_level(state, self.head_parse_tbl, self.__undefined)
self.__check_msgs(_("HEAD (header)"), state, None)
def __header_sour(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.gedsource = self.gedmap.get_from_source_tag(line.data)
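        # Family Tree Writer/Maker exports need special handling later (for
        # example the PLAC quirk handled via FTW_BAD_PLACE), so remember it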
if line.data.strip() in ["FTW", "FTM"]:
self.is_ftw = True
# We will use the approved system ID as the name of the generating
# software, in case we do not get the name in the proper place
self.genby = line.data
if self.use_def_src:
sattr = SrcAttribute()
sattr.set_type(_("Approved system identification"))
sattr.set_value("%s" % self.genby)
self.def_src.add_attribute(sattr)
sub_state = CurrentState(level=state.level+1)
self.__parse_level(sub_state, self.header_sour_parse_tbl,
self.__undefined)
state.msg += sub_state.msg
# We can't produce the 'Generated by' statement till the end of the SOUR
# level, because the name and version may come in any order
if self.use_def_src:
# feature request 2356: avoid genitive form
sattr = SrcAttribute()
sattr.set_type(_("Generated By"))
sattr.set_value("%s %s" % (self.genby, self.genvers))
self.def_src.add_attribute(sattr)
def __header_sour_name(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
# This is where the name of the product that generated the GEDCOM file
# should appear, and this will overwrite the approved system ID (if any)
self.genby = line.data
if self.use_def_src:
sattr = SrcAttribute()
sattr.set_type(_("Name of software product"))
sattr.set_value(self.genby)
self.def_src.add_attribute(sattr)
def __header_sour_vers(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.genvers = line.data
if self.use_def_src:
sattr = SrcAttribute()
sattr.set_type(_("Version number of software product"))
sattr.set_value(self.genvers)
            self.def_src.add_attribute(sattr)
def __header_sour_corp(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
repo = Repository()
sub_state = CurrentState(level=state.level + 1)
sub_state.repo = repo
self.__parse_level(sub_state, self.header_corp_addr, self.__undefined)
state.msg += sub_state.msg
if self.use_def_src:
repo.set_name(_("Business that produced the product: %s") %
line.data)
rtype = RepositoryType()
rtype.set((RepositoryType.CUSTOM, _('GEDCOM data')))
repo.set_type(rtype)
self.dbase.add_repository(repo, self.trans)
repo_ref = RepoRef()
repo_ref.set_reference_handle(repo.handle)
mtype = SourceMediaType()
mtype.set((SourceMediaType.UNKNOWN, ''))
repo_ref.set_media_type(mtype)
self.def_src.add_repo_reference(repo_ref)
def __header_sour_data(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.use_def_src:
sattr = SrcAttribute()
sattr.set_type(_("Name of source data"))
sattr.set_value(line.data)
self.def_src.add_attribute(sattr)
sub_state = CurrentState(level=state.level+1)
self.__parse_level(sub_state, self.header_sour_data,
self.__undefined)
state.msg += sub_state.msg
def __header_sour_copr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.use_def_src:
sattr = SrcAttribute()
sattr.set_type(_("Copyright of source data"))
sattr.set_value(line.data)
self.def_src.add_attribute(sattr)
def __header_sour_date(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.use_def_src:
# Because there is a DATE tag, line.data is automatically converted
# to a Date object before getting to this point, so it has to be
# converted back to a string
text_date = str(line.data)
sattr = SrcAttribute()
sattr.set_type(_("Publication date of source data"))
sattr.set_value(text_date)
self.def_src.add_attribute(sattr)
def __header_file(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.use_def_src:
filename = os.path.basename(line.data).split('\\')[-1]
# feature request 2356: avoid genitive form
self.def_src.set_title(_("Import from %s") % filename)
def __header_copr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.use_def_src:
self.def_src.set_publication_info(line.data)
def __header_subm(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
+1 SUBM @<XREF:SUBM>@ {1:1}
        This should simply be a cross-reference to the correct Submitter
record. Note that there can be multiple Submitter records, so it is
necessary to remember which one should be applied.
"""
self.subm = line.data[1:-1]
sub_state = CurrentState(level=state.level+1)
self.__parse_level(sub_state, self.header_subm, self.__ignore)
state.msg += sub_state.msg
def __header_subn(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.use_def_src:
sattr = SrcAttribute()
sattr.set_type(_('Submission record identifier'))
sattr.set_value(line.token_text)
self.def_src.add_attribute(sattr)
def __header_lang(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.use_def_src:
sattr = SrcAttribute()
sattr.set_type(_('Language of GEDCOM text'))
sattr.set_value(line.data)
self.def_src.add_attribute(sattr)
def __header_dest(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
# FIXME: Gramps does not seem to produce a DEST line, so this processing
# seems to be useless
if self.genby == "GRAMPS":
self.gedsource = self.gedmap.get_from_source_tag(line.data)
# FIXME: This processing does not depend on DEST, so there seems to be
# no reason for it to be placed here. Perhaps it is supposed to be after
# all the SOUR levels have been processed, but self.genby was only
# assigned by the initial SOUR tag, so this could have been done there.
# Perhaps, as suggested by the text of the error message, it was
        # supposed to test whether the DEST was LEGACY, in which case the
# coding is now wrong.
if self.genby.upper() == "LEGACY":
fname = os.path.basename(self.filename)
WarningDialog(
_("Import of GEDCOM file %(filename)s with DEST=%(by)s, "
"could cause errors in the resulting database!")
% {'filename': fname, 'by': self.genby},
_("Look for nameless events.")
)
def __header_char(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
# +1 CHAR <CHARACTER_SET> {1:1}
# +2 VERS <VERSION_NUMBER> {0:1}
encoding = line.data
version = ""
while True:
line = self.__get_next_line()
if self.__level_is_finished(line, state.level+1):
break
elif line.token == TOKEN_VERS:
version = line.data
if self.use_def_src:
if version == "":
sattr = SrcAttribute()
sattr.set_type(_('Character set'))
sattr.set_value(encoding)
self.def_src.add_attribute(sattr)
else:
sattr = SrcAttribute()
sattr.set_type(_('Character set and version'))
sattr.set_value("%s %s" % (encoding, version))
self.def_src.add_attribute(sattr)
def __header_gedc(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
while True:
line = self.__get_next_line()
if self.__level_is_finished(line, state.level+1):
break
elif line.token == TOKEN_VERS:
if line.data[0] != "5":
self.__add_msg(_("GEDCOM version not supported"),
line, state)
if self.use_def_src:
sattr = SrcAttribute()
sattr.set_type(_('GEDCOM version'))
sattr.set_value(line.data)
self.def_src.add_attribute(sattr)
elif line.token == TOKEN_FORM:
if line.data != "LINEAGE-LINKED":
self.__add_msg(_("GEDCOM form not supported"), line, state)
if self.use_def_src:
sattr = SrcAttribute()
sattr.set_type(_('GEDCOM form'))
sattr.set_value(line.data)
self.def_src.add_attribute(sattr)
def __header_plac(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
sub_state = CurrentState(level=state.level+1)
self.__parse_level(sub_state, self.place_form, self.__undefined)
state.msg += sub_state.msg
def __place_form(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
self.place_parser.parse_form(line)
def __header_date(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
This processes the <TRANSMISSION_DATE>, i.e. the date when this [GEDCOM]
transmission was created (as opposed to the date when the source data
that was used to create the transmission was published or created
"""
# Because there is a DATE tag, line.data is automatically converted to a
# Date object before getting to this point, so it has to be converted
# back to a string
tx_date = str(line.data)
tx_time = ""
line = self.__get_next_line()
if self.__level_is_finished(line, state.level):
pass
elif line.token == TOKEN_TIME:
tx_time = str(line.data)
if self.use_def_src:
if tx_time == "":
sattr = SrcAttribute()
sattr.set_type(_('Creation date of GEDCOM'))
sattr.set_value(tx_date)
self.def_src.add_attribute(sattr)
else:
sattr = SrcAttribute()
sattr.set_type(_('Creation date and time of GEDCOM'))
sattr.set_value("%s %s" % (tx_date, tx_time))
self.def_src.add_attribute(sattr)
def __header_note(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.use_def_src:
self.__parse_note(line, self.def_src, 2, state)
def __header_subm_name(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
if self.use_def_src:
self.def_src.set_author(line.data)
def __parse_note(self, line, obj, level, state):
if line.token == TOKEN_RNOTE:
# reference to a named note defined elsewhere
#NOTE_STRUCTURE: =
# n NOTE @<XREF:NOTE>@ {1:1}
# +1 SOUR @<XREF:SOUR>@ {0:M}
obj.add_note(self.__find_note_handle(self.nid_map[line.data]))
else:
# Embedded note
#NOTE_STRUCTURE: =
# n NOTE [<SUBMITTER_TEXT> | <NULL>] {1:1}
# +1 [ CONC | CONT ] <SUBMITTER_TEXT> {0:M}
# +1 SOUR @<XREF:SOUR>@ {0:M}
if not line.data:
self.__add_msg(_("Empty note ignored"), line, state)
self.__skip_subordinate_levels(level+1, state)
else:
new_note = Note(line.data)
new_note.set_gramps_id(self.nid_map[""])
new_note.set_handle(create_id())
sub_state = CurrentState(level=state.level+1)
sub_state.note = new_note
self.__parse_level(sub_state, self.note_parse_tbl,
self.__undefined)
state.msg += sub_state.msg
# Add a default tag if provided
self.__add_default_tag(new_note)
self.dbase.commit_note(new_note, self.trans, new_note.change)
obj.add_note(new_note.get_handle())
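    # Illustrative example (made-up data): the two NOTE_STRUCTURE forms that
    # __parse_note distinguishes -- a reference to a note record defined
    # elsewhere, and an embedded note continued with CONC/CONT sub-lines.
    #
    #   1 NOTE @N12@
    #
    #   1 NOTE This is an embedded note
    #   2 CONC  that continues the same logical line.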
#----------------------------------------------------------------------
#
# NOTE parsing
#
#----------------------------------------------------------------------
def __parse_inline_note(self, line, level):
"""
Handling of lines subordinate to the NOTE GEDCOM tag
n @<XREF:NOTE>@ NOTE <SUBMITTER_TEXT> {1:1}
+1 [ CONC | CONT] <SUBMITTER_TEXT> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
state = CurrentState(level=1)
if not line.data and \
self.nid_map.clean(line.token_text) not in self.nid_map.map():
self.__add_msg(_("Empty note ignored"), line)
self.__skip_subordinate_levels(level, state)
else:
gid = self.nid_map[line.token_text]
handle = self.__find_note_handle(gid)
new_note = Note(line.data)
new_note.set_handle(handle)
new_note.set_gramps_id(gid)
sub_state = CurrentState(level=state.level)
sub_state.note = new_note
self.__parse_level(sub_state, self.note_parse_tbl, self.__undefined)
state.msg += sub_state.msg
self.dbase.commit_note(new_note, self.trans, new_note.change)
self.__check_msgs(_("NOTE Gramps ID %s") % new_note.get_gramps_id(),
state, None)
def __note_chan(self, line, state):
if state.note:
self.__parse_change(line, state.note, state.level+1, state)
def __parse_source_reference(self, citation, level, handle, state):
"""
Read the data associated with a SOUR reference.
"""
sub_state = CurrentState(level=level+1)
sub_state.citation = citation
sub_state.handle = handle
self.__parse_level(sub_state, self.citation_parse_tbl, self.__ignore)
state.msg += sub_state.msg
def __parse_header_head(self):
"""
Validate that this is a valid GEDCOM file.
"""
line = self.__get_next_line()
if line.token != TOKEN_HEAD:
raise GedcomError("%s is not a GEDCOM file" % self.filename)
def __parse_submission(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
        Handling of lines subordinate to the level 0 SUBN (Submission) GEDCOM
tag
         n @<XREF:SUBN>@ SUBN {1:1}
+1 SUBM @<XREF:SUBM>@ {0:1}
+1 FAMF <NAME_OF_FAMILY_FILE> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 ANCE <GENERATIONS_OF_ANCESTORS> {0:1}
+1 DESC <GENERATIONS_OF_DESCENDANTS> {0:1}
+1 ORDI <ORDINANCE_PROCESS_FLAG> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
"""
while True:
line = self.__get_next_line()
msg = ""
if self.__level_is_finished(line, state.level+1):
break
elif line.token == TOKEN_SUBM:
msg = _("Submission: Submitter")
elif line.token == TOKEN_UNKNOWN and line.token_text == "FAMF":
msg = _("Submission: Family file")
elif line.token == TOKEN_TEMP:
msg = _("Submission: Temple code")
elif line.token == TOKEN_UNKNOWN and line.token_text == "ANCE":
msg = _("Submission: Generations of ancestors")
elif line.token == TOKEN_UNKNOWN and line.token_text == "DESC":
msg = _("Submission: Generations of descendants")
elif line.token == TOKEN_UNKNOWN and line.token_text == "ORDI":
msg = _("Submission: Ordinance process flag")
else:
self.__not_recognized(line, state.level+1, state)
continue
if self.use_def_src and msg != "":
sattr = SrcAttribute()
sattr.set_type(msg)
sattr.set_value(line.data)
self.def_src.add_attribute(sattr)
def handle_source(self, line, level, state):
"""
Handle the specified source, building a source reference to
the object.
"""
citation = Citation()
if line.data and line.data[0] != "@":
title = line.data
handle = self.inline_srcs.get(title, create_id())
src = Source()
src.handle = handle
src.gramps_id = self.dbase.find_next_source_gramps_id()
self.inline_srcs[title] = handle
else:
src = self.__find_or_create_source(self.sid_map[line.data])
# We need to set the title to the cross reference identifier of the
# SOURce record, just in case we never find the source record. If we
            # didn't find the source record, then the source object would have
            # got deleted by Check and Repair because the record is empty. If we
# find the source record, the title is overwritten in
# __source_title.
src.set_title(line.data)
self.dbase.commit_source(src, self.trans)
self.__parse_source_reference(citation, level, src.handle, state)
citation.set_reference_handle(src.handle)
self.dbase.add_citation(citation, self.trans)
return citation.handle
def __parse_change(self, line, obj, level, state):
"""
CHANGE_DATE:=
> n CHAN {1:1}
> +1 DATE <CHANGE_DATE> {1:1}
> +2 TIME <TIME_VALUE> {0:1}
> +1 <<NOTE_STRUCTURE>> {0:M}
The Note structure is ignored, since we have nothing
corresponding in GRAMPS.
Based on the values calculated, attempt to convert to a valid
change time using time.strptime. If this fails (and it shouldn't
unless the value is meaningless and doesn't conform to the GEDCOM
spec), the value is ignored.
"""
tstr = None
dstr = None
dobj = None
while True:
line = self.__get_next_line()
if self.__level_is_finished(line, level):
break
elif line.token == TOKEN_TIME:
tstr = line.data
elif line.token == TOKEN_DATE:
#Lexer converted already to Date object
dobj = line.data
elif line.token == TOKEN_NOTE:
self.__skip_subordinate_levels(level+1, state)
else:
self.__not_recognized(line, level+1, state)
# Attempt to convert the values to a valid change time
if dobj:
dstr = "%s %s %s" % (dobj.get_day(), dobj.get_month(),
dobj.get_year())
try:
if tstr:
try:
tstruct = time.strptime("%s %s" % (dstr, tstr),
"%d %m %Y %H:%M:%S")
except ValueError:
#seconds is optional in GEDCOM
tstruct = time.strptime("%s %s" % (dstr, tstr),
"%d %m %Y %H:%M")
else:
tstruct = time.strptime(dstr, "%d %m %Y")
val = time.mktime(tstruct)
obj.change = val
except (ValueError, OverflowError):
# parse of time structure failed, so ignore. According to the
# Python manual: "The functions in this [time] module do not
# handle dates and times before the epoch or far in the future.
# The cut-off point in the future is determined by the C
# library; for Unix, it is typically in 2038." If the time is
# too far in the future, this gives OverflowError.
pass
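    # Illustrative example (made-up data): a CHANGE_DATE structure as handled
    # by __parse_change, with the strptime patterns the values are matched
    # against ("%d %m %Y %H:%M:%S", "%d %m %Y %H:%M" or "%d %m %Y").
    #
    #   1 CHAN
    #   2 DATE 12 MAR 2010
    #   3 TIME 14:32:05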
def build_media_object(self, obj, form, filename, title, note):
if isinstance(form, STRTYPE) and form.lower() == "url":
url = Url()
url.set_path(filename)
url.set_description(title)
url.set_type(UrlType.WEB_HOME)
obj.add_url(url)
else:
# to allow import of references to URLs (especially for import from
            # geni.com), do not try to find the files if they are blatantly URLs
res = urlparse(filename)
if filename != '' and (res.scheme == '' or res.scheme == 'file'):
(valid, path) = self.__find_file(filename, self.dir_path)
if not valid:
self.__add_msg(_("Could not import %s") % filename)
path = filename.replace('\\', os.path.sep)
else:
path = filename
photo_handle = self.media_map.get(path)
if photo_handle is None:
photo = MediaObject()
photo.set_path(path)
photo.set_description(title)
full_path = os.path.abspath(path)
if os.path.isfile(full_path):
photo.set_mime_type(get_type(full_path))
else:
photo.set_mime_type(MIME_MAP.get(form.lower(), 'unknown'))
self.dbase.add_object(photo, self.trans)
self.media_map[path] = photo.handle
else:
photo = self.dbase.get_object_from_handle(photo_handle)
oref = MediaRef()
oref.set_reference_handle(photo.handle)
if note:
gramps_id = self.nid_map[note]
oref.add_note(self.__find_note_handle(gramps_id))
obj.add_media_reference(oref)
def __build_event_pair(self, state, event_type, event_map, description):
"""
n TYPE <EVENT_DESCRIPTOR> {0:1} p.*
n DATE <DATE_VALUE> {0:1} p.*/*
n <<PLACE_STRUCTURE>> {0:1} p.*
n <<ADDRESS_STRUCTURE>> {0:1} p.*
n AGE <AGE_AT_EVENT> {0:1} p.*
n AGNC <RESPONSIBLE_AGENCY> {0:1} p.*
n CAUS <CAUSE_OF_EVENT> {0:1} p.*
n <<SOURCE_CITATION>> {0:M} p.*
n <<MULTIMEDIA_LINK>> {0:M} p.*, *
n <<NOTE_STRUCTURE>> {0:M} p.
"""
event = Event()
event_ref = EventRef()
event.set_gramps_id(self.emapper.find_next())
event.set_type(event_type)
if description and description != 'Y':
event.set_description(description)
self.dbase.add_event(event, self.trans)
sub_state = CurrentState()
sub_state.level = state.level + 1
sub_state.event_ref = event_ref
sub_state.event = event
sub_state.person = state.person
self.__parse_level(sub_state, event_map, self.__undefined)
state.msg += sub_state.msg
self.dbase.commit_event(event, self.trans)
event_ref.set_reference_handle(event.handle)
return event_ref
def __build_family_event_pair(self, state, event_type, event_map,
description):
event = Event()
event_ref = EventRef()
event.set_gramps_id(self.emapper.find_next())
event.set_type(event_type)
if description and description != 'Y':
event.set_description(description)
self.dbase.add_event(event, self.trans)
sub_state = CurrentState()
sub_state.family = state.family
sub_state.level = state.level+1
sub_state.event = event
sub_state.event_ref = event_ref
self.__parse_level(sub_state, event_map, self.__undefined)
state.msg += sub_state.msg
self.dbase.commit_event(event, self.trans)
event_ref.set_reference_handle(event.handle)
return event_ref
def __extract_temple(self, line):
def get_code(code):
if TEMPLES.is_valid_code(code):
return code
elif TEMPLES.is_valid_name(code):
return TEMPLES.code(code)
code = get_code(line.data)
if code:
return code
## Not sure why we do this. Kind of ugly.
code = get_code(line.data.split()[0])
if code:
return code
## Okay we have no clue which temple this is.
## We should tell the user and store it anyway.
self.__add_msg(_("Invalid temple code"), line, None)
return line.data
def __add_default_source(self, obj):
"""
Add the default source to the object.
"""
if self.use_def_src and len(obj.get_citation_list()) == 0:
citation = Citation()
citation.set_reference_handle(self.def_src.handle)
self.dbase.add_citation(citation, self.trans)
obj.add_citation(citation.handle)
def __add_default_tag(self, obj):
"""
Add the default tag to the object.
"""
if self.default_tag:
obj.add_tag(self.default_tag.handle)
def __subm_name(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.res.set_name(line.data)
def __subm_addr(self, line, state):
"""
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
free_form = line.data
sub_state = CurrentState(level=state.level + 1)
sub_state.location = state.res
self.__parse_level(sub_state, self.parse_loc_tbl, self.__undefined)
state.msg += sub_state.msg
self.__merge_address(free_form, state.res, line, state)
# Researcher is a sub-type of LocationBase, so get_street and set_street
# which are used in routines called from self.parse_loc_tbl work fine.
# Unfortunately, Researcher also has get_address and set_address, so we
# need to copy the street into that.
state.res.set_address(state.res.get_street())
def __subm_phon(self, line, state):
"""
n PHON <PHONE_NUMBER> {0:3}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.res.set_phone(line.data)
def __subm_email(self, line, state):
"""
n EMAIL <ADDRESS_EMAIL> {0:3}
@param line: The current line in GedLine format
@type line: GedLine
@param state: The current state
@type state: CurrentState
"""
state.res.set_email(line.data)
#-------------------------------------------------------------------------
#
# GedcomStageOne
#
#-------------------------------------------------------------------------
class GedcomStageOne(object):
"""
The GedcomStageOne parser scans the file quickly, looking for a few things.
This includes:
1. Character set encoding
2. Number of people and families in the list
3. Child to family references, since Ancestry.com creates GEDCOM files
without the FAMC references.
"""
__BAD_UTF16 = _("Your GEDCOM file is corrupted. "
"The file appears to be encoded using the UTF16 "
"character set, but is missing the BOM marker.")
__EMPTY_GED = _("Your GEDCOM file is empty.")
@staticmethod
def __is_xref_value(value):
"""
Return True if value is in the form of a XREF value. We assume that
if we have a leading '@' character, then we are okay.
"""
return value and value[0] == '@'
def __init__(self, ifile):
self.ifile = ifile
self.famc = defaultdict(list)
self.fams = defaultdict(list)
self.enc = ""
self.pcnt = 0
self.lcnt = 0
def __detect_file_decoder(self, input_file):
"""
Detects the file encoding of the file by looking for a BOM
(byte order marker) in the GEDCOM file. If we detect a UTF-16
encoded file, we must connect to a wrapper using the codecs
package.
"""
line = input_file.read(2)
if line == b"\xef\xbb":
input_file.read(1)
self.enc = "UTF8"
return input_file
elif line == b"\xff\xfe":
self.enc = "UTF16"
input_file.seek(0)
return codecs.EncodedFile(input_file, 'utf8', 'utf16')
elif not line :
raise GedcomError(self.__EMPTY_GED)
elif line[0] == b"\x00" or line[1] == b"\x00":
raise GedcomError(self.__BAD_UTF16)
else:
input_file.seek(0)
return input_file
def parse(self):
"""
Parse the input file.
"""
current_family_id = ""
reader = self.__detect_file_decoder(self.ifile)
for line in reader:
line = line.strip()
if not line:
continue
self.lcnt += 1
data = line.split(None, 2) + ['']
try:
(level, key, value) = data[:3]
value = value.strip()
level = int(level)
key = key.strip()
except:
LOG.warn(_("Invalid line %d in GEDCOM file.") % self.lcnt)
continue
if level == 0 and key[0] == '@':
if value == ("FAM", "FAMILY") :
current_family_id = key.strip()[1:-1]
elif value == ("INDI", "INDIVIDUAL"):
self.pcnt += 1
elif key in ("HUSB", "HUSBAND", "WIFE") and \
self.__is_xref_value(value):
self.fams[value[1:-1]].append(current_family_id)
elif key in ("CHIL", "CHILD") and self.__is_xref_value(value):
self.famc[value[1:-1]].append(current_family_id)
elif key == 'CHAR' and not self.enc:
assert(isinstance(value, STRTYPE))
self.enc = value
def get_famc_map(self):
"""
Return the Person to Child Family map
"""
return self.famc
def get_fams_map(self):
"""
Return the Person to Family map (where the person is a spouse)
"""
return self.fams
def get_encoding(self):
"""
Return the detected encoding
"""
return self.enc.upper()
def set_encoding(self, enc):
"""
Forces the encoding
"""
assert(isinstance(enc, STRTYPE))
self.enc = enc
def get_person_count(self):
"""
Return the number of INDI records found
"""
return self.pcnt
def get_line_count(self):
"""
Return the number of lines in the file
"""
return self.lcnt
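# Illustrative usage sketch (not executed; the file name is an assumption):
# how the stage-one scanner is typically driven before the full parse.
#
#   with open("example.ged", "rb") as ged_file:
#       stage_one = GedcomStageOne(ged_file)
#       stage_one.parse()
#       print(stage_one.get_encoding(), stage_one.get_person_count())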
#-------------------------------------------------------------------------
#
# make_gedcom_date
#
#-------------------------------------------------------------------------
def make_gedcom_date(subdate, calendar, mode, quality):
"""
Convert a GRAMPS date structure into a GEDCOM compatible date.
"""
retval = ""
(day, mon, year) = subdate[0:3]
(mmap, prefix) = CALENDAR_MAP.get(calendar, (MONTH, ""))
if year < 0:
year = -year
bce = " B.C."
else:
bce = ""
try:
retval = __build_date_string(day, mon, year, bce, mmap)
except IndexError:
print("Month index error - %d" % mon)
retval = "%d%s" % (year, bce)
if calendar == Date.CAL_SWEDISH:
        # If the Swedish calendar is used, format the date as ISO and append
        # (swedish) to indicate the calendar
if year and not mon and not day:
retval = "%i" % (year)
else:
retval = "%i-%02i-%02i" % (year, mon, day)
retval = retval + " (swedish)"
        # Skip the @#DUNKNOWN@ prefix as it does not seem to be used by other
        # genealogy applications. GRAMPS can handle it on import, but not with
        # (swedish) appended to explain which calendar the unknown refers to
prefix = ""
if prefix:
retval = "%s %s" % (prefix, retval)
if mode in DATE_MODIFIER:
retval = "%s %s" % (DATE_MODIFIER[mode], retval)
if quality in DATE_QUALITY:
retval = "%s %s" % (DATE_QUALITY[quality], retval)
return retval
def __build_date_string(day, mon, year, bce, mmap):
"""
Build a date string from the supplied information.
"""
if day == 0:
if mon == 0:
retval = '%d%s' % (year, bce)
elif year == 0:
retval = '(%s)' % mmap[mon]
else:
retval = "%s %d%s" % (mmap[mon], year, bce)
elif mon == 0:
retval = '%d%s' % (year, bce)
elif year == 0:
retval = "(%d %s)" % (day, mmap[mon])
else:
retval = "%d %s %d%s" % (day, mmap[mon], year, bce)
return retval
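# Illustrative examples (assumed inputs, not executed) of the strings produced
# by __build_date_string, assuming the module-level MONTH table maps 3 to MAR:
#
#   __build_date_string(12, 3, 2010, "", MONTH)      -> "12 MAR 2010"
#   __build_date_string(0, 3, 2010, "", MONTH)       -> "MAR 2010"
#   __build_date_string(0, 0, 2010, " B.C.", MONTH)  -> "2010 B.C."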
| gpl-2.0 | -2,624,625,516,947,080,000 | 36.786019 | 96 | 0.518786 | false |
regulusweb/django-oscar-api | oscarapi/tests/testloading.py | 1 | 4795 | import sys
import os
from django.test import TestCase
from django.conf import settings
from django.test.utils import override_settings
from oscar.core.loading import (
AppNotFoundError, ClassNotFoundError)
from oscarapi.loading import get_api_class, get_api_classes
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
class LoadingTestCase(TestCase):
def setUp(self):
# add the tests directory (which contains the test apps) to the path
sys.path.append(TEST_DIR)
def tearDown(self):
sys.path.remove(TEST_DIR)
class TestClassLoading(LoadingTestCase):
"""
Oscar's class loading utilities
"""
def test_load_oscar_classes_correctly(self):
HeaderSessionMiddleware, ApiGatewayMiddleWare = get_api_classes('oscarapi.middleware',
('HeaderSessionMiddleware',
'ApiGatewayMiddleWare'))
self.assertEqual('oscarapi.middleware', HeaderSessionMiddleware.__module__)
self.assertEqual('oscarapi.middleware', ApiGatewayMiddleWare.__module__)
def test_load_oscar_class_correctly(self):
HeaderSessionMiddleware = get_api_class('oscarapi.middleware', 'HeaderSessionMiddleware')
self.assertEqual('oscarapi.middleware', HeaderSessionMiddleware.__module__)
def test_raise_exception_when_bad_appname_used(self):
with self.assertRaises(AppNotFoundError):
get_api_classes('fridge.models', ('ApiKey', 'ApiKey2'))
def test_raise_exception_when_bad_classname_used(self):
with self.assertRaises(ClassNotFoundError):
get_api_class('oscarapi.models', 'Monkey')
def test_raise_importerror_if_app_raises_importerror(self):
"""
This tests that Oscar doesn't fall back to using the Oscar catalogue
app if the overriding app throws an ImportError.
"""
apps = list(settings.INSTALLED_APPS)
apps[apps.index('oscarapi')] = 'test_apps.oscarapi'
with override_settings(INSTALLED_APPS=apps):
with self.assertRaises(ImportError):
get_api_class('oscarapi.middleware', 'HeaderSessionMiddleware')
class ClassLoadingWithLocalOverrideTests(LoadingTestCase):
def setUp(self):
super(ClassLoadingWithLocalOverrideTests, self).setUp()
self.installed_apps = list(settings.INSTALLED_APPS)
self.installed_apps[self.installed_apps.index('oscarapi')] = 'test_apps.oscarapi'
def test_loading_class_defined_in_local_module(self):
with override_settings(INSTALLED_APPS=self.installed_apps):
(BasketLineSerializer,) = get_api_classes('oscarapi.serializers.basket', ('BasketLineSerializer',))
self.assertEqual('test_apps.oscarapi.serializers.basket', BasketLineSerializer.__module__)
def test_loading_class_which_is_not_defined_in_local_module(self):
with override_settings(INSTALLED_APPS=self.installed_apps):
(BasketSerializer,) = get_api_classes('oscarapi.serializers.basket', ('BasketSerializer',))
self.assertEqual('oscarapi.serializers.basket', BasketSerializer.__module__)
def test_loading_class_from_module_not_defined_in_local_app(self):
with override_settings(INSTALLED_APPS=self.installed_apps):
(PriceSerializer,) = get_api_classes('oscarapi.serializers.checkout', ('PriceSerializer',))
self.assertEqual('oscarapi.serializers.checkout', PriceSerializer.__module__)
def test_loading_classes_defined_in_both_local_and_oscar_modules(self):
with override_settings(INSTALLED_APPS=self.installed_apps):
(BasketLineSerializer, BasketSerializer) = get_api_classes('oscarapi.serializers.basket',
('BasketLineSerializer', 'BasketSerializer'))
self.assertEqual('test_apps.oscarapi.serializers.basket', BasketLineSerializer.__module__)
self.assertEqual('oscarapi.serializers.basket', BasketSerializer.__module__)
class ClassLoadingWithLocalOverrideWithMultipleSegmentsTests(LoadingTestCase):
def setUp(self):
super(ClassLoadingWithLocalOverrideWithMultipleSegmentsTests, self).setUp()
self.installed_apps = list(settings.INSTALLED_APPS)
self.installed_apps[self.installed_apps.index('oscarapi')] = 'test_apps.apps.oscarapi'
def test_loading_class_defined_in_local_module(self):
with override_settings(INSTALLED_APPS=self.installed_apps):
(BasketLineSerializer,) = get_api_classes('oscarapi.serializers.basket', ('BasketLineSerializer',))
self.assertEqual('test_apps.apps.oscarapi.serializers.basket', BasketLineSerializer.__module__)
| bsd-3-clause | 7,638,579,017,490,807,000 | 46.009804 | 116 | 0.684046 | false |
eckardm/archivematica | src/MCPServer/lib/linkTaskManagerGetUserChoiceFromMicroserviceGeneratedList.py | 1 | 6172 | #!/usr/bin/env python2
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage MCPServer
# @author Joseph Perry <[email protected]>
# Stdlib, alphabetical by import source
import logging
from lxml import etree
import os
import sys
# This project, alphabetical by import source
from linkTaskManager import LinkTaskManager
import archivematicaMCP
from linkTaskManagerChoice import choicesAvailableForUnits
from linkTaskManagerChoice import choicesAvailableForUnitsLock
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
from dicts import ReplacementDict, ChoicesDict
sys.path.append("/usr/share/archivematica/dashboard")
from main.models import StandardTaskConfig
LOGGER = logging.getLogger('archivematica.mcp.server')
class linkTaskManagerGetUserChoiceFromMicroserviceGeneratedList(LinkTaskManager):
def __init__(self, jobChainLink, pk, unit):
super(linkTaskManagerGetUserChoiceFromMicroserviceGeneratedList, self).__init__(jobChainLink, pk, unit)
self.choices = []
stc = StandardTaskConfig.objects.get(id=str(pk))
key = stc.execute
choiceIndex = 0
if isinstance(self.jobChainLink.passVar, list):
for item in self.jobChainLink.passVar:
LOGGER.debug('%s is ChoicesDict: %s', item, isinstance(item, ChoicesDict))
if isinstance(item, ChoicesDict):
# For display, convert the ChoicesDict passVar into a list
# of tuples: (index, description, replacement dict string)
for description, value in item.iteritems():
replacementDic_ = str({key: value})
self.choices.append((choiceIndex, description, replacementDic_))
choiceIndex += 1
break
else:
LOGGER.error("ChoicesDict not found in passVar: %s", self.jobChainLink.passVar)
raise Exception("ChoicesDict not found in passVar: {}".format(self.jobChainLink.passVar))
else:
LOGGER.error("passVar is %s instead of expected list",
type(self.jobChainLink.passVar))
raise Exception("passVar is {} instead of expected list".format(
type(self.jobChainLink.passVar)))
LOGGER.info('Choices: %s', self.choices)
preConfiguredIndex = self.checkForPreconfiguredXML()
if preConfiguredIndex is not None:
self.jobChainLink.setExitMessage("Completed successfully")
self.proceedWithChoice(index=preConfiguredIndex, agent=None)
else:
choicesAvailableForUnitsLock.acquire()
self.jobChainLink.setExitMessage('Awaiting decision')
choicesAvailableForUnits[self.jobChainLink.UUID] = self
choicesAvailableForUnitsLock.release()
def checkForPreconfiguredXML(self):
""" Check the processing XML file for a pre-selected choice.
Returns an index for self.choices if found, None otherwise. """
sharedPath = archivematicaMCP.config.get('MCPServer', "sharedDirectory")
xmlFilePath = os.path.join(
self.unit.currentPath.replace("%sharedPath%", sharedPath, 1),
archivematicaMCP.config.get('MCPServer', "processingXMLFile")
)
try:
tree = etree.parse(xmlFilePath)
root = tree.getroot()
except (etree.LxmlError, IOError) as e:
LOGGER.warning('Error parsing xml at %s for pre-configured choice', xmlFilePath, exc_info=True)
return None
for choice in root.findall(".//preconfiguredChoice"):
# Find the choice whose text matches this link's description
if choice.find("appliesTo").text == self.jobChainLink.pk:
# Search self.choices for desired choice, return index of
# matching choice
desiredChoice = choice.find("goToChain").text
for choice in self.choices:
index, description, replace_dict = choice
if desiredChoice == description or desiredChoice in replace_dict:
return index
return None
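    # Illustrative example (UUID and description are made up): the processing
    # XML fragment that checkForPreconfiguredXML looks for.  <appliesTo> must
    # equal this link's pk and <goToChain> must match one of self.choices.
    #
    #   <preconfiguredChoice>
    #     <appliesTo>f09847c2-ee51-429a-9478-a860477f6b8d</appliesTo>
    #     <goToChain>Example choice description</goToChain>
    #   </preconfiguredChoice>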
def xmlify(self):
"""Returns an etree XML representation of the choices available."""
ret = etree.Element("choicesAvailableForUnit")
etree.SubElement(ret, "UUID").text = self.jobChainLink.UUID
ret.append(self.unit.xmlify())
choices = etree.SubElement(ret, "choices")
for chainAvailable, description, rd in self.choices:
choice = etree.SubElement(choices, "choice")
etree.SubElement(choice, "chainAvailable").text = chainAvailable.__str__()
etree.SubElement(choice, "description").text = description
return ret
def proceedWithChoice(self, index, agent):
if agent:
self.unit.setVariable("activeAgent", agent, None)
choicesAvailableForUnitsLock.acquire()
try:
del choicesAvailableForUnits[self.jobChainLink.UUID]
except KeyError:
pass
choicesAvailableForUnitsLock.release()
#get the one at index, and go with it.
_, _, replace_dict = self.choices[int(index)]
rd = ReplacementDict.fromstring(replace_dict)
self.update_passvar_replacement_dict(rd)
self.jobChainLink.linkProcessingComplete(0, passVar=self.jobChainLink.passVar)
| agpl-3.0 | -6,378,924,127,823,612,000 | 44.382353 | 111 | 0.667693 | false |
ngsutils/ngsutils | ngsutils/bam/t/test_removeclipping.py | 1 | 1828 | #!/usr/bin/env python
'''
Tests for bamutils removeclipping
'''
import unittest
from ngsutils.bam.t import MockRead, assertIn
import ngsutils.bam
import ngsutils.bam.removeclipping
class RemoveClippingTest(unittest.TestCase):
def testUnmapped(self):
read = MockRead('foo', 'AAAATTTTCCCGGG', 'AAAABBBBBBBCCC', tags=[('XA', 1)])
code = ngsutils.bam.removeclipping.read_removeclipping(read)
self.assertEqual(code, 1)
self.assertEqual(read.qname, 'foo') # update occurs in place
assertIn(('XA', 1), read.tags) # added tag
self.assertEqual(read.seq, 'AAAATTTTCCCGGG')
self.assertEqual(read.qual, 'AAAABBBBBBBCCC')
def testRemoveClipping(self):
read = MockRead('foo', 'AAAATTTTCCCGGG', 'AAAABBBBBBBCCC', tags=[('XA', 1)], tid=1, pos=1, cigar='4S7M3S')
code = ngsutils.bam.removeclipping.read_removeclipping(read)
self.assertEqual(code, 2)
self.assertEqual(read.qname, 'foo') # update occurs in place
assertIn(('XA', 1), read.tags) # added tag
assertIn(('ZA', 4), read.tags) # added tag
assertIn(('ZB', 3), read.tags) # added tag
assertIn(('ZC', 0.5), read.tags) # added tag
self.assertEqual(read.seq, 'TTTTCCC')
self.assertEqual(read.qual, 'BBBBBBB')
def testNoClipping(self):
read = MockRead('foo', 'AAAATTTTCCCGGG', 'AAAABBBBBBBCCC', tags=[('XA', 1)], tid=1, pos=1, cigar='14M')
code = ngsutils.bam.removeclipping.read_removeclipping(read)
self.assertEqual(code, 0)
self.assertEqual(read.qname, 'foo') # update occurs in place
assertIn(('XA', 1), read.tags) # added tag
self.assertEqual(read.seq, 'AAAATTTTCCCGGG')
self.assertEqual(read.qual, 'AAAABBBBBBBCCC')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,194,755,863,590,038,500 | 36.306122 | 114 | 0.642779 | false |
tomkralidis/pywps | tests/test_capabilities.py | 1 | 4904 | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import unittest
import lxml
import lxml.etree
from pywps.app import Process, Service
from pywps.app.Common import Metadata
from pywps import get_ElementMakerForVersion
from pywps.tests import assert_pywps_version, client_for, assert_wps_version
WPS, OWS = get_ElementMakerForVersion("1.0.0")
class BadRequestTest(unittest.TestCase):
def test_bad_http_verb(self):
client = client_for(Service())
resp = client.put('')
assert resp.status_code == 405 # method not allowed
def test_bad_request_type_with_get(self):
client = client_for(Service())
resp = client.get('?Request=foo')
assert resp.status_code == 400
def test_bad_service_type_with_get(self):
client = client_for(Service())
resp = client.get('?service=foo')
exception = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert resp.status_code == 400
assert exception[0].attrib['exceptionCode'] == 'InvalidParameterValue'
def test_bad_request_type_with_post(self):
client = client_for(Service())
request_doc = WPS.Foo()
resp = client.post_xml('', doc=request_doc)
assert resp.status_code == 400
class CapabilitiesTest(unittest.TestCase):
def setUp(self):
def pr1(): pass
def pr2(): pass
self.client = client_for(Service(processes=[Process(pr1, 'pr1', 'Process 1', abstract='Process 1', keywords=['kw1a','kw1b'], metadata=[Metadata('pr1 metadata')]), Process(pr2, 'pr2', 'Process 2', keywords=['kw2a'], metadata=[Metadata('pr2 metadata')])]))
def check_capabilities_response(self, resp):
assert resp.status_code == 200
assert resp.headers['Content-Type'] == 'text/xml'
title = resp.xpath_text('/wps:Capabilities'
'/ows:ServiceIdentification'
'/ows:Title')
assert title != ''
names = resp.xpath_text('/wps:Capabilities'
'/wps:ProcessOfferings'
'/wps:Process'
'/ows:Identifier')
assert sorted(names.split()) == ['pr1', 'pr2']
keywords = resp.xpath('/wps:Capabilities'
'/wps:ProcessOfferings'
'/wps:Process'
'/ows:Keywords'
'/ows:Keyword')
assert len(keywords) == 3
metadatas = resp.xpath('/wps:Capabilities'
'/wps:ProcessOfferings'
'/wps:Process'
'/ows:Metadata')
assert len(metadatas) == 2
def test_get_request(self):
resp = self.client.get('?Request=GetCapabilities&service=WpS')
self.check_capabilities_response(resp)
        # case insensitive check
resp = self.client.get('?request=getcapabilities&service=wps')
self.check_capabilities_response(resp)
def test_post_request(self):
request_doc = WPS.GetCapabilities()
resp = self.client.post_xml(doc=request_doc)
self.check_capabilities_response(resp)
def test_get_bad_version(self):
resp = self.client.get('?request=getcapabilities&service=wps&acceptversions=2001-123')
exception = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert resp.status_code == 400
assert exception[0].attrib['exceptionCode'] == 'VersionNegotiationFailed'
def test_post_bad_version(self):
acceptedVersions_doc = OWS.AcceptVersions(OWS.Version('2001-123'))
request_doc = WPS.GetCapabilities(acceptedVersions_doc)
resp = self.client.post_xml(doc=request_doc)
exception = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert resp.status_code == 400
assert exception[0].attrib['exceptionCode'] == 'VersionNegotiationFailed'
def test_version(self):
resp = self.client.get('?service=WPS&request=GetCapabilities&version=1.0.0')
assert_wps_version(resp)
def test_version2(self):
resp = self.client.get('?service=WPS&request=GetCapabilities&acceptversions=2.0.0')
assert_wps_version(resp, version="2.0.0")
def load_tests(loader=None, tests=None, pattern=None):
if not loader:
loader = unittest.TestLoader()
suite_list = [
loader.loadTestsFromTestCase(BadRequestTest),
loader.loadTestsFromTestCase(CapabilitiesTest),
]
return unittest.TestSuite(suite_list)
| mit | -8,208,247,979,001,587,000 | 38.232 | 262 | 0.582586 | false |
RuggedPOD/ruggedpod-api | ruggedpod_api/services/gpio.py | 1 | 3539 | # RuggedPOD management API
#
# Copyright (C) 2015 Maxime Terras <[email protected]>
# Copyright (C) 2015 Pierre Padrixe <[email protected]>
# Copyright (C) 2015 Guillaume Giamarchi <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from ruggedpod_api import config
from ruggedpod_api.common import dependency
reset_dict = config.get_attr('reset')
onoff_dict = config.get_attr('onoff')
short_press = config.get_attr('short_press')
long_press = config.get_attr('long_press')
serial_select_dict = config.get_attr('serial_select')
oil_pump_dict = config.get_attr('oil_pump')
GPIO = dependency.lookup('gpio')
def init():
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
for blade_id in reset_dict:
GPIO.setup(reset_dict[blade_id], GPIO.OUT)
GPIO.output(reset_dict[blade_id], True)
for blade_id in onoff_dict:
GPIO.setup(onoff_dict[blade_id], GPIO.OUT)
GPIO.output(onoff_dict[blade_id], False)
for blade_id in serial_select_dict:
GPIO.setup(serial_select_dict[blade_id], GPIO.OUT)
GPIO.output(serial_select_dict[blade_id], False)
def set_blade_short_onoff(blade_id):
GPIO.output(onoff_dict[blade_id], True)
time.sleep(short_press)
GPIO.output(onoff_dict[blade_id], False)
def set_all_blades_short_onoff():
for blade_id in onoff_dict:
GPIO.output(onoff_dict[blade_id], True)
time.sleep(short_press)
for blade_id in onoff_dict:
GPIO.output(onoff_dict[blade_id], False)
def set_blade_long_onoff(blade_id):
GPIO.output(onoff_dict[blade_id], True)
time.sleep(long_press)
GPIO.output(onoff_dict[blade_id], False)
def set_all_blades_long_onoff():
for blade_id in onoff_dict:
GPIO.output(onoff_dict[blade_id], True)
time.sleep(long_press)
for blade_id in onoff_dict:
GPIO.output(onoff_dict[blade_id], False)
def set_blade_reset(blade_id):
GPIO.output(reset_dict[blade_id], False)
time.sleep(short_press)
GPIO.output(reset_dict[blade_id], True)
def set_all_blades_reset():
for blade_id in reset_dict:
GPIO.output(reset_dict[blade_id], False)
time.sleep(short_press)
    # Release the reset line again after the press interval
    for blade_id in reset_dict:
        GPIO.output(reset_dict[blade_id], True)
def start_blade_serial_session(blade_id):
for address_bit in serial_select_dict:
status = False
if (((int(blade_id) - 1) >> int(address_bit)) & 1):
status = True
GPIO.output(serial_select_dict[address_bit], status)
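# Illustrative example (assuming serial_select_dict is keyed by bit positions
# "0", "1", ...): selecting blade 3 shifts (3 - 1) = 0b10 through the address
# bits, so bit "0" is driven False and bit "1" is driven True.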
def set_blade_oil_pump_on(bladeId):
GPIO.output(oil_pump_dict[bladeId], True)
def set_all_blades_oil_pumps_on():
for bladeId in oil_pump_dict:
GPIO.output(oil_pump_dict[bladeId], True)
def set_blade_oil_pump_off(bladeId):
GPIO.output(oil_pump_dict[bladeId], False)
def set_all_blades_oil_pump_off():
for bladeId in oil_pump_dict:
GPIO.output(oil_pump_dict[bladeId], False)
| gpl-3.0 | -8,523,999,146,722,254,000 | 29.773913 | 72 | 0.694264 | false |
twestbrookunh/paladin | run.py | 1 | 14540 | #!/usr/bin/python
"""Wrapper script for running PALADIN and returning the results."""
import os
import json
import uuid
import boto3
import shutil
import logging
import argparse
import subprocess
def run_cmds(commands, retry=0, catchExcept=False):
"""Run commands and write out the log, combining STDOUT & STDERR."""
logging.info("Commands:")
logging.info(' '.join(commands))
p = subprocess.Popen(commands,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
exitcode = p.wait()
if stdout:
logging.info("Standard output of subprocess:")
for line in stdout.split('\n'):
logging.info(line)
if stderr:
logging.info("Standard error of subprocess:")
for line in stderr.split('\n'):
logging.info(line)
# Check the exit code
if exitcode != 0 and retry > 0:
msg = "Exit code {}, retrying {} more times".format(exitcode, retry)
logging.info(msg)
run_cmds(commands, retry=retry - 1)
elif exitcode != 0 and catchExcept:
msg = "Exit code was {}, but we will continue anyway"
logging.info(msg.format(exitcode))
else:
assert exitcode == 0, "Exit code {}".format(exitcode)
def run_paladin(input_str,
db_fp,
db_url,
output_folder,
threads=16,
temp_folder='/share/temp'):
"""Align a set of reads against a reference database with Paladin."""
# Use the read prefix to name the output and temporary files
read_prefix = input_str.split('/')[-1]
# Check to see if the output already exists, if so, skip this sample
output_fp = output_folder.rstrip('/') + '/' + read_prefix + '.json.gz'
if output_fp.startswith('s3://'):
# Check S3
logging.info("Making sure that the output path doesn't already exist")
bucket = output_fp[5:].split('/')[0]
prefix = '/'.join(output_fp[5:].split('/')[1:])
client = boto3.client('s3')
results = client.list_objects(Bucket=bucket, Prefix=prefix)
if 'Contents' in results:
msg = "Output already exists, skipping ({})"
logging.info(msg.format(output_fp))
return
else:
# Check local filesystem
if os.path.exists(output_fp):
msg = "Output already exists, skipping ({})"
logging.info(msg.format(output_fp))
return
# Get the reads
read_fp = get_reads_from_url(input_str, temp_folder)
# Align the reads against the reference database
logging.info("Aligning reads")
output_prefix = os.path.join(temp_folder, read_prefix)
run_cmds(["/usr/bin/paladin/paladin",
"align",
"-t", str(threads), # Threads
"-o", output_prefix, # Output path
"-u", "0", # Don't contact uniprot.org
db_fp, # Database prefix
read_fp]) # FASTQ input
# Output path
output_fp = os.path.join(temp_folder, read_prefix + "_uniprot.tsv")
assert os.path.exists(output_fp)
# Parse the alignment to get the abundance summary statistics
logging.info("Parsing the output ({})".format(output_fp))
paladin_results = parse_tsv(output_fp)
# Clean up the output and FASTQ
os.unlink(output_fp)
# Don't delete local files
if any([input_str.startswith("sra://"),
input_str.startswith("s3://"),
input_str.startswith("ftp://")]):
os.unlink(read_fp)
# Read in the logs
logging.info("Reading in the logs")
logs = open(log_fp, 'rt').readlines()
# Make an object with all of the results
out = {
"input_path": input_str,
"input": read_prefix,
"output_folder": output_folder,
"logs": logs,
"ref_db": db_fp,
"results": paladin_results
}
# Write out the final results JSON and write them to the output folder
return_results(out, read_prefix, output_folder, temp_folder)
def parse_tsv(fp):
"""Parse the Paladin output (in TSV format)."""
dat = []
with open(fp, "rt") as f:
header = f.readline().rstrip("\n").split("\t")
for line in f:
line = line.rstrip("\n").split("\t")
# Skip empty lines
if len(line) == 1:
continue
assert len(line) == len(header)
dat.append(dict(zip(header, line)))
logging.info("Read in {} lines from {}".format(len(dat), fp))
return dat
def get_sra(accession, temp_folder):
"""Get the FASTQ for an SRA accession via ENA."""
local_path = os.path.join(temp_folder, accession + ".fastq")
# Download from ENA via FTP
# See https://www.ebi.ac.uk/ena/browse/read-download for URL format
url = "ftp://ftp.sra.ebi.ac.uk/vol1/fastq"
folder1 = accession[:6]
url = "{}/{}".format(url, folder1)
if len(accession) > 9:
if len(accession) == 10:
folder2 = "00" + accession[-1]
elif len(accession) == 11:
folder2 = "0" + accession[-2:]
elif len(accession) == 12:
folder2 = accession[-3:]
else:
logging.info("This accession is too long: " + accession)
assert len(accession) <= 12
url = "{}/{}".format(url, folder2)
# Add the accession to the URL
url = "{}/{}/{}".format(url, accession, accession)
logging.info("Base info for downloading from ENA: " + url)
# There are three possible file endings
file_endings = ["_1.fastq.gz", "_2.fastq.gz", ".fastq.gz"]
# Try to download each file
for end in file_endings:
run_cmds(["curl",
"-o", os.path.join(temp_folder, accession + end),
url + end], catchExcept=True)
    # If any of the ENA URLs downloaded, combine them; otherwise fall back to NCBI SRA
if any([os.path.exists("{}/{}{}".format(temp_folder, accession, end))
for end in file_endings]):
# Combine them all into a single file
logging.info("Combining into a single FASTQ file")
with open(local_path, "wt") as fo:
cmd = "gunzip -c {}/{}*fastq.gz".format(temp_folder, accession)
gunzip = subprocess.Popen(cmd, shell=True, stdout=fo)
gunzip.wait()
# Clean up the temporary files
logging.info("Cleaning up temporary files")
for end in file_endings:
fp = "{}/{}{}".format(temp_folder, accession, end)
if os.path.exists(fp):
os.unlink(fp)
else:
logging.info("No files found on ENA, trying SRA")
run_cmds(["fastq-dump", "--outdir", temp_folder, accession])
# Check to see if the file was downloaded
msg = "File could not be downloaded from SRA: {}".format(accession)
assert os.path.exists(local_path), msg
# Return the path to the file
logging.info("Done fetching " + accession)
return local_path
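# Illustrative examples (made-up accessions) of the ENA FTP layout produced by
# the URL-building logic in get_sra:
#
#   SRR123456  (9 chars)  -> ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR123/SRR123456/SRR123456
#   SRR1234567 (10 chars) -> ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR123/007/SRR1234567/SRR1234567
#   (the endings _1.fastq.gz, _2.fastq.gz and .fastq.gz are tried for each)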
def get_reads_from_url(input_str, temp_folder):
"""Get a set of reads from a URL -- return the downloaded filepath and file prefix."""
logging.info("Getting reads from {}".format(input_str))
if input_str.startswith(('s3://', 'sra://', 'ftp://')) is False:
logging.info("Path does not start with s3://, sra://, or ftp://")
# Check that the path exists locally
assert os.path.exists(input_str), "Path does not exist locally"
logging.info("{} is a valid local path".format(input_str))
# Return the input string as the valid local path
return input_str
filename = input_str.split('/')[-1]
local_path = os.path.join(temp_folder, filename)
logging.info("Filename: " + filename)
logging.info("Local path: " + local_path)
# Get files from AWS S3
if input_str.startswith('s3://'):
logging.info("Getting reads from S3")
run_cmds(['aws', 's3', 'cp', '--quiet', '--sse', 'AES256', input_str, temp_folder])
return local_path
# Get files from an FTP server
elif input_str.startswith('ftp://'):
logging.info("Getting reads from FTP")
run_cmds(['wget', '-P', temp_folder, input_str])
return local_path
# Get files from SRA
elif input_str.startswith('sra://'):
accession = filename
logging.info("Getting reads from SRA: " + accession)
local_path = get_sra(accession, temp_folder)
return local_path
else:
raise Exception("Did not recognize prefix for reads: " + input_str)
def get_reference_database(ref_db, temp_folder):
"""Get a reference database."""
# Get files from AWS S3
if ref_db.startswith('s3://'):
logging.info("Getting reference database from S3: " + ref_db)
# Save the database to a local path with a random string prefix
# This avoids collision between multiple running processes
rand_string = uuid.uuid4()
local_folder = os.path.join(temp_folder, "{}.db/".format(rand_string))
os.mkdir(local_folder)
logging.info("Saving database to " + local_folder)
run_cmds(['aws', 's3', 'sync', '--quiet', '--sse', 'AES256',
ref_db, local_folder])
# If the database was downloaded from S3, delete it when finished
delete_db_when_finished = True
# Get the prefix for the database
for fp in os.listdir(local_folder):
if fp.endswith(".pro"):
prefix = fp[:-4]
local_fp = os.path.join(local_folder, prefix)
logging.info("Prefix for reference database is {}".format(local_fp))
return local_fp, delete_db_when_finished
else:
# Treat the input as a local path
logging.info("Getting reference database from local path: " + ref_db)
assert os.path.exists(ref_db)
# Get the prefix for the database
local_fp = None
for fp in os.listdir(ref_db):
if fp.endswith(".pro"):
prefix = fp[:-4]
local_fp = os.path.join(ref_db, prefix)
msg = "No Paladin database could be found in " + ref_db
assert local_fp is not None, msg
logging.info("Prefix for reference database is {}".format(ref_db))
# Don't delete this database when finished
delete_db_when_finished = False
return local_fp, delete_db_when_finished
def return_results(out, read_prefix, output_folder, temp_folder):
"""Write out the results as JSON and copy to the output folder."""
# Make a temporary file
temp_fp = os.path.join(temp_folder, read_prefix + '.json')
with open(temp_fp, 'wt') as fo:
json.dump(out, fo)
# Compress the output
run_cmds(['gzip', temp_fp])
temp_fp = temp_fp + '.gz'
if output_folder.startswith('s3://'):
# Copy to S3
run_cmds(['aws', 's3', 'cp', '--quiet', '--sse', 'AES256',
temp_fp, output_folder])
else:
# Copy to local folder
run_cmds(['mv', temp_fp, output_folder])
def make_scratch_space(scratch_size, temp_folder):
"""Create scratch space using a ramdisk."""
run_cmds(['mount', '-t', 'tmpfs', '-o', 'size={}g'.format(scratch_size),
'tmpfs', temp_folder])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""
    Align a set of reads against a reference database with Paladin, and save the results.
""")
parser.add_argument("--input",
type=str,
help="""Location for input file(s). Comma-separated.
(Supported: sra://, s3://, or ftp://).""")
parser.add_argument("--ref-db",
type=str,
help="""Folder containing reference database.
(Supported: s3://, ftp://, or local path).""")
parser.add_argument("--output-folder",
type=str,
help="""Folder to place results.
(Supported: s3://, or local path).""")
parser.add_argument("--scratch-size",
type=int,
default=None,
help="If specified, create a ramdisk of this size (Gb).")
parser.add_argument("--threads",
type=int,
default=16,
help="Number of threads to use aligning.")
parser.add_argument("--temp-folder",
type=str,
default='/share',
help="Folder used for temporary files (and ramdisk, if specified).")
args = parser.parse_args()
# Set up logging
log_fp = 'log.txt'
logFormatter = logging.Formatter('%(asctime)s %(levelname)-8s [run.py] %(message)s')
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.INFO)
# Write to file
fileHandler = logging.FileHandler(log_fp)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
# Also write to STDOUT
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
# Set up the scratch space
if args.scratch_size is not None:
logging.info("Setting up scratch space ({}Gb)".format(args.scratch_size))
make_scratch_space(args.scratch_size, args.temp_folder)
# Get the reference database
db_fp, delete_db_when_finished = get_reference_database(args.ref_db,
args.temp_folder)
logging.info("Reference database: " + db_fp)
# Align each of the inputs and calculate the overall abundance
for input_str in args.input.split(','):
logging.info("Processing input argument: " + input_str)
run_paladin(input_str, # ID for single sample to process
db_fp, # Local path to DB
args.ref_db, # URL of ref DB, used for logging
args.output_folder, # Place to put results
threads=args.threads,
temp_folder=args.temp_folder)
# Delete the reference database
if delete_db_when_finished:
logging.info("Deleting reference database: {}".format(db_fp))
shutil.rmtree(db_fp)
# Stop logging
logging.info("Done")
logging.shutdown()
| mit | 8,326,087,270,629,893,000 | 36.474227 | 92 | 0.574966 | false |
Bugaa92/Twitter-Tagcloud | src/redis_tagcloud.py | 1 | 6589 | """
Project Name: Twitter Tagcloud
Author: Alexandru Buliga
Email: [email protected]
"""
import sys
import re
import logging
import json
import redis
from threading import currentThread, enumerate, Lock, Thread
from collections import Counter, OrderedDict
from datetime import datetime
import tweepy
import resource
class TweetRetriever:
"""
Retrieves tweets using the Tweeter API provided by tweepy
Performs authentication with OAuth protocol
"""
def __init__(self, creds, stopwords):
"""
Constructor method
@param creds: dictionary containins authentication tokens
@param stopwords: set of words that are not taken into account
"""
self.stopwords = stopwords
self.creds = creds
# Result per page constant defined here
self.RESULTS_PER_PAGE = 100
# OAuth Authentication
self.auth = tweepy.OAuthHandler(
creds['consumer_key'], creds['consumer_secret'])
self.auth.secure = True
self.auth.set_access_token(
creds['access_token'], creds['access_token_secret'])
# Setting the Teepy API
self.api = tweepy.API(self.auth)
# Used to guarantee atomic access to the global counter
self.lock = Lock()
# List used to hold the word counts
self.globalCountList = []
# Setting local redis server
self.redisServer = redis.Redis('localhost')
self.redisServer.flushall()
def doWork(self, tweetList):
"""
Function associated with worker thread; gets all the words and its
occurances in the tweetList and updated the global counter
@param tweetList: a list of tweets for the worker thread
"""
# Get the list of words
wordList = []
cleanWordList = []
for tweetText in tweetList:
wordList.extend(re.findall(r"[\w']+", tweetText.lower()))
# Convert the strings to ascii by uncommenting the line after next
for word in wordList:
# word = word.encode('ascii', 'ignore')
if word not in self.stopwords:
self.redisServer.incr(word)
def run(self, durationInterval, wordCount):
"""
Tweets retrieval method
@param durationInterval: the duration of the data fetch process
@param wordCount [optional]: how many results to show
"""
counter = 0
startTime = None
tweetList = []
if durationInterval <= 0:
return
# Get tweepy cursor
cursor = tweepy.Cursor(self.api.search,
q = "a",
count = self.RESULTS_PER_PAGE,
result_type = "recent",
lang = "en").items()
# Iterate all tweets in the past durationInterval seconds using Cursor
while True:
try:
tweet = cursor.next()
except tweepy.TweepError:
print "Error. Exceeded Twitter request limit.", \
"Try again in 15 minutes."
break
# Store info about the tweet
postTime = tweet.created_at
tweetList.append(tweet.text)
if startTime:
# Check if durationInterval has passed and we have to stop
if abs((postTime - startTime).total_seconds()) > durationInterval:
# Start last worker thread
Thread(target = TweetRetriever.doWork,
args = (self, tweetList)).start()
break
else:
# Mark the current time of the first retrieved tweet and count
# durationInterval seconds starting from here
startTime = postTime
counter += 1
if counter == self.RESULTS_PER_PAGE:
# Start worker thread
Thread(target = TweetRetriever.doWork,
args = (self, tweetList)).start()
counter = 0
tweetList = []
# Wait threads to finish their work
main_thread = currentThread()
for thread in enumerate():
if thread is main_thread:
continue
thread.join()
keysList = self.redisServer.keys(pattern = '*')
for key in keysList:
self.globalCountList.append((key, int(self.redisServer.get(key))))
self.globalCountList.sort(key = lambda x: x[1], reverse = True)
if (wordCount >= 0):
# Count how many other words there are
otherWordCounter = self.globalCountList[wordCount::]
otherCount = sum(count for _, count in otherWordCounter)
# Update the global counter with the special word, other
self.globalCountList = self.globalCountList[:wordCount:]
self.globalCountList.append(('other', otherCount))
# Write results to a local JSON file
self.writeResult()
def writeResult(self):
"""
Write results to a local JSON file
"""
wcList = []
# Convert list elements to dictionary for pretty printing
for elem in self.globalCountList:
wcList.append(OrderedDict([('word', elem[0]), ('count', elem[1])]))
with open('results.json', 'w') as out_file:
json.dump(wcList, out_file, indent = 4, separators = (',', ': '))
def main():
"""
Main function definition
"""
# Disabling some ugly warnings
logging.captureWarnings(True)
# Verifying if the command-line arguments are passed
if len(sys.argv) < 2:
print "Error. Run: python tagcloud.py <duration> [<wordCount>]"
sys.exit()
# Getting the duration of the data fetch process
durationInterval = sys.argv[1]
wordCount = -1
try:
durationInterval = int(durationInterval)
except ValueError:
print "Error. Arguments must be numbers!"
sys.exit()
# If the word count argument is passed, get it
if len(sys.argv) == 3:
try:
wordCount = int(sys.argv[2])
except ValueError:
print "Error. Arguments must be numbers!"
sys.exit()
# Start retrieving tweets
tweetRetriever = TweetRetriever(resource.creds, resource.stopwords)
tweetRetriever.run(durationInterval, wordCount)
"""
Start main
"""
if __name__ == '__main__':
main()
| apache-2.0 | -4,796,557,039,112,764,000 | 29.646512 | 82 | 0.574139 | false |
kristyj/tweetcluster | twit_clustering/EMContainer.py | 1 | 16220 | __author__ = 'Kristy'
#methods for em clustering on a clustercontainer object
import LanguageModel
from collections import deque
from collections import defaultdict
import numpy as np
'''Classes that handle building topic-based language models from documents using Expectation Maximisation.
ThetaParams holds the information about the priors of ???'''
# TODO: Replace ThetaParams with a Clustercontainer object
class ThetaParams(object):
"""Holder for the EM mixture clusters, recording prior and posteriors and LMs for each topic.
LMs are accessible as clusterobject.clusterlms[i]
self.topics is a list, for which every item is an EMTopicClass. These contain details about what's in the topics."""
def __init__(self, clusterobject, lm_order, style = 'iyer-ostendorf', iters=5): #
#information from input
self.style = style
self.maxiters = iters
self.m = clusterobject.m
#initialise value placeholders
self.normaliser = 1
self.doc_associations = [[1/self.m] * self.m] # TODO: Hold in clusterobject.doc_cluster_asm
# TODO: THese are also in Clustercontainer
self.current_iter = 0
self.iter_changes = [100]
#self.do_em()
# TODO: Use clusterobject to make self.alltweets, self.totaldocs
# #read tweets into object, to iterate over in expectation
# self.alltweets = []
# for c in clusterobject.clusters:
# self.alltweets += [t for t in c] #break into list of text already
# self.totaldocs = len(self.alltweets)
# print(self.totaldocs, "tweets put in a list ") ##
# TODO: Use ClusterParams to set how the topics should be initialised
#style is passed to EMTopicClass and made into a language model of this style
if style == 'iyer-ostendorf': #initialise probabilities on split corpus
#initialise the topic objects from input
self.topics = [EMTopicClass(self.m, [tweet.wordlist for tweet in clusterobject.clusters[x]], lm_order, self.totaldocs, self.style) for x in range(self.m)]
elif style=='gildea-hoffman':
self.topics = [EMTopicClass(self.m, [t.giveWords() for t in self.alltweets], lm_order, self.totaldocs, self.style) for x in range(self.m)]
else:
print("no EM style was chosen")
exit()
def do_em(self): #
"""initialise the expectation-maximisation procedure"""
print("Now initialising EM on {} language models".format(self.m))
while self.maxiters > self.current_iter and ThetaParams.measure_change(self.iter_changes):
# while manually set iterations remain and the model changes significantly
# #todo: check the definition of the stopping point by self.iter_changes
self.current_iter += 1
print("Now performing expectaton\nThis may take some time as all documents are re-read and evaluated over each topic LM.")
self.expectation()
print("Now performing maximisation, this re-evaluates every n-gram, therefore is slow.")
self.maximisation()
print("The model changed as such:", self.iter_changes)
def __str__(self):
return "Expectation maximisation holder, the mixes are {}".format(self.topics)
@staticmethod
def measure_change(somelist):
'''For the m topics, look at the list of how much changed in the last iteration. Return True if there was a perceptible change, False if not.'''
# TODO: Research a relevant measure
return True if sum(somelist) > 0.01 else False
# def give_topic_lms(self):
# return [topic.lm.probs for topic in self.topics]
def expectation(self):
print("Now performing {}th iteration, expectation step".format(self.current_iter))
for tweet in self.alltweets:
#theta.topic.posteriors contains zij scores # TODO: Is this w|t?
self.calc_sentence_topic_iteration_prob(tweet.giveWords())
def maximisation(self):
"""adjust word to topic association (w|t) based on posteriors,
read through corpus again and update lm counts of each gram"""
print("Now performing {}th iteration, maximisation step".format(self.current_iter))
#clear the counts
for topic in self.topics:
#topic.lm.grams = {} grams retains the original counts
topic.lm.interims = defaultdict(dict)
topic.lm.probs = defaultdict(dict)
topic.temp_prior = sum(topic.posteriors)
#make a new weighted count dictionary [interims] for each bigram including the sentence weight (zij) in the topic
print("Recounting n-grams to include weight from expectation step.")
for tweet in self.alltweets:
sentencelist = [LanguageModel.startsymbol]+ tweet.giveWords() + [LanguageModel.endsymbol]
for topic in self.topics: #for each topic
current_zij = topic.posteriors.popleft()
for i in range(1, topic.lm.order + 1): #for each order (unigram+)
#make each gram
order_grams = LanguageModel.duplicate_to_n_grams(i, sentencelist, delete_sss=False)
#update the weighted counts
for gram in order_grams:
topic.lm.interims[i][gram] = topic.lm.interims[i].get(gram, 0) + current_zij #multiply by adding zij whenever encountered
#for each topic, adjust all the probs dictionaries
temp_total_zij = sum([topic.temp_prior for topic in self.topics])
self.reset_posteriors()
self.iter_changes = []
print("Recalculating n-gram probabilities based on the new weighted counts.")
for topic in self.topics:
#update priors
print('T', end='')
self.iter_changes.append(topic.temp_prior - topic.prior) #record if it changed
topic.prior = topic.temp_prior / temp_total_zij
#TODO #no idea what to do for unigrams, currently don't mess with their initial probability, just normalise by the zij's collected for each unigram
topic.lm.probs[1] = LanguageModel.counts_to_smoothed_prob(topic.lm.interims[1], sum(list(topic.lm.interims[1].values())), smoothing='nul')
for i in range(2, topic.lm.order +1): #begin with bigrams
topic.lm.probs[i] = {} #empty the dictionary
for words, weightedcount in topic.lm.interims[i].items():
#These use the terminology of Iyer and Ostendorf pg.2
bqzij = topic.lm.all_other_last_words(words, weighted_count=True, include_words=False)
bq = topic.lm.all_other_last_words(words, weighted_count=False, include_words=False)
weighted_count_all_endings = sum(bqzij)
if weighted_count_all_endings == 0:
print("weighted_count_all_endings was 0") #TODO: THis means things are not the same
exit()
ml = weightedcount / weighted_count_all_endings
inside_fraction = sum([x / y for x, y in zip(bqzij, bq)]) #sum(q of (zij * count nbq)/(count nbq)
bo = ( inside_fraction ) / (weighted_count_all_endings + inside_fraction )
if bo >1 or ml >1:
print("Warning, the backoff weight exceeds 1! orthe maximum likelihood value is >1" , bo, ml, )
print("word", words)
print("weightedcount numerator", weightedcount)
print("Weightedcount denominator", weighted_count_all_endings)
exit()
topic.lm.probs[i][words]= (1- bo) * ml + bo * topic.lm.probs[i-1][words[1:]]
print("checking the progression back to probability dictionary", sum(list(topic.lm.probs[1].values())))
def __str__(self):
return """{} mixes maintained with:\n priors: {}\n
posteriors beginning: {}\nEach has a language model initialised on an initial cluster"""\
.format(self.m, str([topic.posteriors[:3] for topic in self.topics]), )
def calc_sentence_topic_iteration_prob(self, sentencelist):
self.normaliser = 0
#calculate numerators
sentenceprobs = [(topic.lm.give_sentence_probability(sentencelist, log=True)**10) for topic in self.topics]
priors = [topic.prior for topic in self.topics]
numerators = [sentenceprobs[x] * priors[x] for x in range(self.m)]
#calclate the denominator
normalizer = sum(numerators)
zij = [numerators[x]/normalizer for x in range(self.m)]
#extend the posteriors deque for each topic
for ti in range(len(self.topics)):
self.topics[ti].posteriors.extend([zij[ti]])
def reset_posteriors(self):
for topic in self.topics:
topic.posteriors = deque()
def give_as_clusters(self):
from lib.ClusterContainer import ClusterContainer
cluster_obj = ClusterContainer
cluster_obj.clusters = [[] for x in range(self.m)]
if len(self.doc_associations) < 3:
print("There is too little info in self.doc_associations")
exit()
for document_number, doc_scores in enumerate(self.doc_associations):
max_topic, max_score = max(enumerate(doc_scores), key=lambda x: x[1])
cluster_obj.clusters[max_topic].append(self.alltweets[document_number])
return cluster_obj
def print_strongest_doc_associations(self):
for topic_index, topic in enumerate(self.topics):
print("Now printing topic {}".format(topic_index))
for doc_index, document_scores in enumerate(self.doc_associations):
if max(enumerate(document_scores), key = lambda x:x[1])[0]==topic_index:
print(self.alltweets[doc_index])
print('topic break *******************************************************')
class EMTopicClass(object):
def __init__(self, totalclasses, tweetlist_from_cluster, lm_order, totaldocs, style):
'''Initiate the parameters for each topic in the EM mix - posteriors, priors, also LM information per topic'''
#posterior is a list of the posterior for each tweet in order (essentially a list of zij for the same j)
self.posteriors = deque() #updated for each iteration
if style == 'iyer-ostendorf':
# self.prior = 1/totaldocs #for Iyer and Ostendorf, reflecting that some topic models start with larger prior
self.lm = LanguageModel(tweetlist_from_cluster, lm_order, smoothing='witten-bell', by_document=False)
elif style=='gildea-hoffman':
# self.prior = 1/totalclasses #set at start, update at each iteration
self.lm = LanguageModel(tweetlist_from_cluster, lm_order, smoothing='add-one', by_document=False)
else:
print("Define a valid style for the language model created for the EM mix")
exit()
#self.temp_sent_prob = float #TODO: Deal with this overflow???
class GHThetaParams(ThetaParams):
'''EM class with functions specific to the methods used by Gildea and Hoffman.
This means the expectation and maximisation override the Iyer Ostendorf default'''
def __init__(self, *args, **kwargs):
super(self.__class__,self).__init__(*args, **kwargs)
#make a language model over all the documents recording wordcounts
self.universal_lm = LanguageModel([t.giveWords() for t in self.alltweets], 1, smoothing='nul', by_document=True)
# #initialise really random topic/document associations
# self.doc_associations = []
# for x in range(self.totaldocs):
# self.doc_associations.append(GHThetaParams.get_random_to_one(self.m))
# print("This is when I first build doc_associations")
# print(self.doc_associations)
#self.doc_associations = [[]for x in range(self.totaldocs)]
#Retrain self.topics
for topic in self.topics:
topic.posteriors = [{word: 0 for word in document} for document in self.universal_lm.tdm]#TODO initialise some list with the universal_lm_dimensions
print(topic.posteriors[0])
#self.do_em() #includes expectation, maximisation etc.
# @staticmethod
# def get_random_to_one(n):
# randomlist = [random.random() for a in range(n)]
# randomsum = sum(randomlist)
# #print([r/randomsum for r in randomlist])
# return [r/randomsum for r in randomlist]
def expectation(self):
#calculate P(t|w,d), ie per topic, and per topic it is per document[word]
#initialise the normalizer as 0 for each word for each document
############denominator = [{(word, doc_index): 0 for word in document} for doc_index, document in enumerate(self.universal_lm.tdm)]
for doc_index, document in enumerate(self.alltweets):
for word in self.universal_lm.tdm[doc_index]:
numerators = []
for topic_index, topic in enumerate(self.topics):
numerators.append( topic.lm.probs[1][word] * float(self.doc_associations[doc_index][topic_index]))
denominator = sum(numerators)
for topic_index, topic in enumerate(self.topics):
new_value = float(numerators[topic_index]/denominator)
topic.posteriors[doc_index][word] = new_value
def maximisation(self):
#update P(w|t) from Gildea Hoffman m-step (1)
for topic_index, topic in enumerate(self.topics):
denominator = 0
#make numerator; save denominator:
for word in topic.lm.grams[1]: #each topic model is initialised over all documents, that could be made more efficient
#print([self.universal_lm.tdm[doc_index].get(word, 0) * topic.posteriors[doc_index].get(word, 0) for doc_index in range(self.totaldocs)])
try:
summing = 0
for doc_index in range(self.totaldocs):
count_of_word = float(self.universal_lm.tdm[doc_index].get(word, 0))
e_step_result = topic.posteriors[doc_index].get(word, 0) #this is where it lies....
summing += count_of_word * e_step_result
# topic.lm.interims[word] = sum(
# [self.universal_lm.tdm[doc_index].get(word, 0) * float(topic.posteriors[doc_index].get(word, 0)) for doc_index in range(self.totaldocs)]
# ) #numerator
topic.lm.interims[word] = float(summing)
denominator += topic.lm.interims[word]
except TypeError:
print(topic_index, word, "This word didn't work.")
for word in topic.lm.grams[1]:
topic.lm.probs[1][word] = topic.lm.interims.get(word,0)/denominator
#update P(t|d) from Gildea Hoffman m-step (2)
self.doc_associations = []
print(self.doc_associations, "Should be really empty!")
for doc_index, document in enumerate(self.universal_lm.tdm):
denominator = 0; numerators = []
for topic in self.topics:
numerators.append(sum([self.universal_lm.tdm[doc_index].get(word,0) * topic.posteriors[doc_index][word] for word in document]))
denominator = sum(numerators)
new_topic_associations = [n/denominator for n in numerators] #per topic, for one document
self.doc_associations.append(new_topic_associations)
#update P(t|d)
#reset the information needed for the expectation step?
def log_likelihood(self):
pass
def n_in_topic(self, topic, word, document):
return self.topics[topic].lm.tdm[document].get(word, 0)
| gpl-3.0 | 1,185,433,029,836,612,400 | 51.531353 | 166 | 0.616338 | false |
FAB4D/humanitas | analysis/preproc/merge_series.py | 1 | 6091 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
description = '''
This script merges series of the same product within each region.
Also for plotting.
Author: Ching-Chia
'''
wholesale_daily = True
retail_daily = False
retail_weekly = False
output_by_city = False
merge_by_prod_reg = False
plot_all = False
if wholesale_daily+retail_daily+retail_weekly != 1:
raise Exception('exactly one option being True required!')
elif wholesale_daily:
csv_in = os.getcwd()+'/wholesale_daily/csv_all/india_timeseries_wholesale_daily_interpolated_0.6.csv' #linear interpolated
# csv_in = os.getcwd()+'/wholesale_daily/csv_all/india_timeseries_wholesale_daily_0.59.csv' #not interpolated
# csv_in = os.getcwd()+'/wholesale_daily/csv_all/india_timeseries_wholesale_daily_interpolated_0.599.csv' #spline interpolated
# csv_in = os.getcwd()+'/wholesale_daily/csv_all/india_timeseries_wholesale_daily_interpolated_0.5999.csv' #polynomial interpolated
out_folder = os.getcwd()+'/wholesale_daily/'
elif retail_daily:
csv_in = os.getcwd()+'/retail_daily/csv_all/india_timeseries_retail_daily_interpolated_0.6.csv'
out_folder = os.getcwd()+'/retail_daily/'
elif retail_weekly:
csv_in = os.getcwd()+'/retail_weekly/csv_all/india_timeseries_retail_weekly_interpolated_0.6.csv'
out_folder = os.getcwd()+'/retail_weekly/'
if not os.path.exists(out_folder):
os.makedirs(out_folder)
# csv_out_wholesale_daily = 'wholesale_daily'
def subcolnames(df_ts, q1, q2):
cnames = []
for str_label in list(df_ts.columns):
if (q1 in str_label) and (q2 == None or q2 in str_label):
cnames.append(str_label)
return cnames
def subdf(df_ts, q1, q2 = None):
cnames = subcolnames(df_ts, q1, q2)
return df_ts[cnames]
def clear_symbols(string_lst):
symbols = ['(',')',',','\'']
ret = []
for string in string_lst:
for s in symbols:
string = string.replace(s, '')
ret.append(string)
if len(ret) != 4:
raise Exception('incorrect label parsing: '+str(string_lst))
return ret
def parse_strlabel_to_tuple(strlabel):
label = clear_symbols(strlabel.split(', '))
return label[0], label[1], label[2], label[3]
def parse_colnames_to_tuples(df_ts):
cc = []
for strlabel in list(df_ts.columns):
cc.append(parse_strlabel_to_tuple(strlabel))
return cc
def all_state_city_prod_subprod(df_ts):
all_states = set()
all_cities = set()
all_products = set()
all_subproducts = set()
for (state, city, product, subproduct) in list(df_ts.columns):
# = parse_strlabel_to_tuple(strlabel)
all_states.add(state)
all_cities.add(city)
all_products.add(product)
all_subproducts.add(subproduct)
return sorted(list(all_states)), sorted(list(all_cities)), sorted(list(all_products)), sorted(list(all_subproducts))
def run_output_by_city(df_ts, all_cities):
df_duy = pd.DataFrame()
for city in all_cities:
df_city = subdf(df_ts, city)
for (state, city, product, subproduct) in list(df_city.columns):
df_duy[product+'_'+subproduct] = df_city[(state, city, product, subproduct)]
fp = out_folder+'csv_by_city/'
if not os.path.exists(fp):
os.makedirs(fp)
fp = fp+city+'.csv'
df_duy.to_csv(fp, index_label='date')
def run_merge_by_prod_reg(df_ts, all_states, all_products):
df_merge = pd.DataFrame()
for state in all_states:
for product in all_products:
df_this = subdf(df_ts, state, product)
if df_this.shape[1] == 0:
continue
avg_series = df_this.mean(axis=1)
df_merge[(state, product)] = avg_series
return df_merge
def plotter(df, fpath, fname, save=False, close=True, legend=True):
if not os.path.exists(fpath):
os.makedirs(fpath)
fp = fpath+fname
ax = df.plot(legend=legend, title='Wholesale Daily Prices of Products in '+fname[:-4])
ax.set_ylabel('Price (Rupee) / KG')
ax.set_xlabel('Date')
if save:
plt.savefig(fp)
if close:
plt.close()
return True
def read_df_ts(csv_in):
df_ts = pd.read_csv(csv_in)
df_ts.set_index('date', inplace=True)
df_ts.columns = parse_colnames_to_tuples(df_ts)
return df_ts
def clear_slash(string):
return string.replace('/', '')
if __name__ == '__main__':
df_ts = read_df_ts(csv_in)
all_states, all_cities, all_products, all_subproducts = all_state_city_prod_subprod(df_ts)
if output_by_city:
run_output_by_city(df_ts, all_cities)
if merge_by_prod_reg:
df_merge = run_merge_by_prod_reg(df_ts, all_states, all_products)
for state in all_states:
df_state = subdf(df_merge, state)
# df_state.plot()
plotter(df_state, fpath = out_folder+'plot_merged/', fname=state+'.png', save=True)
#save to csv by region
for state in all_states:
df_reg = pd.DataFrame()
for product in all_products:
series = subdf(df_merge, state, product)
if series.shape[1] != 0:
df_reg[product] = series.iloc[:,0]
fp = out_folder+'csv_merged/'
if not os.path.exists(fp):
os.makedirs(fp)
fp = fp+state+'.csv'
df_reg.to_csv(fp, index_label='date')
if plot_all:
for label in list(df_ts.columns):
plotter(df_ts[label], fpath = out_folder+'plot_all/', fname=str(label).replace('/','-')+'.png', save=True)
csv_in2 = os.getcwd()+'/wholesale_daily/csv_all/india_timeseries_wholesale_daily_0.59.csv' #not interpolated
df_ts_dirt = read_df_ts(csv_in2)
p1 = subdf(df_ts, 'Bethuadahari','Fine')
p2 = subdf(df_ts_dirt, 'Bethuadahari','Fine')
ax1 = p1.plot(title='Price Series After Data Cleaning')
ax1.set_ylabel('Price (Rupee) / KG')
ax2 = p2.plot(title='Price Series Before Data Cleaning')
ax2.set_ylabel('Price (Rupee) / KG')
| bsd-3-clause | -6,141,976,568,057,818,000 | 31.227513 | 135 | 0.623379 | false |
MasterFacilityList/mfl_api | config/urls.py | 1 | 2430 | from django.conf.urls import url, patterns, include
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_page
from common.views import APIRoot, root_redirect_view
from rest_auth.views import (
Login, Logout, UserDetails, PasswordChange,
PasswordReset, PasswordResetConfirm
)
rest_auth_patterns = patterns(
# re-written from rest_auth.urls because of cache validation
'',
# URLs that do not require a session or valid token
url(r'^password/reset/$',
cache_page(0)(PasswordReset.as_view()),
name='rest_password_reset'),
url(r'^password/reset/confirm/$',
cache_page(0)(PasswordResetConfirm.as_view()),
name='rest_password_reset_confirm'),
url(r'^login/$',
cache_page(0)(Login.as_view()), name='rest_login'),
# URLs that require a user to be logged in with a valid session / token.
url(r'^logout/$',
cache_page(0)(Logout.as_view()), name='rest_logout'),
url(r'^user/$',
cache_page(0)(UserDetails.as_view()), name='rest_user_details'),
url(r'^password/change/$',
cache_page(0)(PasswordChange.as_view()), name='rest_password_change'),
)
apipatterns = patterns(
'',
url(r'^$', login_required(
cache_page(60*60)(APIRoot.as_view())), name='root_listing'),
url(r'^explore/', include('rest_framework_swagger.urls',
namespace='swagger')),
url(r'^common/', include('common.urls', namespace='common')),
url(r'^users/', include('users.urls', namespace='users')),
url(r'^facilities/', include('facilities.urls', namespace='facilities')),
url(r'^chul/', include('chul.urls', namespace='chul')),
url(r'^gis/', include('mfl_gis.urls', namespace='mfl_gis')),
url(r'^reporting/', include('reporting.urls', namespace='reporting')),
url(r'^rest-auth/', include(rest_auth_patterns, namespace='rest_auth')),
url(r'^rest-auth/registration/', include('rest_auth.registration.urls',
namespace='rest_auth_registration'))
)
urlpatterns = patterns(
'',
url(r'^$', root_redirect_view, name='root_redirect'),
url(r'^api/', include(apipatterns, namespace='api')),
url(r'^accounts/',
include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/token/', 'rest_framework.authtoken.views.obtain_auth_token'),
url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
)
| mit | 3,449,026,425,558,011,400 | 40.896552 | 78 | 0.657202 | false |
brian-team/brian2cuda | dev/benchmarks/results_2017_11_28_atomics_for_heterogenous_delay_mode_parallelisations/cuda_atomics_in_heterogenous_delay_mode/run_speed_test_script.py | 1 | 18038 | import os
import shutil
import glob
import subprocess
import sys
import socket
# run tests without X-server
import matplotlib
matplotlib.use('Agg')
# pretty plots
import seaborn
import time
import datetime
import cPickle as pickle
from brian2 import *
from brian2.tests.features import *
from brian2.tests.features.base import *
from brian2.tests.features.base import results
import brian2cuda
from brian2cuda.tests.features.cuda_configuration import (CUDAStandaloneConfiguration,
CUDAStandaloneConfigurationNoAssert,
CUDAStandaloneConfigurationExtraThresholdKernel,
CUDAStandaloneConfigurationCurandDouble,
CUDAStandaloneConfigurationNoCudaOccupancyAPI,
CUDAStandaloneConfigurationNoCudaOccupancyAPIProfileCPU,
CUDAStandaloneConfiguration2BlocksPerSM,
CUDAStandaloneConfiguration2BlocksPerSMLaunchBounds,
CUDAStandaloneConfigurationSynLaunchBounds,
CUDAStandaloneConfiguration2BlocksPerSMSynLaunchBounds,
CUDAStandaloneConfigurationProfileGPU,
CUDAStandaloneConfigurationProfileCPU,
CUDAStandaloneConfigurationTestBrunelHeteroAtomics,
CUDAStandaloneConfigurationTestBrunelHeteroAtomicsProfileCPU,
CUDAStandaloneConfigurationPushAtomicResize,
CUDAStandaloneConfigurationBrunelHeterogAndPushAtomicResize,
CUDAStandaloneConfigurationPushAtomicResizeProfileCPU,
CUDAStandaloneConfigurationPushAtomicResizeAsyncMemcpy,
CUDAStandaloneConfigurationPushAtomicResizeAsyncMemcpyProfileCPU)
from brian2cuda.tests.features.speed import *
from brian2genn.correctness_testing import GeNNConfiguration, GeNNConfigurationCPU, GeNNConfigurationOptimized
from create_readme import create_readme
assert len(sys.argv)<= 2, 'Only one command line argument supported! Got {}'.format(len(sys.argv)-1)
if len(sys.argv) == 2:
additional_dir_name = '_' + sys.argv[1]
else:
additional_dir_name = ''
prefs['devices.cpp_standalone.extra_make_args_unix'] = ['-j12']
# host specific settings
if socket.gethostname() == 'elnath':
prefs['devices.cpp_standalone.extra_make_args_unix'] = ['-j24']
prefs['codegen.cuda.extra_compile_args_nvcc'].remove('-arch=sm_35')
prefs['codegen.cuda.extra_compile_args_nvcc'].extend(['-arch=sm_20'])
configs = [# configuration project_directory
#(NumpyConfiguration, None),
#(WeaveConfiguration, None),
#(LocalConfiguration, None),
#(CPPStandaloneConfiguration, 'cpp_standalone'),
#(CPPStandaloneConfigurationOpenMP, 'cpp_standalone'),
(CUDAStandaloneConfiguration, 'cuda_standalone'),
(CUDAStandaloneConfigurationPushAtomicResize, 'cuda_standalone'),
(CUDAStandaloneConfigurationTestBrunelHeteroAtomics, 'cuda_standalone'),
(CUDAStandaloneConfigurationBrunelHeterogAndPushAtomicResize, 'cuda_standalone'),
#(CUDAStandaloneConfigurationExtraThresholdKernel, 'cuda_standalone'),
#(CUDAStandaloneConfigurationNoAssert, 'cuda_standalone'),
#(CUDAStandaloneConfigurationCurandDouble, 'cuda_standalone'),
#(CUDAStandaloneConfigurationNoCudaOccupancyAPI, 'cuda_standalone'),
#(CUDAStandaloneConfigurationNoCudaOccupancyAPIProfileCPU, 'cuda_standalone'),
#(CUDAStandaloneConfiguration2BlocksPerSM, 'cuda_standalone'),
#(CUDAStandaloneConfiguration2BlocksPerSMLaunchBounds, 'cuda_standalone'),
#(CUDAStandaloneConfigurationSynLaunchBounds, 'cuda_standalone'),
#(CUDAStandaloneConfiguration2BlocksPerSMSynLaunchBounds, 'cuda_standalone'),
#(CUDAStandaloneConfigurationProfileGPU, 'cuda_standalone'),
#(CUDAStandaloneConfigurationProfileCPU, 'cuda_standalone'),
#(CUDAStandaloneConfigurationTestBrunelHeteroAtomicsProfileCPU, 'cuda_standalone'),
#(CUDAStandaloneConfigurationPushAtomicResizeProfileCPU, 'cuda_standalone'),
#(CUDAStandaloneConfigurationPushAtomicResizeAsyncMemcpy, 'cuda_standalone'),
#(CUDAStandaloneConfigurationPushAtomicResizeAsyncMemcpyProfileCPU, 'cuda_standalone'),
#(GeNNConfiguration, 'GeNNworkspace'),
#(GeNNConfigurationCPU, 'GeNNworkspace'),
#(GeNNConfigurationOptimized, 'GeNNworkspace')
]
speed_tests = [# feature_test name n_slice
#(ThresholderOnlyPoissonLowRate, 'ThresholderOnlyPoissonLowRate', slice(None) ),
#(ThresholderOnlyPoissonMediumRate, 'ThresholderOnlyPoissonMediumRate', slice(None) ),
#(ThresholderOnlyPoissonHighRate, 'ThresholderOnlyPoissonHighRate', slice(None) ),
#(ThresholderOnlyAlwaysSpiking, 'ThresholderOnlyAlwaysSpiking', slice(None) ),
#(BrunelHakimStateupdateOnlyDouble, 'BrunelHakimStateupdateOnlyDouble', slice(None) ),
#(BrunelHakimStateupdateOnlyTriple, 'BrunelHakimStateupdateOnlyTriple', slice(None) ),
#(BrunelHakimStateupdateOnly, 'BrunelHakimStateupdateOnly', slice(None) ),
#(BrunelHakimNeuronsOnly, 'BrunelHakimNeuronsOnly', slice(None) ),
#(BrunelHakimNeuronsOnlyNoXi, 'BrunelHakimNeuronsOnlyNoXi', slice(None) ),
#(BrunelHakimNeuronsOnlyNoRand, 'BrunelHakimNeuronsOnlyNoRand', slice(None) ),
#(BrunelHakimStateupdateThresholdOnly, 'BrunelHakimStateupdateThresholdOnly', slice(None) ),
#(BrunelHakimStateupdateThresholdResetOnly, 'BrunelHakimStateupdateThresholdResetOnly', slice(None) ),
#(BrunelHakimModelScalarDelayShort, 'BrunelHakimModelScalarDelayShort', slice(None) ),
#(BrunelHakimModelScalarDelayNoSelfConnections, 'BrunelHakimModelScalarDelayNoSelfConnections', slice(None) ),
#(CUBA, 'CUBA', slice(None) ),
#(COBAHH, 'COBAHH', slice(None) ),
#(AdaptationOscillation, 'AdaptationOscillation', slice(None) ),
#(Vogels, 'Vogels', slice(None) ),
#(STDP, 'STDP', slice(None) ),
#(STDPEventDriven, 'STDPEventDriven', slice(None) ),
#(BrunelHakimModelScalarDelay, 'BrunelHakimModelScalarDelay', slice(None) ),
#(VerySparseMediumRateSynapsesOnly, 'VerySparseMediumRateSynapsesOnly', slice(None) ),
#(SparseMediumRateSynapsesOnly, 'SparseMediumRateSynapsesOnly', slice(None) ),
#(DenseMediumRateSynapsesOnly, 'DenseMediumRateSynapsesOnly', slice(None) ),
#(SparseLowRateSynapsesOnly, 'SparseLowRateSynapsesOnly', slice(None) ),
#(SparseHighRateSynapsesOnly, 'SparseHighRateSynapsesOnly', slice(None) ),
#(STDPNotEventDriven, 'STDPNotEventDriven', slice(None) ),
#(STDPMultiPost, 'STDPMultiPost', slice(None) ),
#(STDPNeuronalTraces, 'STDPNeuronalTraces', slice(None) ),
#(STDPMultiPostNeuronalTraces, 'STDPMultiPostNeuronalTraces', slice(None) ),
(BrunelHakimModelHeterogeneousDelay, 'BrunelHakimModelHeterogeneousDelay', slice(None) ),
#(LinearNeuronsOnly, 'LinearNeuronsOnly', slice(None) ),
#(HHNeuronsOnly, 'HHNeuronsOnly', slice(None) ),
#(VogelsWithSynapticDynamic, 'VogelsWithSynapticDynamic', slice(None) ),
### below uses monitors
#(CUBAFixedConnectivity, 'CUBAFixedConnectivity', slice(None) ),
#(COBAHHFixedConnectivity, 'COBAHHFixedConnectivity', slice(None, -1) ),
]
configurations = [config[0] for config in configs]
project_dirs = [config[1] for config in configs]
# check if multiple Configurations with same project_dirs are specified
last_idx = {}
for proj_dir in project_dirs:
if proj_dir is not None:
first_i = project_dirs.index(proj_dir)
last_i = len(project_dirs) - 1 - project_dirs[::-1].index(proj_dir)
if first_i != last_i:
print("WARNING there are multiple configurations using {d} as project "
"directory. Profiling and logfiles will only be saved for the last one {c}.".format(
d=proj_dir, c=configurations[last_i].__name__))
last_idx[proj_dir] = last_i
time_stemp = time.time()
date_str = datetime.datetime.fromtimestamp(time_stemp).strftime('%Y_%m_%d')
directory = 'results_{}{}'.format(date_str, additional_dir_name)
if os.path.exists(directory):
new_dir = directory + '_bak_' + str(int(time.time()))
print("Directory with name `{}` already exists. Renaming it to `{}`.".format(directory, new_dir))
os.rename(directory, new_dir)
os.makedirs(directory)
data_dir = os.path.join(directory, 'data')
plot_dir = os.path.join(directory, 'plots')
log_dir = os.path.join(directory, 'logs')
prof_dir = os.path.join(directory, 'nvprof')
os.makedirs(data_dir)
os.makedirs(plot_dir)
os.makedirs(log_dir)
os.makedirs(prof_dir)
print("Saving results in {}.".format(plot_dir))
shutil.copy(os.path.realpath(__file__), os.path.join(directory, 'run_speed_test_script.py'))
time_format = '%d.%m.%Y at %H:%M:%S'
script_start = datetime.datetime.fromtimestamp(time.time()).strftime(time_format)
with open(os.path.join(directory, 'git.diff'), 'w') as diff_file:
subprocess.call(['git', 'diff'], stdout=diff_file)
try:
for n, (st, name, sl) in enumerate(speed_tests):
start = datetime.datetime.fromtimestamp(time.time()).strftime(time_format)
print("Starting {} on {}.".format(name, start))
maximum_run_time = 1*60*60*second
res = run_speed_tests(configurations=configurations,
speed_tests=[st],
n_slice=sl,
#n_slice=slice(0,1,None),
run_twice=False,
verbose=True,
maximum_run_time=maximum_run_time#,
## this needs modification of brian2 code
#profile_only_active=True
#profile_only_active=False
)
end = datetime.datetime.fromtimestamp(time.time()).strftime(time_format)
diff = datetime.datetime.strptime(end, time_format) - datetime.datetime.strptime(start, time_format)
print("Running {} took {}.".format(name, diff))
res.plot_all_tests()
## this needs modification of brian2 code
#res.plot_all_tests(print_relative=True)
savefig(os.path.join(plot_dir, 'speed_test_{}_absolute.png'.format(speed_tests[n][1])))
res.plot_all_tests(relative=True)
savefig(os.path.join(plot_dir, 'speed_test_{}_relative.png'.format(name)))
res.plot_all_tests(profiling_minimum=0.05)
savefig(os.path.join(plot_dir, 'speed_test_{}_profiling.png'.format(name)))
res.plot_all_tests()
## this needs modification of brian2 code
#res.plot_all_tests(print_relative=True)
savefig(os.path.join(plot_dir, 'speed_test_{}_absolute.svg'.format(speed_tests[n][1])))
res.plot_all_tests(relative=True)
savefig(os.path.join(plot_dir, 'speed_test_{}_relative.svg'.format(name)))
res.plot_all_tests(profiling_minimum=0.05)
savefig(os.path.join(plot_dir, 'speed_test_{}_profiling.svg'.format(name)))
if 3 != len(get_fignums()):
print("WARNING: There were {} plots created, but only {} saved.".format(len(get_fignums()), 3*(n+1)))
for n in get_fignums():
close(n)
# pickel results object to disk
pkl_file = os.path.join(data_dir, name + '.pkl' )
with open(pkl_file, 'wb') as output:
pickle.dump(res, output, pickle.HIGHEST_PROTOCOL)
# save stdout log of last run (the other are deleted in run_speed_tests())
for proj_dir in set(project_dirs):
if not proj_dir is None and proj_dir in ['cuda_standalone', 'cpp_standalone']:
config = configurations[last_idx[proj_dir]]
stdout_file = os.path.join(proj_dir, 'results/stdout.txt')
if os.path.exists(stdout_file):
shutil.copy(stdout_file,
os.path.join(log_dir, 'stdout_{st}_{conf}_{n}.txt'.format(st=name, conf=proj_dir,
n=st.n_range[sl][-1])))
else:
print("WARNING Couldn't save {},file not found.".format(stdout_file))
# run nvprof on n_range[2]
for conf, proj_dir in zip(configurations, project_dirs):
main_arg = ''
if proj_dir in ['cuda_standalone', 'GeNNworkspace']:
if proj_dir == 'GeNNworkspace':
main_arg = 'test {time} 1'.format(time=st.duration/second)
ns = st.n_range[sl]
idx = 2
max_runtime = 20
conf_name = conf.__name__
print("Rerunning {} with n = {} for nvprof profiling".format(conf_name, st.n_range[idx]))
tb, res, runtime, prof_info = results(conf, st, st.n_range[idx], maximum_run_time=maximum_run_time)
if not isinstance(res, Exception) and runtime < max_runtime:
option = '--profile-from-start-off' if proj_dir == 'cuda_standalone' else ''
cmd = 'cd {proj_dir} && nvprof {opt} --log-file ../{log_file} ./main {arg}'.format(
proj_dir=proj_dir, arg=main_arg, opt=option,
log_file=os.path.join(prof_dir, 'nvprof_{st}_{conf}_{n}.log'.format(
st=name, conf=conf_name, n=st.n_range[idx])))
prof_start = datetime.datetime.fromtimestamp(time.time()).strftime(time_format)
print(cmd)
x = os.system(cmd)
if x:
print('nvprof failed with {}'.format(x))
prof_end = datetime.datetime.fromtimestamp(time.time()).strftime(time_format)
prof_diff = datetime.datetime.strptime(prof_end, time_format) - datetime.datetime.strptime(prof_start, time_format)
print("Profiling took {} for runtime of {}".format(prof_diff, runtime))
finally:
create_readme(directory)
print("\nSummarized speed test results in {}".format(directory + '/README.md'))
script_end = datetime.datetime.fromtimestamp(time.time()).strftime(time_format)
script_diff = datetime.datetime.strptime(script_end, time_format) - datetime.datetime.strptime(script_start, time_format)
print("Finished speed test on {}. Total time = {}.".format(
datetime.datetime.fromtimestamp(time.time()).strftime(time_format), script_diff))
##res.plot_all_tests(relative=True)
#for n in get_fignums():
# plt.figure(n)
# savefig(plot_dir + '/speed_test_{}.png'.format(speed_tests[n-1][1]))
## Debug (includes profiling infos)
#from brian2.tests.features.base import results
#for x in results(LocalConfiguration, LinearNeuronsOnly, 10, maximum_run_time=10*second):
# print x
| gpl-2.0 | -2,705,125,021,239,114,000 | 62.291228 | 135 | 0.543852 | false |
bcgov/gwells | app/backend/registries/views.py | 1 | 30811 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import reversion
from collections import OrderedDict
from django.db.models import Q, Prefetch
from django.http import HttpResponse, Http404, JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.views.generic import TemplateView
from django_filters import rest_framework as restfilters
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from reversion.views import RevisionMixin
from rest_framework import filters, status, exceptions
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, ListAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from drf_multiple_model.views import ObjectMultipleModelAPIView
from gwells.documents import MinioClient
from gwells.roles import REGISTRIES_VIEWER_ROLE
from gwells.models import ProvinceStateCode
from gwells.pagination import APILimitOffsetPagination
from gwells.roles import REGISTRIES_EDIT_ROLE, REGISTRIES_VIEWER_ROLE
from gwells.settings.base import get_env_variable
from reversion.models import Version
from registries.models import (
AccreditedCertificateCode,
ActivityCode,
ApplicationStatusCode,
Organization,
OrganizationNote,
Person,
PersonNote,
ProofOfAgeCode,
Register,
RegistriesApplication,
RegistriesRemovalReason,
SubactivityCode,
WellClassCode)
from registries.permissions import RegistriesEditPermissions, RegistriesEditOrReadOnly
from registries.serializers import (
ApplicationAdminSerializer,
ApplicationStatusCodeSerializer,
ApplicationListSerializer,
CityListSerializer,
ProofOfAgeCodeSerializer,
OrganizationListSerializer,
OrganizationAdminSerializer,
OrganizationNameListSerializer,
PersonAdminSerializer,
PersonListSerializer,
RegistrationAdminSerializer,
RegistriesRemovalReasonSerializer,
PersonNoteSerializer,
ProvinceStateCodeSerializer,
SubactivitySerializer,
WellClassCodeSerializer,
AccreditedCertificateCodeSerializer,
OrganizationNoteSerializer,
PersonNameSerializer)
from gwells.change_history import generate_history_diff
from gwells.views import AuditCreateMixin, AuditUpdateMixin
class OrganizationListView(RevisionMixin, AuditCreateMixin, ListCreateAPIView):
"""
get:
Returns a list of all registered drilling organizations
post:
Creates a new drilling organization record
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = OrganizationListSerializer
pagination_class = None
# prefetch related objects for the queryset to prevent duplicate database trips later
queryset = Organization.objects.all() \
.select_related('province_state',) \
.prefetch_related('registrations', 'registrations__person')
# Allow searching against fields like organization name, address,
# name or registration of organization contacts
filter_backends = (filters.SearchFilter,)
search_fields = (
'name',
'street_address',
'city',
'registrations__person__first_name',
'registrations__person__surname',
'registrations__applications__file_no'
)
def get_queryset(self):
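        # organizations are soft-deleted by setting expiry_date (see OrganizationDetailView.destroy),
        # so exclude records that have already expired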
return self.queryset.filter(expiry_date__gt=timezone.now())
class OrganizationDetailView(RevisionMixin, AuditUpdateMixin, RetrieveUpdateDestroyAPIView):
"""
get:
Returns the specified drilling organization
put:
Replaces the specified record with a new one
patch:
Updates a drilling organization with the fields/values provided in the request body
delete:
Removes the specified drilling organization record
"""
permission_classes = (RegistriesEditPermissions,)
# 'pk' and 'id' have been replaced by 'org_guid' as primary key for Organization model
lookup_field = "org_guid"
serializer_class = OrganizationAdminSerializer
# prefetch related province, contacts and person records to prevent future additional database trips
queryset = Organization.objects.all() \
.select_related('province_state',) \
.prefetch_related('registrations', 'registrations__person')
def get_queryset(self):
return self.queryset.filter(expiry_date__gt=timezone.now())
def destroy(self, request, *args, **kwargs):
"""
Set expiry_date to current date
"""
instance = self.get_object()
for reg in instance.registrations.all():
if reg.person.expiry_date is None:
                raise exceptions.ValidationError(
                    'Organization has registrations associated with it. '
                    'Remove this organization from registration records first.')
instance.expiry_date = timezone.now()
instance.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class PersonOptionsView(APIView):
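    """
    get:
    Returns well class, subactivity and accredited certificate options per registries activity,
    plus shared code tables (proof of age, approval outcomes, removal reasons, provinces/states)
    """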
@swagger_auto_schema(auto_schema=None)
def get(self, request, format=None, **kwargs):
result = {}
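        # options are keyed by registries activity code (e.g. DRILL, PUMP)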
for activity in ActivityCode.objects.all():
# Well class query
well_class_query = WellClassCode \
.objects.filter(
qualification__subactivity__registries_activity=activity.registries_activity_code) \
.order_by('registries_well_class_code').distinct('registries_well_class_code')
# Sub activity query
sub_activity_query = SubactivityCode \
.objects.filter(
registries_activity=activity).order_by('display_order')
# Certificate code query
cert_code_query = AccreditedCertificateCode \
.objects.filter(
registries_activity=activity.registries_activity_code) \
.order_by('name')
result[activity.registries_activity_code] = {
'well_class_codes':
list(map(lambda item: WellClassCodeSerializer(
item).data, well_class_query)),
'subactivity_codes':
list(map(lambda item: SubactivitySerializer(
item).data, sub_activity_query)),
'accredited_certificate_codes':
list(map(lambda item: AccreditedCertificateCodeSerializer(
item).data, cert_code_query))
}
result['proof_of_age_codes'] = \
list(map(lambda item: ProofOfAgeCodeSerializer(item).data,
ProofOfAgeCode.objects.all().order_by('display_order')))
result['approval_outcome_codes'] = \
list(map(lambda item: ApplicationStatusCodeSerializer(item).data,
ApplicationStatusCode.objects.all()))
result['reason_removed_codes'] = \
list(map(lambda item: RegistriesRemovalReasonSerializer(item).data,
RegistriesRemovalReason.objects.all()))
result['province_state_codes'] = \
list(map(lambda item: ProvinceStateCodeSerializer(item).data,
ProvinceStateCode.objects.all().order_by('display_order')))
return Response(result)
def person_search_qs(request):
""" Returns Person queryset, removing non-active and unregistered drillers for anonymous users """
query = request.GET
qs = Person.objects.filter(expiry_date__gt=timezone.now())
# base registration and application querysets
registrations_qs = Register.objects.all()
applications_qs = RegistriesApplication.objects.all()
# Search for cities (split list and return all matches)
    # search comes in as a comma-separated querystring param, e.g.: ?city=Atlin,Lake Windermere,Duncan
cities = query.get('city', None)
if cities:
cities = cities.split(',')
qs = qs.filter(registrations__organization__city__in=cities)
registrations_qs = registrations_qs.filter(
organization__city__in=cities)
activity = query.get('activity', None)
status = query.get('status', None)
user_is_staff = request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists()
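    # only users with the registries viewer role may filter on application status
    # (including pending and removed records); public searches are restricted to
    # active, non-removed registrants further below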
if activity:
if (status == 'P' or not status) and user_is_staff:
# We only allow staff to filter on status
# For pending, or all, we also return search where there is no registration.
qs = qs.filter(Q(registrations__registries_activity__registries_activity_code=activity) |
Q(registrations__isnull=True))
registrations_qs = registrations_qs.filter(
registries_activity__registries_activity_code=activity)
else:
# For all other searches, we strictly filter on activity.
qs = qs.filter(
registrations__registries_activity__registries_activity_code=activity)
registrations_qs = registrations_qs.filter(
registries_activity__registries_activity_code=activity)
if user_is_staff:
# User is logged in
if status:
if status == 'Removed':
                # Things are a bit more complicated if we're looking for removed records,
                # as the current status doesn't come into play.
qs = qs.filter(
registrations__applications__removal_date__isnull=False)
else:
if status == 'P':
# If the status is pending, we also pull in any people without registrations
# or applications.
qs = qs.filter(Q(registrations__applications__current_status__code=status) |
Q(registrations__isnull=True) |
Q(registrations__applications__isnull=True),
Q(registrations__applications__removal_date__isnull=True))
else:
qs = qs.filter(
Q(registrations__applications__current_status__code=status),
Q(registrations__applications__removal_date__isnull=True))
else:
# User is not logged in
# Only show active drillers to non-admin users and public
qs = qs.filter(
Q(registrations__applications__current_status__code='A',
registrations__registries_activity=activity),
            Q(registrations__applications__removal_date__isnull=True)
)
registrations_qs = registrations_qs.filter(
Q(applications__current_status__code='A'),
Q(applications__removal_date__isnull=True))
applications_qs = applications_qs.filter(
current_status='A', removal_date__isnull=True)
# generate applications queryset
applications_qs = applications_qs \
.select_related(
'current_status',
'primary_certificate',
'primary_certificate__cert_auth',
'subactivity',
) \
.prefetch_related(
'subactivity__qualification_set',
'subactivity__qualification_set__well_class'
).distinct()
# generate registrations queryset, inserting filtered applications queryset defined above
registrations_qs = registrations_qs \
.select_related(
'registries_activity',
'organization',
'organization__province_state',
) \
.prefetch_related(
Prefetch('applications', queryset=applications_qs)
).distinct()
# insert filtered registrations set
qs = qs \
.prefetch_related(
Prefetch('registrations', queryset=registrations_qs)
)
return qs.distinct()
class PersonListView(RevisionMixin, AuditCreateMixin, ListCreateAPIView):
"""
get:
Returns a list of all person records
post:
Creates a new person record
"""
permission_classes = (RegistriesEditOrReadOnly,)
serializer_class = PersonAdminSerializer
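    # list responses are rendered with the reduced PersonListSerializer (see list() below);
    # PersonAdminSerializer is used when creating records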
pagination_class = APILimitOffsetPagination
# Allow searching on name fields, names of related companies, etc.
filter_backends = (restfilters.DjangoFilterBackend,
filters.SearchFilter, filters.OrderingFilter)
ordering_fields = ('surname', 'registrations__organization__name')
ordering = ('surname',)
search_fields = (
'first_name',
'surname',
'registrations__organization__name',
'registrations__organization__city',
'registrations__registration_no'
)
    # base queryset; related organizations and applications are prefetched in person_search_qs()
queryset = Person.objects.all()
def get_queryset(self):
""" Returns Person queryset, removing non-active and unregistered drillers for anonymous users """
return person_search_qs(self.request)
@swagger_auto_schema(responses={200: PersonListSerializer(many=True)})
def get(self, request, *args, **kwargs):
# Returns self.list - overridden for schema documentation
return self.list(request, *args, **kwargs)
def list(self, request, **kwargs):
""" List response using serializer with reduced number of fields """
queryset = self.get_queryset()
filtered_queryset = self.filter_queryset(queryset)
page = self.paginate_queryset(filtered_queryset)
if page is not None:
serializer = PersonListSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = PersonListSerializer(filtered_queryset, many=True)
return Response(serializer.data)
class PersonDetailView(RevisionMixin, AuditUpdateMixin, RetrieveUpdateDestroyAPIView):
"""
get:
Returns the specified person
put:
Replaces the specified person record with a new one
patch:
Updates a person with the fields/values provided in the request body
delete:
Removes the specified person record
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = PersonAdminSerializer
# pk field has been replaced by person_guid
lookup_field = "person_guid"
queryset = Person.objects \
.all() \
.prefetch_related(
'notes',
'notes__author',
'registrations',
'registrations__registries_activity',
'registrations__organization',
'registrations__applications',
'registrations__applications__current_status',
'registrations__applications__primary_certificate',
'registrations__applications__primary_certificate__cert_auth',
'registrations__applications__subactivity',
'registrations__applications__subactivity__qualification_set',
'registrations__applications__subactivity__qualification_set__well_class'
).distinct()
def get_queryset(self):
"""
Returns only registered people (i.e. drillers with active registration) to anonymous users
"""
qs = self.queryset.filter(expiry_date__gt=timezone.now())
if not self.request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists():
qs = qs.filter(Q(applications__current_status__code='A'),
Q(applications__removal_date__isnull=True))
return qs
def destroy(self, request, *args, **kwargs):
"""
Set expiry_date to current date
"""
instance = self.get_object()
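        # soft delete: mark the record as expired rather than removing the row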
instance.expiry_date = timezone.now()
instance.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class CitiesListView(ListAPIView):
"""
List of cities with a qualified, registered operator (driller or installer)
get: returns a list of cities with a qualified, registered operator (driller or installer)
"""
serializer_class = CityListSerializer
lookup_field = 'register_guid'
pagination_class = None
swagger_schema = None
permission_classes = (RegistriesEditOrReadOnly,)
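    # distinct('organization__city') is a PostgreSQL DISTINCT ON; the order_by must start with the same field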
queryset = Register.objects \
.exclude(organization__city__isnull=True) \
.exclude(organization__city='') \
.select_related(
'organization', 'organization__province_state'
) \
.distinct('organization__city') \
.order_by('organization__city')
def get_queryset(self):
"""
        Returns only registered operators (i.e. drillers with active registration) to anonymous users.
        If the request has an 'activity' kwarg (accepted values: 'drill' and 'install'), the queryset
        is filtered to that activity.
"""
qs = self.queryset
if not self.request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists():
qs = qs.filter(
Q(applications__current_status__code='A'),
Q(applications__removal_date__isnull=True))
if self.kwargs.get('activity') == 'drill':
qs = qs.filter(registries_activity='DRILL')
if self.kwargs.get('activity') == 'install':
qs = qs.filter(registries_activity='PUMP')
return qs
class RegistrationListView(RevisionMixin, AuditCreateMixin, ListCreateAPIView):
"""
get:
List all registration records
post:
Create a new well driller or well pump installer registration record for a person
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = RegistrationAdminSerializer
queryset = Register.objects.all() \
.select_related(
'person',
'registries_activity',
'organization',) \
.prefetch_related(
'applications',
'applications__current_status',
'applications__primary_certificate',
'applications__primary_certificate__cert_auth',
'applications__subactivity',
'applications__subactivity__qualification_set',
'applications__subactivity__qualification_set__well_class'
)
class RegistrationDetailView(RevisionMixin, AuditUpdateMixin, RetrieveUpdateDestroyAPIView):
"""
get:
Returns a well driller or well pump installer registration record
put:
Replaces a well driller or well pump installer registration record with a new one
patch:
Updates a registration record with new values
delete:
Removes the specified registration record from the database
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = RegistrationAdminSerializer
lookup_field = 'register_guid'
queryset = Register.objects.all() \
.select_related(
'person',
'registries_activity',
'organization',) \
.prefetch_related(
'applications',
'applications__current_status',
'applications__primary_certificate',
'applications__primary_certificate__cert_auth',
'applications__subactivity',
'applications__subactivity__qualification_set',
'applications__subactivity__qualification_set__well_class'
)
class ApplicationListView(RevisionMixin, AuditCreateMixin, ListCreateAPIView):
"""
get:
Returns a list of all registration applications
post:
Creates a new registries application
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = ApplicationAdminSerializer
queryset = RegistriesApplication.objects.all() \
.select_related(
'registration',
'registration__person',
'registration__registries_activity')
class ApplicationDetailView(RevisionMixin, AuditUpdateMixin, RetrieveUpdateDestroyAPIView):
"""
get:
Returns the specified drilling application
put:
Replaces the specified record with a new one
patch:
Updates a drilling application with the set of values provided in the request body
delete:
Removes the specified drilling application record
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = ApplicationAdminSerializer
queryset = RegistriesApplication.objects.all() \
.select_related(
'registration',
'registration__person',
'registration__registries_activity')
lookup_field = "application_guid"
class OrganizationNameListView(ListAPIView):
"""
A list of organizations with only organization names
"""
permission_classes = (RegistriesEditOrReadOnly,)
serializer_class = OrganizationNameListSerializer
queryset = Organization.objects \
.select_related('province_state')
pagination_class = None
lookup_field = 'organization_guid'
def get_queryset(self):
return self.queryset.filter(expiry_date__gt=timezone.now())
class PersonNoteListView(AuditCreateMixin, ListCreateAPIView):
"""
get:
Returns notes associated with a Person record
post:
Adds a note record to the specified Person record
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = PersonNoteSerializer
swagger_schema = None
def get_queryset(self):
person = self.kwargs['person_guid']
return PersonNote.objects.filter(person=person).order_by('-date')
def perform_create(self, serializer):
""" Add author to serializer data """
person = self.kwargs['person_guid']
serializer.validated_data['person'] = Person.objects.get(
person_guid=person)
serializer.validated_data['author'] = self.request.user
return super(PersonNoteListView, self).perform_create(serializer)
class PersonNoteDetailView(AuditUpdateMixin, RetrieveUpdateDestroyAPIView):
"""
get:
Returns a PersonNote record
put:
Replaces a PersonNote record with a new one
patch:
Updates a PersonNote record with the set of fields provided in the request body
delete:
Removes a PersonNote record
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = PersonNoteSerializer
swagger_schema = None
def get_queryset(self):
person = self.kwargs['person']
return PersonNote.objects.filter(person=person)
class OrganizationNoteListView(AuditCreateMixin, ListCreateAPIView):
"""
get:
Returns notes associated with a Organization record
post:
Adds a note record to the specified Organization record
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = OrganizationNoteSerializer
swagger_schema = None
def get_queryset(self):
org = self.kwargs['org_guid']
return OrganizationNote.objects.filter(organization=org).order_by('-date')
def perform_create(self, serializer):
""" Add author to serializer data """
org = self.kwargs['org_guid']
serializer.validated_data['organization'] = Organization.objects.get(
org_guid=org)
serializer.validated_data['author'] = self.request.user
return super(OrganizationNoteListView, self).perform_create(serializer)
class OrganizationNoteDetailView(AuditUpdateMixin, RetrieveUpdateDestroyAPIView):
"""
get:
Returns a OrganizationNote record
put:
Replaces a OrganizationNote record with a new one
patch:
Updates a OrganizationNote record with the set of fields provided in the request body
delete:
Removes a OrganizationNote record
"""
permission_classes = (RegistriesEditPermissions,)
serializer_class = OrganizationNoteSerializer
swagger_schema = None
def get_queryset(self):
org = self.kwargs['org_guid']
return OrganizationNote.objects.filter(organization=org)
class OrganizationHistory(APIView):
"""
get: returns a history of changes to an Organization model record
"""
permission_classes = (RegistriesEditPermissions,)
queryset = Organization.objects.all()
swagger_schema = None
def get(self, request, org_guid, **kwargs):
try:
organization = Organization.objects.get(org_guid=org_guid)
except Organization.DoesNotExist:
raise Http404("Organization not found")
# query records in history for this model.
organization_history = [obj for obj in organization.history.all().order_by(
'-revision__date_created')]
history_diff = generate_history_diff(organization_history)
return Response(history_diff)
class PersonHistory(APIView):
"""
get: returns a history of changes to a Person model record
"""
permission_classes = (RegistriesEditPermissions,)
queryset = Person.objects.all()
swagger_schema = None
def get(self, request, person_guid, **kwargs):
"""
Retrieves version history for the specified Person record and creates a list of diffs
for each revision.
"""
try:
person = Person.objects.get(person_guid=person_guid)
except Person.DoesNotExist:
raise Http404("Person not found")
# query records in history for this model.
person_history = [obj for obj in person.history.all().order_by(
'-revision__date_created')]
person_history_diff = generate_history_diff(
person_history, 'Person profile')
registration_history = []
registration_history_diff = []
application_history = []
application_history_diff = []
# generate diffs for version history in each of the individual's registrations
for reg in person.registrations.all():
registration_history = [
obj for obj in reg.history.all()]
registration_history_diff += generate_history_diff(
registration_history, reg.registries_activity.description + ' registration')
for app in reg.applications.all():
application_history = [
obj for obj in app.history.all()]
application_history_diff += generate_history_diff(
application_history, app.subactivity.description + ' application')
# generate application diffs
history_diff = sorted(
person_history_diff +
registration_history_diff +
application_history_diff, key=lambda x: x['date'], reverse=True)
return Response(history_diff)
class PersonNameSearch(ListAPIView):
"""Search for a person in the Register"""
permission_classes = (RegistriesEditOrReadOnly,)
serializer_class = PersonNameSerializer
pagination_class = None
lookup_field = 'person_guid'
ordering = ('surname',)
def get_queryset(self):
"""
This view returns all names with expired records filtered out.
"""
return Person.objects.filter(expiry_date__gt=timezone.now())
class ListFiles(APIView):
"""
List documents associated with an aquifer
get: list files found for the aquifer identified in the uri
"""
@swagger_auto_schema(responses={200: openapi.Response('OK',
openapi.Schema(type=openapi.TYPE_OBJECT, properties={
'public': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'url': openapi.Schema(type=openapi.TYPE_STRING),
'name': openapi.Schema(type=openapi.TYPE_STRING)
}
)),
'private': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'url': openapi.Schema(type=openapi.TYPE_STRING),
'name': openapi.Schema(type=openapi.TYPE_STRING)
}
))
})
)})
def get(self, request, person_guid, **kwargs):
user_is_staff = self.request.user.groups.filter(
Q(name=REGISTRIES_EDIT_ROLE) | Q(name=REGISTRIES_VIEWER_ROLE)).exists()
client = MinioClient(
request=request, disable_private=(not user_is_staff))
documents = client.get_documents(
person_guid, resource="driller", include_private=user_is_staff)
return Response(documents)
class PreSignedDocumentKey(APIView):
"""
Get a pre-signed document key to upload into an S3 compatible document store
post: obtain a URL that is pre-signed to allow client-side uploads
"""
queryset = Person.objects.all()
permission_classes = (RegistriesEditPermissions,)
@swagger_auto_schema(auto_schema=None)
def get(self, request, person_guid, **kwargs):
person = get_object_or_404(self.queryset, pk=person_guid)
client = MinioClient(
request=request, disable_private=False)
object_name = request.GET.get("filename")
filename = client.format_object_name(object_name, person.person_guid, "driller")
bucket_name = get_env_variable("S3_REGISTRANT_BUCKET")
# All documents are private for drillers
url = client.get_presigned_put_url(
filename, bucket_name=bucket_name, private=True)
return JsonResponse({"object_name": object_name, "url": url})
class DeleteDrillerDocument(APIView):
"""
Delete a document from a S3 compatible store
delete: remove the specified object from the S3 store
"""
queryset = Person.objects.all()
permission_classes = (RegistriesEditPermissions,)
@swagger_auto_schema(auto_schema=None)
def delete(self, request, person_guid, **kwargs):
person = get_object_or_404(self.queryset, pk=person_guid)
client = MinioClient(
request=request, disable_private=False)
is_private = False
bucket_name = get_env_variable("S3_REGISTRANT_BUCKET")
if request.GET.get("private") == "true":
is_private = True
bucket_name = get_env_variable("S3_PRIVATE_REGISTRANT_BUCKET")
object_name = request.GET.get("filename")
client.delete_document(object_name, bucket_name=bucket_name, private=is_private)
return HttpResponse(status=204)
| apache-2.0 | -2,987,966,131,840,463,000 | 34.333716 | 106 | 0.65561 | false |
intelligenia/modeltranslation | setup.py | 1 | 2484 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 by intelligenia <[email protected]>
#
# The MIT License (MIT)
#
# Copyright (c) 2016 intelligenia soluciones informáticas
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
data_files = []
for dirpath, dirnames, filenames in os.walk('.'):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'):
del dirnames[i]
if '__init__.py' in filenames:
continue
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(
name="modeltranslation",
version="0.25",
author="intelligenia S.L.",
author_email="[email protected]",
description="Modeltranslation is an utility to translate Django model fields.",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
],
install_requires=[
'django-cuser', 'django-tinymce'
],
license="MIT",
keywords="modeltranslation translations",
url='https://github.com/intelligenia/modeltranslation',
packages=find_packages('.'),
data_files=data_files,
include_package_data=True,
)
| mit | 5,486,425,722,198,557,000 | 36.059701 | 83 | 0.710028 | false |
vandorjw/django-template-project | project/project_name/settings/local.py | 1 | 1847 | """Development settings and globals."""
from os.path import join, normpath
from os import environ
from base import *
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
########## DEBUG CONFIGURATION
DEBUG = True
THUMBNAIL_DEBUG = True
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': get_env_setting('{{ project_name }}_DB_NAME'),
'USER': get_env_setting('{{ project_name }}_DB_USER'),
'PASSWORD': get_env_setting('{{ project_name }}_DB_PASS'),
'HOST': get_env_setting('{{ project_name }}_DB_HOST'),
'PORT': get_env_setting('{{ project_name }}_DB_PORT'),
}
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
INSTALLED_APPS += (
'debug_toolbar',
)
INTERNAL_IPS = ('127.0.0.1',)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TEMPLATE_CONTEXT': True,
}
########## END TOOLBAR CONFIGURATION
########## SECRET CONFIGURATION
SECRET_KEY = get_env_setting('{{ project_name }}_SECRET_KEY')
########## END SECRET CONFIGURATION
| mit | -7,193,591,841,739,247,000 | 26.984848 | 67 | 0.621548 | false |
deepfield/ibis | ibis/pandas/execution/tests/conftest.py | 1 | 6784 | from __future__ import absolute_import
import decimal
import pytest
import pandas as pd
import ibis
import ibis.expr.datatypes as dt
import os
@pytest.fixture(scope='module')
def df():
return pd.DataFrame({
'plain_int64': list(range(1, 4)),
'plain_strings': list('abc'),
'plain_float64': [4.0, 5.0, 6.0],
'plain_datetimes_naive': pd.Series(
pd.date_range(start='2017-01-02 01:02:03.234', periods=3).values,
),
'plain_datetimes_ny': pd.Series(
pd.date_range(start='2017-01-02 01:02:03.234', periods=3).values,
).dt.tz_localize('America/New_York'),
'plain_datetimes_utc': pd.Series(
pd.date_range(start='2017-01-02 01:02:03.234', periods=3).values,
).dt.tz_localize('UTC'),
'dup_strings': list('dad'),
'dup_ints': [1, 2, 1],
'float64_as_strings': ['100.01', '234.23', '-999.34'],
'int64_as_strings': list(map(str, range(1, 4))),
'strings_with_space': [' ', 'abab', 'ddeeffgg'],
'int64_with_zeros': [0, 1, 0],
'float64_with_zeros': [1.0, 0.0, 1.0],
'float64_positive': [1.0, 2.0, 1.0],
'strings_with_nulls': ['a', None, 'b'],
'datetime_strings_naive': pd.Series(
pd.date_range(start='2017-01-02 01:02:03.234', periods=3).values,
).astype(str),
'datetime_strings_ny': pd.Series(
pd.date_range(start='2017-01-02 01:02:03.234', periods=3).values,
).dt.tz_localize('America/New_York').astype(str),
'datetime_strings_utc': pd.Series(
pd.date_range(start='2017-01-02 01:02:03.234', periods=3).values,
).dt.tz_localize('UTC').astype(str),
'decimal': list(map(decimal.Decimal, ['1.0', '2', '3.234'])),
'array_of_float64': [[1.0, 2.0], [3.0], []],
'array_of_int64': [[1, 2], [], [3]],
'array_of_strings': [['a', 'b'], [], ['c']],
'map_of_strings_integers': [{'a': 1, 'b': 2}, None, {}],
'map_of_integers_strings': [{}, None, {1: 'a', 2: 'b'}],
'map_of_complex_values': [None, {'a': [1, 2, 3], 'b': []}, {}],
})
@pytest.fixture(scope='module')
def batting_df():
path = os.path.join(
os.environ.get('IBIS_TEST_DATA_DIRECTORY', ''),
'batting.csv'
)
if not os.path.exists(path):
pytest.skip('{} not found'.format(path))
elif not os.path.isfile(path):
pytest.skip('{} is not a file'.format(path))
df = pd.read_csv(path, index_col=None, sep=',')
num_rows = int(0.01 * len(df))
return df.iloc[30:30 + num_rows].reset_index(drop=True)
@pytest.fixture(scope='module')
def awards_players_df():
path = os.path.join(
os.environ.get('IBIS_TEST_DATA_DIRECTORY', ''),
'awards_players.csv'
)
if not os.path.exists(path):
pytest.skip('{} not found'.format(path))
elif not os.path.isfile(path):
pytest.skip('{} is not a file'.format(path))
return pd.read_csv(path, index_col=None, sep=',')
@pytest.fixture(scope='module')
def df1():
return pd.DataFrame(
{'key': list('abcd'), 'value': [3, 4, 5, 6], 'key2': list('eeff')}
)
@pytest.fixture(scope='module')
def df2():
return pd.DataFrame({
'key': list('ac'),
'other_value': [4.0, 6.0],
'key3': list('fe')
})
@pytest.fixture(scope='module')
def time_df1():
return pd.DataFrame(
{'time': pd.to_datetime([1, 2, 3, 4]), 'value': [1.1, 2.2, 3.3, 4.4]}
)
@pytest.fixture(scope='module')
def time_df2():
return pd.DataFrame(
{'time': pd.to_datetime([2, 4]), 'other_value': [1.2, 2.0]}
)
@pytest.fixture(scope='module')
def time_keyed_df1():
return pd.DataFrame(
{
'time': pd.to_datetime([1, 1, 2, 2, 3, 3, 4, 4]),
'key': [1, 2, 1, 2, 1, 2, 1, 2],
'value': [1.1, 1.2, 2.2, 2.4, 3.3, 3.6, 4.4, 4.8]
}
)
@pytest.fixture(scope='module')
def time_keyed_df2():
return pd.DataFrame(
{
'time': pd.to_datetime([2, 2, 4, 4]),
'key': [1, 2, 1, 2],
'other_value': [1.2, 1.4, 2.0, 4.0]
}
)
@pytest.fixture(scope='module')
def client(
df, df1, df2, df3, time_df1, time_df2, time_keyed_df1, time_keyed_df2,
):
return ibis.pandas.connect(
dict(
df=df,
df1=df1,
df2=df2,
df3=df3,
left=df1,
right=df2,
time_df1=time_df1,
time_df2=time_df2,
time_keyed_df1=time_keyed_df1,
time_keyed_df2=time_keyed_df2,
)
)
@pytest.fixture(scope='module')
def df3():
return pd.DataFrame({
'key': list('ac'),
'other_value': [4.0, 6.0],
'key2': list('ae'),
'key3': list('fe')
})
t_schema = {
'decimal': dt.Decimal(4, 3),
'array_of_float64': dt.Array(dt.double),
'array_of_int64': dt.Array(dt.int64),
'array_of_strings': dt.Array(dt.string),
'map_of_strings_integers': dt.Map(dt.string, dt.int64),
'map_of_integers_strings': dt.Map(dt.int64, dt.string),
'map_of_complex_values': dt.Map(dt.string, dt.Array(dt.int64)),
}
@pytest.fixture(scope='module')
def t(client):
return client.table('df', schema=t_schema)
@pytest.fixture(scope='module')
def lahman(batting_df, awards_players_df):
return ibis.pandas.connect({
'batting': batting_df,
'awards_players': awards_players_df,
})
@pytest.fixture(scope='module')
def left(client):
return client.table('left')
@pytest.fixture(scope='module')
def right(client):
return client.table('right')
@pytest.fixture(scope='module')
def time_left(client):
return client.table('time_df1')
@pytest.fixture(scope='module')
def time_right(client):
return client.table('time_df2')
@pytest.fixture(scope='module')
def time_keyed_left(client):
return client.table('time_keyed_df1')
@pytest.fixture(scope='module')
def time_keyed_right(client):
return client.table('time_keyed_df2')
@pytest.fixture(scope='module')
def batting(lahman):
return lahman.table('batting')
@pytest.fixture(scope='module')
def awards_players(lahman):
return lahman.table('awards_players')
@pytest.fixture(scope='module')
def sel_cols(batting):
cols = batting.columns
start, end = cols.index('AB'), cols.index('H') + 1
return ['playerID', 'yearID', 'teamID', 'G'] + cols[start:end]
@pytest.fixture(scope='module')
def players_base(batting, sel_cols):
return batting[sel_cols].sort_by(sel_cols[:3])
@pytest.fixture(scope='module')
def players(players_base):
return players_base.groupby('playerID')
@pytest.fixture(scope='module')
def players_df(players_base):
return players_base.execute().reset_index(drop=True)
| apache-2.0 | 7,093,453,546,597,148,000 | 25.814229 | 77 | 0.568249 | false |
3dfxsoftware/cbss-addons | account_banking_ccorp/account_banking_ccorp.py | 1 | 2107 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
from parsers import models
def parser_types(*args, **kwargs):
'''Delay evaluation of parser types until start of wizard, to allow
depending modules to initialize and add their parsers to the list
'''
return models.parser_type.get_parser_types()
class resPartnerBank(osv.Model):
_inherit = "res.partner.bank"
_columns = {
'parser_types': fields.selection(
parser_types,
'Parser type',
help=_("Parser type used to import bank statements file")),
'default_credit_account_id': fields.many2one(
'account.account', 'Default credit account',
select=True),
'default_debit_account_id': fields.many2one('account.account', 'Default debit account',
select=True),
}
| gpl-2.0 | 3,842,626,837,670,480,000 | 43.829787 | 103 | 0.558614 | false |
AndresYague/Snuppat | output/figuresAndTables/finalGraphPNe.py | 1 | 8661 | import sys, math, os
import matplotlib.pyplot as plt
def main():
# Check that there's at least one argument
if len(sys.argv) < 2:
print("Usage python {}".format(sys.argv[0]), end = " ")
print("<file1> [<file2> ...]")
return 1
# Automatically detect if decayed
if "decayed" in sys.argv[1]:
plotDecayed = True
else:
plotDecayed = False
# Read input file
fil = "finalGraph.in"
if os.path.isfile(fil):
with open(fil, "r") as fread:
lstyles = fread.readline().strip().split()
labs = []
for line in fread:
                labs.append(line.strip())
    else:
        lstyles = None
        labs = None
lowZ = 34 # Lowest z value to represent
highZ = 56 # Highest z value to represent
# Read "species.dat" and store all the values in lists
species = "../../data/species.dat"
atomicNum = []; atomicMass = []; namesZ = {}
with open(species, "r") as fread:
for line in fread:
lnlst = line.split()
# Correct special names
if lnlst[1] == "d" or lnlst[2] == "0":
lnlst[1] = "h"
# Now relate positions with atomic numbers, atomic masses, and names
zNum = int(lnlst[0]) - int(lnlst[2])
atomicNum.append(zNum)
atomicMass.append(int(lnlst[0]))
namesZ[lnlst[1]] = zNum
# Read all initial solar values
solar = "../../data/solarVals.dat"
solarValues = {}
with open(solar, "r") as fread:
for line in fread:
lnlst = line.split()
isotName = lnlst[0] + lnlst[2]
# Add mass fraction value per atomic number
key = namesZ[lnlst[0]]; val = float(lnlst[1])*float(lnlst[2])
solarValues[key] = solarValues.get(key, 0) + val
# Go file by file
numDens = []
for archivo in sys.argv[1:]:
# Open file for reading
dens = []
fread = open(archivo, "r")
# Each line has mass, temperature, rho, radiat
# and elements in number fraction
newline = None
for line in fread:
if "#" in line:
continue
lnlst = line.split()
if len(lnlst) == 0:
if plotDecayed:
break
else:
continue
if not plotDecayed:
# Surface (newline[0] is the mass)
prevline = newline
newline = [float(x) for x in lnlst]
if newline[0] > 0.85:
break
if plotDecayed:
dens.append(float(lnlst[1]))
# Close file
fread.close()
# Calculate values of interest
if plotDecayed:
numDens.append(dens)
else:
numDens.append([(x + y)*0.5 for (x, y) in
zip(prevline[4:], newline[4:])])
# Calculate now the agb values and print the surface mass fractions per
# each isotope
print("# Surface number fraction values")
agbValues = []
for ii in range(len(numDens)):
dic = {}
dens = numDens[ii]
# Print the model name
print("# {}".format(sys.argv[ii + 1]))
# Add the values for each element
for jj in range(len(atomicNum)):
key = atomicNum[jj]
dic[key] = dic.get(key, 0) + dens[jj]*atomicMass[jj]
# Print the number fraction
print(dens[jj])
agbValues.append(dic)
print("")
# Now identify iron:
ironNumber = namesZ["fe"]
# Now divide every element by iron
for dens in agbValues:
ironDens = dens[ironNumber]
for key in dens:
dens[key] /= ironDens
# Solar as well
ironDens = solarValues[ironNumber]
for key in solarValues:
solarValues[key] /= ironDens
# Now create the final values
finalValues = []
zList = solarValues.keys()
zList.sort(); hFe = []
for ii in range(len(agbValues)):
dens = agbValues[ii]
thisDens = []
for key in zList:
if key == 1:
hFe.append(math.log10(dens[key]/solarValues[key]))
if key < lowZ or key > highZ:
continue
val = math.log10(dens[key]/solarValues[key])
# Modify values for [X/H]
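            # (dens and solarValues were already divided by their iron values, so the
            # log10 ratio above is [X/Fe]; subtracting hFe[ii], the star's [H/Fe],
            # converts it to [X/H])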
val -= hFe[ii]
thisDens.append(val)
finalValues.append(thisDens)
# Create xaxis:
xx = [x for x in zList if x >= lowZ and x <= highZ]
# Print final values
print("# [X/Fe] values")
for ii in range(len(sys.argv[1:])):
print("# {}".format(sys.argv[ii + 1]))
print("")
for jj in range(len(xx)):
print(xx[jj], finalValues[ii][jj])
print("")
# From zList create contIndx. This list contains a number of
# tuples with the first and last index of any contiguous sequence
indx = 1; first = 0
prevKey = None; contIndx = []
for key in xx:
if prevKey is None:
prevKey = key
continue
# Check if keys are contiguous
if key - prevKey > 1:
contIndx.append((first, indx))
first = indx
prevKey = key
indx += 1
# Add last tuple
contIndx.append((first, indx + 1))
# Begin plot
figure = plt.figure()
plt.xlabel("Atomic number Z", size = 14)
plt.ylabel("[X/H]", size = 14)
# Plot values
if labs is None:
labs = sys.argv[1:]
ii = 0
for dens in finalValues:
# Plot first range
first, last = contIndx[0]
if lstyles is None:
lin, = plt.plot(xx[first:last], dens[first:last],
label = labs[ii], lw = 2)
else:
lin, = plt.plot(xx[first:last], dens[first:last], lstyles[ii],
label = labs[ii], lw = 2)
# Get color and line style
col, lst = lin.get_color(), lin.get_linestyle()
colStyle = col + lst
for elem in contIndx[1:]:
first, last = elem
plt.plot(xx[first:last], dens[first:last], colStyle, lw = 2)
ii += 1
# Set floating text
namAtm = {"Se":34, "Kr":36, "Sr":38, "Zr":40,
"Mo":42, "Pd":46, "Cd":48, "Sn":50, "Te":52, "Ba":56,
"Ce":58, "Nd":60, "Sm":62, "Dy":66, "Er":68,
"Yb":70, "Hf":72, "Os":76, "Hg":80, "Pb":82,
"Rb":37, "Cs":55, "Xe":54, "Br": 35}
rNamAtm = []
for name in namAtm:
yVal = 0
if namAtm[name] < lowZ or namAtm[name] > highZ:
continue
for ii in range(len(xx)):
if xx[ii] == namAtm[name]:
yVal = finalValues[-1][ii]
break
plt.text(namAtm[name] - 0.5, yVal*1.01, name, size = 14)
if name in rNamAtm:
plt.plot(namAtm[name], yVal, "ro")
else:
plt.plot(namAtm[name], yVal, "ko")
# Observations values
# (NGC3918, NGC7027)
# Elements: Se, Kr, Rb, Xe
xxObs = [34, 35, 36, 37, 52, 54]
yyErrs = [
[0.10, "-", 0.11, 0.13, "-", 0.11]
,[0.17, 0.08, 0.09, 0.13, 0.12, 0.13]
]
PNe = [
[0.14, "-", 0.65, 0.25, "-", 0.38]
,[0.26, 0.13, 0.84, 0.70, 0.56, 0.66]
]
gray = (0.75, 0.75, 0.75)
mrk = ["^", "s"]
for star_ii in range(len(PNe)):
xxHere = []; yyHere = []; errHere = []
for ii in range(len(xxObs)):
if PNe[star_ii][ii] == "-":
continue
else:
# Fill a region for each error barr
plt.fill_between([xxObs[ii] - 0.5, xxObs[ii] + 0.5],
y1 = PNe[star_ii][ii] - yyErrs[star_ii][ii],
y2 = PNe[star_ii][ii] + yyErrs[star_ii][ii],
facecolor = gray, edgecolor = "k")
# Now append things for the actual plot
xxHere.append(xxObs[ii])
yyHere.append(PNe[star_ii][ii])
errHere.append(yyErrs[star_ii][ii])
plt.plot(xxHere, yyHere, "k" + mrk[star_ii], lw = 2, ms = 8)
plt.legend(loc=0, ncol = 2, prop = {'size': 12})
plt.show()
if __name__ == "__main__":
main()
| mit | 8,277,514,883,426,380,000 | 29.072917 | 80 | 0.472925 | false |
tsuna/tcollector | collectors/0/zfsiostats.py | 1 | 9299 | #!/usr/bin/env python
# This file is part of tcollector.
# Copyright (C) 2012 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
#
'''
ZFS I/O and disk space statistics for TSDB
This plugin tracks, for all pools:
- I/O
zfs.io.pool.{ops.read, ops.write}
zfs.io.pool.{bps.read, bps.write}
zfs.io.device.{ops.read, ops.write}
zfs.io.device.{bps.read, bps.write}
- disk space
zfs.df.pool.kb.{used, free}
zfs.df.device.kb.{used, free}
Disk space usage is given in kbytes.
Throughput is given in operations/s and bytes/s.
'''
import errno
import sys
import time
import subprocess
import re
import signal
import os
from collectors.lib import utils
try:
from collectors.etc import zfsiostats_conf
except ImportError:
zfsiostats_conf = None
DEFAULT_COLLECTION_INTERVAL=15
def convert_to_bytes(string):
"""Take a string in the form 1234K, and convert to bytes"""
factors = {
"K": 1024,
"M": 1024 * 1024,
"G": 1024 * 1024 * 1024,
"T": 1024 * 1024 * 1024 * 1024,
"P": 1024 * 1024 * 1024 * 1024 * 1024,
}
if string == "-": return 0
for f, fm in factors.items():
if string.endswith(f):
number = float(string[:-1])
number = number * fm
return long(number)
return long(string)
def convert_wo_prefix(string):
"""Take a string in the form 1234K, and convert without metric prefix"""
factors = {
"K": 1000,
"M": 1000 * 1000,
"G": 1000 * 1000 * 1000,
"T": 1000 * 1000 * 1000 * 1000,
"P": 1000 * 1000 * 1000 * 1000 * 1000,
}
if string == "-": return 0
for f, fm in factors.items():
if string.endswith(f):
number = float(string[:-1])
number = number * fm
return long(number)
return long(string)
def extract_info(line):
(poolname,
alloc, free,
read_issued, write_issued,
read_throughput, write_throughput) = line.split()
s_df = {}
# 1k blocks
s_df["used"] = convert_to_bytes(alloc) / 1024
s_df["free"] = convert_to_bytes(free) / 1024
s_io = {}
# magnitudeless variable
s_io["ops.read"] = convert_wo_prefix(read_issued)
s_io["ops.write"] = convert_wo_prefix(write_issued)
# throughput
s_io["bps.read"] = convert_to_bytes(read_throughput)
s_io["bps.write"] = convert_to_bytes(write_throughput)
return poolname, s_df, s_io
T_START = 1
T_HEADERS = 2
T_SEPARATOR = 3
T_POOL = 4
T_DEVICE = 5
T_EMPTY = 6
T_LEG = 7
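# Parser states for walking "zpool iostat -v" output: the capacity/operations/bandwidth
# banner (T_START), the column header line (T_HEADERS), the "-----" separator rows
# (T_SEPARATOR), pool summary lines (T_POOL), per-device lines (T_DEVICE), the blank
# line between samples (T_EMPTY) and mirror vdev lines (T_LEG).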
signal_received = None
def handlesignal(signum, stack):
global signal_received
signal_received = signum
def main():
"""zfsiostats main loop"""
global signal_received
collection_interval=DEFAULT_COLLECTION_INTERVAL
if(zfsiostats_conf):
config = zfsiostats_conf.get_config()
collection_interval=config['collection_interval']
signal.signal(signal.SIGTERM, handlesignal)
signal.signal(signal.SIGINT, handlesignal)
try:
p_zpool = subprocess.Popen(
["zpool", "iostat", "-v", str(collection_interval)],
stdout=subprocess.PIPE,
)
except OSError, e:
if e.errno == errno.ENOENT:
# it makes no sense to run this collector here
sys.exit(13) # we signal tcollector to not run us
raise
firstloop = True
lastleg = 0
ltype = None
timestamp = int(time.time())
capacity_stats_pool = {}
capacity_stats_device = {}
io_stats_pool = {}
io_stats_device = {}
start_re = re.compile(".*capacity.*operations.*bandwidth")
headers_re = re.compile(".*pool.*alloc.*free.*read.*write.*read.*write")
separator_re = re.compile(".*-----.*-----.*-----")
while signal_received is None:
try:
line = p_zpool.stdout.readline()
except (IOError, OSError), e:
if e.errno in (errno.EINTR, errno.EAGAIN):
break
raise
if not line:
# end of the program, die
break
if start_re.match(line):
assert ltype in (None, T_EMPTY), \
"expecting last state T_EMPTY or None, now got %s" % ltype
ltype = T_START
elif headers_re.match(line):
assert ltype == T_START, \
"expecting last state T_START, now got %s" % ltype
ltype = T_HEADERS
elif separator_re.match(line):
assert ltype in (T_DEVICE, T_HEADERS), \
"expecting last state T_DEVICE or T_HEADERS, now got %s" % ltype
ltype = T_SEPARATOR
elif len(line) < 2:
assert ltype == T_SEPARATOR, \
"expecting last state T_SEPARATOR, now got %s" % ltype
ltype = T_EMPTY
elif line.startswith(" mirror"):
assert ltype in (T_POOL, T_DEVICE), \
"expecting last state T_POOL or T_DEVICE, now got %s" % ltype
ltype = T_LEG
elif line.startswith(" "):
assert ltype in (T_POOL, T_DEVICE, T_LEG), \
"expecting last state T_POOL or T_DEVICE or T_LEG, now got %s" % ltype
ltype = T_DEVICE
else:
# must be a pool name
#assert ltype == T_SEPARATOR, \
# "expecting last state T_SEPARATOR, now got %s" % ltype
if ltype == T_SEPARATOR:
parentpoolname = ""
ltype = T_POOL
if ltype == T_START:
for x in (
capacity_stats_pool, capacity_stats_device,
io_stats_pool, io_stats_device,
):
x.clear()
timestamp = int(time.time())
elif ltype == T_POOL:
line = line.strip()
poolname, s_df, s_io = extract_info(line)
if parentpoolname == "":
parentpoolname = poolname
else:
poolname=parentpoolname+"."+poolname
capacity_stats_pool[poolname] = s_df
io_stats_pool[poolname] = s_io
# marker for leg
last_leg = 0
elif ltype == T_LEG:
last_leg = last_leg + 1
line = line.strip()
devicename, s_df, s_io = extract_info(line)
capacity_stats_device["%s %s%s" % (poolname, devicename, last_leg)] = s_df
io_stats_device["%s %s%s" % (poolname, devicename, last_leg)] = s_io
elif ltype == T_DEVICE:
line = line.strip()
devicename, s_df, s_io = extract_info(line)
capacity_stats_device["%s %s" % (poolname, devicename)] = s_df
io_stats_device["%s %s" % (poolname, devicename)] = s_io
elif ltype == T_EMPTY:
if firstloop:
firstloop = False
else:
# this flag prevents printing out of the data in the first loop
# which is a since-boot summary similar to iostat
# and is useless to us
for poolname, stats in capacity_stats_pool.items():
fm = "zfs.df.pool.kb.%s %d %s pool=%s"
for statname, statnumber in stats.items():
print fm % (statname, timestamp, statnumber, poolname)
for poolname, stats in io_stats_pool.items():
fm = "zfs.io.pool.%s %d %s pool=%s"
for statname, statnumber in stats.items():
print fm % (statname, timestamp, statnumber, poolname)
for devicename, stats in capacity_stats_device.items():
fm = "zfs.df.device.kb.%s %d %s device=%s pool=%s"
poolname, devicename = devicename.split(" ", 1)
for statname, statnumber in stats.items():
print fm % (statname, timestamp, statnumber,
devicename, poolname)
for devicename, stats in io_stats_device.items():
fm = "zfs.io.device.%s %d %s device=%s pool=%s"
poolname, devicename = devicename.split(" ", 1)
for statname, statnumber in stats.items():
print fm % (statname, timestamp, statnumber,
devicename, poolname)
sys.stdout.flush()
# if this was the first loop, well, we're onto the second loop
    # so we turn the flag off
if signal_received is None:
signal_received = signal.SIGTERM
try:
os.kill(p_zpool.pid, signal_received)
except Exception:
pass
p_zpool.wait()
if __name__ == "__main__":
main()
| gpl-3.0 | 2,507,122,525,126,365,000 | 33.313653 | 86 | 0.559523 | false |
thenetcircle/dino | dino/web.py | 1 | 3853 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from flask import Flask
from flask_socketio import SocketIO
from werkzeug.contrib.fixers import ProxyFix
from dino import environ
from dino.config import ConfigKeys
__author__ = 'Oscar Eriksson <[email protected]>'
logger = logging.getLogger(__name__)
logging.getLogger('amqp').setLevel(logging.INFO)
logging.getLogger('kafka.conn').setLevel(logging.INFO)
logging.getLogger('kafka.client').setLevel(logging.INFO)
logging.getLogger('kafka.metrics').setLevel(logging.INFO)
class ReverseProxied(object):
"""
Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
def create_app():
_app = Flask(
import_name=__name__,
template_folder='admin/templates/',
static_folder='admin/static/')
# used for encrypting cookies for handling sessions
_app.config['SECRET_KEY'] = 'abc492ee-9739-11e6-a174-07f6b92d4a4b'
_app.config['ROOT_URL'] = environ.env.config.get(ConfigKeys.ROOT_URL, domain=ConfigKeys.WEB, default='/')
message_queue_type = environ.env.config.get(ConfigKeys.TYPE, domain=ConfigKeys.QUEUE, default=None)
if message_queue_type is None and not (len(environ.env.config) == 0 or environ.env.config.get(ConfigKeys.TESTING)):
raise RuntimeError('no message queue type specified')
message_queue = 'redis://%s' % environ.env.config.get(ConfigKeys.HOST, domain=ConfigKeys.CACHE_SERVICE, default='')
message_channel = 'dino_%s' % environ.env.config.get(ConfigKeys.ENVIRONMENT, default='test')
logger.info('message_queue: %s' % message_queue)
_socketio = SocketIO(
_app,
logger=logger,
engineio_logger=os.environ.get('DINO_DEBUG', '0') == '1',
async_mode='eventlet',
message_queue=message_queue,
channel=message_channel)
# preferably "emit" should be set during env creation, but the socketio object is not created until after env is
environ.env.out_of_scope_emit = _socketio.emit
_app.wsgi_app = ReverseProxied(ProxyFix(_app.wsgi_app))
return _app, _socketio
app, socketio = create_app()
environ.init_web_auth(environ.env)
# keep this, otherwise flask won't find any routes
import dino.admin.routes
| apache-2.0 | 450,528,208,337,806,850 | 34.675926 | 119 | 0.680249 | false |
Eksmo/calibre | src/calibre/utils/Zeroconf.py | 1 | 55471 | """ Multicast DNS Service Discovery for Python
Copyright (C) 2003, Paul Scott-Murphy
Copyright (C) 2009, Alexander Solovyov
This module provides a framework for the use of DNS Service Discovery
using IP multicast. It has been tested against the JRendezvous
implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
against the mDNSResponder from Mac OS X 10.3.8, 10.5.6, and against
the Avahi library under various Linux distributions.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
"""
"""0.13 update - fix IPv6 support
some cleanups in code"""
"""0.12 update - allow selection of binding interface
typo fix - Thanks A. M. Kuchlingi
removed all use of word 'Rendezvous' - this is an API change"""
"""0.11 update - correction to comments for addListener method
support for new record types seen from OS X
- IPv6 address
- hostinfo
ignore unknown DNS record types
fixes to name decoding
works alongside other processes using port 5353 (e.g. on Mac OS X)
tested against Mac OS X 10.3.2's mDNSResponder
corrections to removal of list entries for service browser"""
"""0.10 update - Jonathon Paisley contributed these corrections:
always multicast replies, even when query is unicast
correct a pointer encoding problem
can now write records in any order
traceback shown on failure
better TXT record parsing
server is now separate from name
can cancel a service browser
modified some unit tests to accommodate these changes"""
"""0.09 update - remove all records on service unregistration
fix DOS security problem with readName"""
"""0.08 update - changed licensing to LGPL"""
"""0.07 update - faster shutdown on engine
pointer encoding of outgoing names
ServiceBrowser now works
new unit tests"""
"""0.06 update - small improvements with unit tests
added defined exception types
new style objects
fixed hostname/interface problem
fixed socket timeout problem
fixed addServiceListener() typo bug
using select() for socket reads
tested on Debian unstable with Python 2.2.2"""
"""0.05 update - ensure case insensitivty on domain names
support for unicast DNS queries"""
"""0.04 update - added some unit tests
added __ne__ adjuncts where required
ensure names end in '.local.'
timeout on receiving socket for clean shutdown"""
__author__ = "Paul Scott-Murphy"
__email__ = "paul at scott dash murphy dot com"
__version__ = "0.12"
import string
import time
import struct
import socket
import threading
import select
import traceback
__all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
# hook for threads
globals()['_GLOBAL_DONE'] = 0
# Some timing constants
_UNREGISTER_TIME = 125
_CHECK_TIME = 175
_REGISTER_TIME = 225
_LISTENER_TIME = 200
_BROWSER_TIME = 500
# Some DNS constants
_MDNS_ADDR = '224.0.0.251'
_MDNS_PORT = 5353
_DNS_PORT = 53
_DNS_TTL = 60 * 60 # one hour default TTL
_MAX_MSG_TYPICAL = 1460 # unused
_MAX_MSG_ABSOLUTE = 8972
_FLAGS_QR_MASK = 0x8000 # query response mask
_FLAGS_QR_QUERY = 0x0000 # query
_FLAGS_QR_RESPONSE = 0x8000 # response
_FLAGS_AA = 0x0400 # Authoritative answer
_FLAGS_TC = 0x0200 # Truncated
_FLAGS_RD = 0x0100 # Recursion desired
_FLAGS_RA = 0x0080 # Recursion available
_FLAGS_Z = 0x0040 # Zero
_FLAGS_AD = 0x0020 # Authentic data
_FLAGS_CD = 0x0010 # Checking disabled
_CLASS_IN = 1
_CLASS_CS = 2
_CLASS_CH = 3
_CLASS_HS = 4
_CLASS_NONE = 254
_CLASS_ANY = 255
_CLASS_MASK = 0x7FFF
_CLASS_UNIQUE = 0x8000
_TYPE_A = 1
_TYPE_NS = 2
_TYPE_MD = 3
_TYPE_MF = 4
_TYPE_CNAME = 5
_TYPE_SOA = 6
_TYPE_MB = 7
_TYPE_MG = 8
_TYPE_MR = 9
_TYPE_NULL = 10
_TYPE_WKS = 11
_TYPE_PTR = 12
_TYPE_HINFO = 13
_TYPE_MINFO = 14
_TYPE_MX = 15
_TYPE_TXT = 16
_TYPE_AAAA = 28
_TYPE_SRV = 33
_TYPE_ANY = 255
# Mapping constants to names
_CLASSES = { _CLASS_IN : "in",
_CLASS_CS : "cs",
_CLASS_CH : "ch",
_CLASS_HS : "hs",
_CLASS_NONE : "none",
_CLASS_ANY : "any" }
_TYPES = { _TYPE_A : "a",
_TYPE_NS : "ns",
_TYPE_MD : "md",
_TYPE_MF : "mf",
_TYPE_CNAME : "cname",
_TYPE_SOA : "soa",
_TYPE_MB : "mb",
_TYPE_MG : "mg",
_TYPE_MR : "mr",
_TYPE_NULL : "null",
_TYPE_WKS : "wks",
_TYPE_PTR : "ptr",
_TYPE_HINFO : "hinfo",
_TYPE_MINFO : "minfo",
_TYPE_MX : "mx",
_TYPE_TXT : "txt",
_TYPE_AAAA : "quada",
_TYPE_SRV : "srv",
_TYPE_ANY : "any" }
# utility functions
def currentTimeMillis():
"""Current system time in milliseconds"""
return time.time() * 1000
def ntop(address):
"""Convert address to its string representation"""
af = len(address) == 4 and socket.AF_INET or socket.AF_INET6
return socket.inet_ntop(af, address)
def address_type(address):
"""Return appropriate record type for an address"""
return len(address) == 4 and _TYPE_A or _TYPE_AAAA
# Exceptions
class NonLocalNameException(Exception):
pass
class NonUniqueNameException(Exception):
pass
class NamePartTooLongException(Exception):
pass
class AbstractMethodException(Exception):
pass
class BadTypeInNameException(Exception):
pass
class BadDomainName(Exception):
def __init__(self, pos):
Exception.__init__(self, "at position " + str(pos))
class BadDomainNameCircular(BadDomainName):
pass
# implementation classes
class DNSEntry(object):
"""A DNS entry"""
def __init__(self, name, type, clazz):
self.key = string.lower(name)
self.name = name
self.type = type
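        # in mDNS the top bit of the class field is the cache-flush ("unique") bit;
        # it is masked out of the stored class and tracked separately below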
self.clazz = clazz & _CLASS_MASK
self.unique = (clazz & _CLASS_UNIQUE) != 0
def __eq__(self, other):
"""Equality test on name, type, and class"""
if isinstance(other, DNSEntry):
return self.name == other.name and self.type == other.type and self.clazz == other.clazz
return 0
def __ne__(self, other):
"""Non-equality test"""
return not self.__eq__(other)
def getClazz(self, clazz):
"""Class accessor"""
try:
return _CLASSES[clazz]
except:
return "?(%s)" % (clazz)
def getType(self, type):
"""Type accessor"""
try:
return _TYPES[type]
except:
return "?(%s)" % (type)
def toString(self, hdr, other):
"""String representation with additional information"""
result = "%s[%s,%s" % (hdr, self.getType(self.type), self.getClazz(self.clazz))
if self.unique:
result += "-unique,"
else:
result += ","
result += self.name
if other is not None:
result += ",%s]" % (other)
else:
result += "]"
return result
class DNSQuestion(DNSEntry):
"""A DNS question entry"""
def __init__(self, name, type, clazz):
if not name.endswith(".local."):
raise NonLocalNameException(name)
DNSEntry.__init__(self, name, type, clazz)
def answeredBy(self, rec):
"""Returns true if the question is answered by the record"""
return self.clazz == rec.clazz and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name
def __repr__(self):
"""String representation"""
return DNSEntry.toString(self, "question", None)
class DNSRecord(DNSEntry):
"""A DNS record - like a DNS entry, but has a TTL"""
def __init__(self, name, type, clazz, ttl):
DNSEntry.__init__(self, name, type, clazz)
self.ttl = ttl
self.created = currentTimeMillis()
def __eq__(self, other):
"""Tests equality as per DNSRecord"""
if isinstance(other, DNSRecord):
return DNSEntry.__eq__(self, other)
return 0
def suppressedBy(self, msg):
"""Returns true if any answer in a message can suffice for the
information held in this record."""
for record in msg.answers:
if self.suppressedByAnswer(record):
return 1
return 0
def suppressedByAnswer(self, other):
"""Returns true if another record has same name, type and class,
and if its TTL is at least half of this record's."""
if self == other and other.ttl > (self.ttl / 2):
return 1
return 0
def getExpirationTime(self, percent):
"""Returns the time at which this record will have expired
by a certain percentage."""
return self.created + (percent * self.ttl * 10)
def getRemainingTTL(self, now):
"""Returns the remaining TTL in seconds."""
return max(0, (self.getExpirationTime(100) - now) / 1000)
def isExpired(self, now):
"""Returns true if this record has expired."""
return self.getExpirationTime(100) <= now
def isStale(self, now):
"""Returns true if this record is at least half way expired."""
return self.getExpirationTime(50) <= now
def resetTTL(self, other):
"""Sets this record's TTL and created time to that of
another record."""
self.created = other.created
self.ttl = other.ttl
def write(self, out):
"""Abstract method"""
raise AbstractMethodException
def toString(self, other):
"""String representation with addtional information"""
arg = "%s/%s,%s" % (self.ttl, self.getRemainingTTL(currentTimeMillis()), other)
return DNSEntry.toString(self, "record", arg)
class DNSAddress(DNSRecord):
"""A DNS address record"""
def __init__(self, name, type, clazz, ttl, address):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.address = address
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeString(self.address, len(self.address))
def __eq__(self, other):
"""Tests equality on address"""
if isinstance(other, DNSAddress):
return self.address == other.address
return 0
def __repr__(self):
"""String representation"""
try:
return 'record[%s]' % ntop(self.address)
except:
return 'record[%s]' % self.address
class DNSHinfo(DNSRecord):
"""A DNS host information record"""
def __init__(self, name, type, clazz, ttl, cpu, os):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.cpu = cpu
self.os = os
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeString(self.cpu, len(self.cpu))
out.writeString(self.os, len(self.os))
def __eq__(self, other):
"""Tests equality on cpu and os"""
if isinstance(other, DNSHinfo):
return self.cpu == other.cpu and self.os == other.os
return 0
def __repr__(self):
"""String representation"""
return self.cpu + " " + self.os
class DNSPointer(DNSRecord):
"""A DNS pointer record"""
def __init__(self, name, type, clazz, ttl, alias):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.alias = alias
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeName(self.alias)
def __eq__(self, other):
"""Tests equality on alias"""
if isinstance(other, DNSPointer):
return self.alias == other.alias
return 0
def __repr__(self):
"""String representation"""
return self.toString(self.alias)
class DNSText(DNSRecord):
"""A DNS text record"""
def __init__(self, name, type, clazz, ttl, text):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.text = text
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeString(self.text, len(self.text))
def __eq__(self, other):
"""Tests equality on text"""
if isinstance(other, DNSText):
return self.text == other.text
return 0
def __repr__(self):
"""String representation"""
if len(self.text) > 10:
return self.toString(self.text[:7] + "...")
else:
return self.toString(self.text)
class DNSService(DNSRecord):
"""A DNS service record"""
def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.priority = priority
self.weight = weight
self.port = port
self.server = server
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeShort(self.priority)
out.writeShort(self.weight)
out.writeShort(self.port)
out.writeName(self.server)
def __eq__(self, other):
"""Tests equality on priority, weight, port and server"""
if isinstance(other, DNSService):
return self.priority == other.priority and self.weight == other.weight and self.port == other.port and self.server == other.server
return 0
def __repr__(self):
"""String representation"""
return self.toString("%s:%s" % (self.server, self.port))
class DNSIncoming(object):
"""Object representation of an incoming DNS packet"""
def __init__(self, data):
"""Constructor from string holding bytes of packet"""
self.offset = 0
self.data = data
self.questions = []
self.answers = []
self.numQuestions = 0
self.numAnswers = 0
self.numAuthorities = 0
self.numAdditionals = 0
self.readHeader()
self.readQuestions()
self.readOthers()
def readHeader(self):
"""Reads header portion of packet"""
format = '!HHHHHH'
length = struct.calcsize(format)
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
self.id = info[0]
self.flags = info[1]
self.numQuestions = info[2]
self.numAnswers = info[3]
self.numAuthorities = info[4]
self.numAdditionals = info[5]
def readQuestions(self):
"""Reads questions section of packet"""
format = '!HH'
length = struct.calcsize(format)
for i in range(0, self.numQuestions):
name = self.readName()
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
try:
question = DNSQuestion(name, info[0], info[1])
self.questions.append(question)
except NonLocalNameException:
pass
def readInt(self):
"""Reads an integer from the packet"""
format = '!I'
length = struct.calcsize(format)
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
return info[0]
def readCharacterString(self):
"""Reads a character string from the packet"""
length = ord(self.data[self.offset])
self.offset += 1
return self.readString(length)
def readString(self, len):
"""Reads a string of a given length from the packet"""
format = '!' + str(len) + 's'
length = struct.calcsize(format)
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
return info[0]
def readUnsignedShort(self):
"""Reads an unsigned short from the packet"""
format = '!H'
length = struct.calcsize(format)
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
return info[0]
def readOthers(self):
"""Reads the answers, authorities and additionals section of the packet"""
format = '!HHiH'
length = struct.calcsize(format)
n = self.numAnswers + self.numAuthorities + self.numAdditionals
for i in range(0, n):
domain = self.readName()
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
rec = None
if info[0] == _TYPE_A:
rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(4))
elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
rec = DNSPointer(domain, info[0], info[1], info[2], self.readName())
elif info[0] == _TYPE_TXT:
rec = DNSText(domain, info[0], info[1], info[2], self.readString(info[3]))
elif info[0] == _TYPE_SRV:
rec = DNSService(domain, info[0], info[1], info[2], self.readUnsignedShort(), self.readUnsignedShort(), self.readUnsignedShort(), self.readName())
elif info[0] == _TYPE_HINFO:
rec = DNSHinfo(domain, info[0], info[1], info[2], self.readCharacterString(), self.readCharacterString())
elif info[0] == _TYPE_AAAA:
rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(16))
else:
# Skip unknown record type (using DNS length field)
self.offset += info[3]
if rec is not None:
self.answers.append(rec)
def isQuery(self):
"""Returns true if this is a query"""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY
def isResponse(self):
"""Returns true if this is a response"""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE
def readUTF(self, offset, len):
"""Reads a UTF-8 string of a given length from the packet"""
return self.data[offset:offset+len].decode('utf-8')
def readName(self):
"""Reads a domain name from the packet"""
result = ''
off = self.offset
next = -1
first = off
while 1:
len = ord(self.data[off])
off += 1
if len == 0:
break
t = len & 0xC0
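            # the top two bits of the length byte select the label type:
            # 0x00 is a literal label of that length, 0xC0 is a compression
            # pointer to an earlier occurrence of the name (RFC 1035 4.1.4)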
if t == 0x00:
result = ''.join((result, self.readUTF(off, len) + '.'))
off += len
elif t == 0xC0:
if next < 0:
next = off + 1
off = ((len & 0x3F) << 8) | ord(self.data[off])
if off >= first:
raise BadDomainNameCircular(off)
first = off
else:
raise BadDomainName(off)
if next >= 0:
self.offset = next
else:
self.offset = off
return result
class DNSOutgoing(object):
"""Object representation of an outgoing packet"""
def __init__(self, flags, multicast = 1):
self.finished = 0
self.id = 0
self.multicast = multicast
self.flags = flags
self.names = {}
self.data = []
self.size = 12
self.questions = []
self.answers = []
self.authorities = []
self.additionals = []
def addQuestion(self, record):
"""Adds a question"""
self.questions.append(record)
def addAnswer(self, inp, record):
"""Adds an answer"""
if not record.suppressedBy(inp):
self.addAnswerAtTime(record, 0)
def addAnswerAtTime(self, record, now):
"""Adds an answer if if does not expire by a certain time"""
if record is not None:
if now == 0 or not record.isExpired(now):
self.answers.append((record, now))
def addAuthorativeAnswer(self, record):
"""Adds an authoritative answer"""
self.authorities.append(record)
def addAdditionalAnswer(self, record):
"""Adds an additional answer"""
self.additionals.append(record)
def writeByte(self, value):
"""Writes a single byte to the packet"""
format = '!c'
self.data.append(struct.pack(format, chr(value)))
self.size += 1
def insertShort(self, index, value):
"""Inserts an unsigned short in a certain position in the packet"""
format = '!H'
self.data.insert(index, struct.pack(format, value))
self.size += 2
def writeShort(self, value):
"""Writes an unsigned short to the packet"""
format = '!H'
self.data.append(struct.pack(format, value))
self.size += 2
def writeInt(self, value):
"""Writes an unsigned integer to the packet"""
format = '!I'
self.data.append(struct.pack(format, int(value)))
self.size += 4
def writeString(self, value, length):
"""Writes a string to the packet"""
format = '!' + str(length) + 's'
self.data.append(struct.pack(format, value))
self.size += length
def writeUTF(self, s):
"""Writes a UTF-8 string of a given length to the packet"""
utfstr = s.encode('utf-8')
length = len(utfstr)
if length > 64:
raise NamePartTooLongException
self.writeByte(length)
self.writeString(utfstr, length)
def writeName(self, name):
"""Writes a domain name to the packet"""
try:
# Find existing instance of this name in packet
#
index = self.names[name]
except KeyError:
# No record of this name already, so write it
# out as normal, recording the location of the name
# for future pointers to it.
#
self.names[name] = self.size
parts = name.split('.')
if parts[-1] == '':
parts = parts[:-1]
for part in parts:
self.writeUTF(part)
self.writeByte(0)
return
# An index was found, so write a pointer to it
#
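        # a compression pointer is two bytes: the top two bits set (0xC0) plus a
        # 14-bit offset of the name's earlier occurrence within the packet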
self.writeByte((index >> 8) | 0xC0)
self.writeByte(index)
def writeQuestion(self, question):
"""Writes a question to the packet"""
self.writeName(question.name)
self.writeShort(question.type)
self.writeShort(question.clazz)
def writeRecord(self, record, now):
"""Writes a record (answer, authoritative answer, additional) to
the packet"""
self.writeName(record.name)
self.writeShort(record.type)
if record.unique and self.multicast:
self.writeShort(record.clazz | _CLASS_UNIQUE)
else:
self.writeShort(record.clazz)
if now == 0:
self.writeInt(record.ttl)
else:
self.writeInt(record.getRemainingTTL(now))
index = len(self.data)
# Adjust size for the short we will write before this record
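        # (so that name offsets recorded while the record data is written already
        # account for the 2-byte length field inserted afterwards)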
#
self.size += 2
record.write(self)
self.size -= 2
length = len(''.join(self.data[index:]))
self.insertShort(index, length) # Here is the short we adjusted for
def packet(self):
"""Returns a string containing the packet's bytes
No further parts should be added to the packet once this
is done."""
if not self.finished:
self.finished = 1
for question in self.questions:
self.writeQuestion(question)
for answer, time in self.answers:
self.writeRecord(answer, time)
for authority in self.authorities:
self.writeRecord(authority, 0)
for additional in self.additionals:
self.writeRecord(additional, 0)
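            # build the 12-byte header by prepending at index 0; each insertShort(0, ...)
            # pushes the earlier inserts back, so the on-wire order ends up as
            # id, flags, qdcount, ancount, nscount, arcount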
self.insertShort(0, len(self.additionals))
self.insertShort(0, len(self.authorities))
self.insertShort(0, len(self.answers))
self.insertShort(0, len(self.questions))
self.insertShort(0, self.flags)
if self.multicast:
self.insertShort(0, 0)
else:
self.insertShort(0, self.id)
return ''.join(self.data)
class DNSCache(object):
"""A cache of DNS entries"""
def __init__(self):
self.cache = {}
def add(self, entry):
"""Adds an entry"""
try:
list = self.cache[entry.key]
except:
list = self.cache[entry.key] = []
list.append(entry)
def remove(self, entry):
"""Removes an entry"""
try:
list = self.cache[entry.key]
list.remove(entry)
except:
pass
def get(self, entry):
"""Gets an entry by key. Will return None if there is no
matching entry."""
try:
list = self.cache[entry.key]
return list[list.index(entry)]
except:
return None
def getByDetails(self, name, type, clazz):
"""Gets an entry by details. Will return None if there is
no matching entry."""
entry = DNSEntry(name, type, clazz)
return self.get(entry)
def entriesWithName(self, name):
"""Returns a list of entries whose key matches the name."""
try:
return self.cache[name]
except:
return []
def entries(self):
"""Returns a list of all entries"""
def add(x, y): return x+y
try:
return reduce(add, self.cache.values())
except:
return []
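# Illustrative sketch of how the cache is used elsewhere in this module (keys are
# the lowercased record names, so lookups must use the lowercased form as well):
#   cache = DNSCache()
#   cache.add(record)
#   hit = cache.getByDetails("myhost.local.", _TYPE_A, _CLASS_IN)   # None if absent
#   for entry in cache.entriesWithName("myhost.local."):
#       pass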
class Engine(threading.Thread):
"""An engine wraps read access to sockets, allowing objects that
need to receive data from sockets to be called back when the
sockets are ready.
A reader needs a handle_read() method, which is called when the socket
it is interested in is ready for reading.
Writers are not implemented here, because we only send short
packets.
"""
def __init__(self, zeroconf):
threading.Thread.__init__(self)
self.zeroconf = zeroconf
self.readers = {} # maps socket to reader
self.timeout = 5
self.condition = threading.Condition()
self.setDaemon(True) # By Kovid
self.start()
def run(self):
while not globals()['_GLOBAL_DONE']:
rs = self.getReaders()
if len(rs) == 0:
# No sockets to manage, but we wait for the timeout
# or addition of a socket
#
self.condition.acquire()
self.condition.wait(self.timeout)
self.condition.release()
else:
from calibre.constants import DEBUG
try:
rr, wr, er = select.select(rs, [], [], self.timeout)
for socket in rr:
try:
self.readers[socket].handle_read()
except:
if DEBUG:
traceback.print_exc()
except:
pass
def getReaders(self):
self.condition.acquire()
result = self.readers.keys()
self.condition.release()
return result
def addReader(self, reader, socket):
self.condition.acquire()
self.readers[socket] = reader
self.condition.notify()
self.condition.release()
def delReader(self, socket):
self.condition.acquire()
del(self.readers[socket])
self.condition.notify()
self.condition.release()
def notify(self):
self.condition.acquire()
self.condition.notify()
self.condition.release()
class Listener(object):
"""A Listener is used by this module to listen on the multicast
group to which DNS messages are sent, allowing the implementation
to cache information as it arrives.
It requires registration with an Engine object in order to have
    its handle_read() method called when a socket is available for reading."""
def __init__(self, zeroconf):
self.zeroconf = zeroconf
self.zeroconf.engine.addReader(self, self.zeroconf.socket)
def handle_read(self):
data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
self.data = data
msg = DNSIncoming(data)
if msg.isQuery():
# Always multicast responses
#
if port == _MDNS_PORT:
self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
# If it's not a multicast query, reply via unicast
# and multicast
#
elif port == _DNS_PORT:
self.zeroconf.handleQuery(msg, addr, port)
self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
else:
self.zeroconf.handleResponse(msg)
class Reaper(threading.Thread):
"""A Reaper is used by this module to remove cache entries that
have expired."""
def __init__(self, zeroconf):
threading.Thread.__init__(self)
self.setDaemon(True) # By Kovid
self.zeroconf = zeroconf
self.start()
def run(self):
while 1:
try:
self.zeroconf.wait(10 * 1000)
except TypeError: # By Kovid
globals()['_GLOBAL_DONE'] = 1
return
if globals()['_GLOBAL_DONE']:
return
now = currentTimeMillis()
for record in self.zeroconf.cache.entries():
if record.isExpired(now):
self.zeroconf.updateRecord(now, record)
self.zeroconf.cache.remove(record)
class ServiceBrowser(threading.Thread):
"""Used to browse for a service of a specific type.
The listener object will have its addService() and
removeService() methods called when this browser
    discovers changes in the services' availability."""
def __init__(self, zeroconf, type, listener):
"""Creates a browser for a specific type"""
threading.Thread.__init__(self)
self.zeroconf = zeroconf
self.type = type
self.listener = listener
self.services = {}
self.nextTime = currentTimeMillis()
self.delay = _BROWSER_TIME
self.list = []
self.done = 0
self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
self.start()
def updateRecord(self, zeroconf, now, record):
"""Callback invoked by Zeroconf when new information arrives.
Updates information required by browser in the Zeroconf cache."""
if record.type == _TYPE_PTR and record.name == self.type:
expired = record.isExpired(now)
try:
oldrecord = self.services[record.alias.lower()]
if not expired:
oldrecord.resetTTL(record)
else:
del(self.services[record.alias.lower()])
callback = lambda x: self.listener.removeService(x, self.type, record.alias)
self.list.append(callback)
return
except:
if not expired:
self.services[record.alias.lower()] = record
callback = lambda x: self.listener.addService(x, self.type, record.alias)
self.list.append(callback)
expires = record.getExpirationTime(75)
if expires < self.nextTime:
self.nextTime = expires
def cancel(self):
self.done = 1
self.zeroconf.notifyAll()
def run(self):
while 1:
event = None
now = currentTimeMillis()
if len(self.list) == 0 and self.nextTime > now:
self.zeroconf.wait(self.nextTime - now)
if globals()['_GLOBAL_DONE'] or self.done:
return
now = currentTimeMillis()
if self.nextTime <= now:
out = DNSOutgoing(_FLAGS_QR_QUERY)
out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
for record in self.services.values():
if not record.isExpired(now):
out.addAnswerAtTime(record, now)
self.zeroconf.send(out)
self.nextTime = now + self.delay
self.delay = min(20 * 1000, self.delay * 2)
if len(self.list) > 0:
event = self.list.pop(0)
if event is not None:
event(self.zeroconf)
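# Minimal listener sketch for ServiceBrowser (the MyListener name is hypothetical;
# only the addService/removeService methods mentioned in the docstring are required):
#   class MyListener(object):
#       def addService(self, zeroconf, type, name):
#           print "added:", name, zeroconf.getServiceInfo(type, name)
#       def removeService(self, zeroconf, type, name):
#           print "removed:", name
#   r = Zeroconf()
#   browser = ServiceBrowser(r, "_http._tcp.local.", MyListener())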
class ServiceInfo(object):
"""Service information"""
def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None):
"""Create a service description.
type: fully qualified service type name
name: fully qualified service name
address: IP address as unsigned short, network byte order
port: port that the service runs on
weight: weight of the service
priority: priority of the service
properties: dictionary of properties (or a string holding the bytes for the text field)
server: fully qualified name for service host (defaults to name)"""
if not name.endswith(type):
raise BadTypeInNameException
self.type = type
self.name = name
self.address = address
if address:
self.ip_type = address_type(address)
self.port = port
self.weight = weight
self.priority = priority
if server:
self.server = server
else:
self.server = name
self.setProperties(properties)
def setProperties(self, properties):
"""Sets properties and text of this info from a dictionary"""
if isinstance(properties, dict):
self.properties = properties
list = []
result = ''
for key in properties:
value = properties[key]
if value is None:
suffix = ''
elif isinstance(value, str):
suffix = value
elif isinstance(value, int):
suffix = value and 'true' or 'false'
else:
suffix = ''
list.append('='.join((key, suffix)))
for item in list:
result = ''.join((result, struct.pack('!c', chr(len(item))), item))
self.text = result
else:
self.text = properties
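    # Worked example of the TXT encoding built above (illustrative): properties
    # {'a': '1'} become the length-prefixed text '\x03a=1'; an integer value is
    # rendered as 'true' or 'false', and a None value leaves an empty suffix ('a=').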
def setText(self, text):
"""Sets properties and text given a text field"""
self.text = text
try:
result = {}
end = len(text)
index = 0
strs = []
while index < end:
length = ord(text[index])
index += 1
strs.append(text[index:index+length])
index += length
for s in strs:
eindex = s.find('=')
if eindex == -1:
# No equals sign at all
key = s
value = 0
else:
key = s[:eindex]
value = s[eindex+1:]
if value == 'true':
value = 1
elif value == 'false' or not value:
value = 0
# Only update non-existent properties
if key and result.get(key) == None:
result[key] = value
self.properties = result
except:
traceback.print_exc()
self.properties = None
def getType(self):
"""Type accessor"""
return self.type
def getName(self):
"""Name accessor"""
if self.type is not None and self.name.endswith("." + self.type):
return self.name[:len(self.name) - len(self.type) - 1]
return self.name
def getAddress(self):
"""Address accessor"""
return self.address
def getPort(self):
"""Port accessor"""
return self.port
def getPriority(self):
"""Pirority accessor"""
return self.priority
def getWeight(self):
"""Weight accessor"""
return self.weight
def getProperties(self):
"""Properties accessor"""
return self.properties
def getText(self):
"""Text accessor"""
return self.text
def getServer(self):
"""Server accessor"""
return self.server
def updateRecord(self, zeroconf, now, record):
"""Updates service information from a DNS record"""
if record is None or record.isExpired(now):
return
if (record.type in (_TYPE_A, _TYPE_AAAA) and
record.name == self.server):
self.address = record.address
elif record.type == _TYPE_SRV and record.name == self.name:
self.server = record.server
self.port = record.port
self.weight = record.weight
self.priority = record.priority
self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN))
elif record.type == _TYPE_TXT and record.name == self.name:
self.setText(record.text)
def request(self, zeroconf, timeout):
"""Returns true if the service could be discovered on the
network, and updates this object with details discovered.
"""
now = currentTimeMillis()
delay = _LISTENER_TIME
next = now + delay
last = now + timeout
result = 0
try:
zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
while self.server is None or self.address is None or self.text is None:
if last <= now:
return 0
if next <= now:
out = DNSOutgoing(_FLAGS_QR_QUERY)
out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now)
out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now)
if self.server is not None:
out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now)
zeroconf.send(out)
next = now + delay
delay = delay * 2
zeroconf.wait(min(next, last) - now)
now = currentTimeMillis()
result = 1
finally:
zeroconf.removeListener(self)
return result
def __eq__(self, other):
"""Tests equality of service name"""
if isinstance(other, ServiceInfo):
return other.name == self.name
return 0
def __ne__(self, other):
"""Non-equality test"""
return not self.__eq__(other)
def __repr__(self):
"""String representation"""
result = "service[%s,%s:%s," % (self.name, ntop(self.getAddress()), self.port)
if self.text is None:
result += "None"
else:
if len(self.text) < 20:
result += self.text
else:
result += self.text[:17] + "..."
result += "]"
return result
class Zeroconf(object):
"""Implementation of Zeroconf Multicast DNS Service Discovery
Supports registration, unregistration, queries and browsing.
"""
def __init__(self, bindaddress=None):
"""Creates an instance of the Zeroconf class, establishing
multicast communications, listening and reaping threads."""
globals()['_GLOBAL_DONE'] = 0
if bindaddress is None:
self.intf = socket.gethostbyname(socket.gethostname())
else:
self.intf = bindaddress
self.group = ('', _MDNS_PORT)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except:
# SO_REUSEADDR should be equivalent to SO_REUSEPORT for
# multicast UDP sockets (p 731, "TCP/IP Illustrated,
# Volume 2"), but some BSD-derived systems require
            # SO_REUSEPORT to be specified explicitly. Also, not all
# versions of Python have SO_REUSEPORT available. So
# if you're on a BSD-based system, and haven't upgraded
# to Python 2.3 yet, you may find this library doesn't
# work as expected.
#
pass
self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
try:
self.socket.bind(self.group)
except:
# Some versions of linux raise an exception even though
# the SO_REUSE* options have been set, so ignore it
#
pass
#self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
self.listeners = []
self.browsers = []
self.services = {}
self.servicetypes = {}
self.cache = DNSCache()
self.condition = threading.Condition()
self.engine = Engine(self)
self.listener = Listener(self)
self.reaper = Reaper(self)
def isLoopback(self):
return self.intf.startswith("127.0.0.1")
def isLinklocal(self):
return self.intf.startswith("169.254.")
def wait(self, timeout):
"""Calling thread waits for a given number of milliseconds or
until notified."""
self.condition.acquire()
self.condition.wait(timeout/1000)
self.condition.release()
def notifyAll(self):
"""Notifies all waiting threads"""
self.condition.acquire()
self.condition.notifyAll()
self.condition.release()
def getServiceInfo(self, type, name, timeout=3000):
"""Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds."""
info = ServiceInfo(type, name)
if info.request(self, timeout):
return info
return None
def addServiceListener(self, type, listener):
"""Adds a listener for a particular service type. This object
will then have its updateRecord method called when information
arrives for that type."""
self.removeServiceListener(listener)
self.browsers.append(ServiceBrowser(self, type, listener))
def removeServiceListener(self, listener):
"""Removes a listener from the set that is currently listening."""
for browser in self.browsers:
if browser.listener == listener:
browser.cancel()
del(browser)
def registerService(self, info, ttl=_DNS_TTL):
"""Registers service information to the network with a default TTL
of 60 seconds. Zeroconf will then respond to requests for
information for that service. The name of the service may be
changed if needed to make it unique on the network."""
self.checkService(info)
self.services[info.name.lower()] = info
if self.servicetypes.has_key(info.type):
self.servicetypes[info.type]+=1
else:
self.servicetypes[info.type]=1
now = currentTimeMillis()
nextTime = now
i = 0
while i < 3:
if now < nextTime:
self.wait(nextTime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0)
out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, ttl, info.priority, info.weight, info.port, info.server), 0)
out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0)
if info.address:
out.addAnswerAtTime(DNSAddress(info.server, info.ip_type, _CLASS_IN, ttl, info.address), 0)
self.send(out)
i += 1
nextTime += _REGISTER_TIME
def unregisterService(self, info):
"""Unregister a service."""
try:
del(self.services[info.name.lower()])
if self.servicetypes[info.type]>1:
self.servicetypes[info.type]-=1
else:
del self.servicetypes[info.type]
except:
pass
now = currentTimeMillis()
nextTime = now
i = 0
while i < 3:
if now < nextTime:
self.wait(nextTime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.name), 0)
out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
if info.address:
out.addAnswerAtTime(DNSAddress(info.server, info.ip_type, _CLASS_IN, 0, info.address), 0)
self.send(out)
i += 1
nextTime += _UNREGISTER_TIME
def unregisterAllServices(self):
"""Unregister all registered services."""
if not self.services:
return
now = currentTimeMillis()
nextTime = now
i = 0
while i < 3:
if now < nextTime:
self.wait(nextTime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
for info in self.services.values():
out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0)
out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
if info.address:
out.addAnswerAtTime(DNSAddress(info.server, info.ip_type, _CLASS_IN, 0, info.address), 0)
self.send(out)
i += 1
nextTime += _UNREGISTER_TIME
def checkService(self, info):
"""Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique."""
now = currentTimeMillis()
nextTime = now
i = 0
while i < 3:
for record in self.cache.entriesWithName(info.type):
if record.type == _TYPE_PTR and not record.isExpired(now) and record.alias == info.name:
if (info.name.find('.') < 0):
info.name = info.name + ".[" + info.address + ":" + info.port + "]." + info.type
self.checkService(info)
return
raise NonUniqueNameException
if now < nextTime:
self.wait(nextTime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
self.debug = out
out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
out.addAuthorativeAnswer(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name))
self.send(out)
i += 1
nextTime += _CHECK_TIME
def addListener(self, listener, question):
"""Adds a listener for a given question. The listener will have
its updateRecord method called when information is available to
answer the question."""
now = currentTimeMillis()
self.listeners.append(listener)
if question is not None:
for record in self.cache.entriesWithName(question.name):
if question.answeredBy(record) and not record.isExpired(now):
listener.updateRecord(self, now, record)
self.notifyAll()
def removeListener(self, listener):
"""Removes a listener."""
try:
self.listeners.remove(listener)
self.notifyAll()
except:
pass
def updateRecord(self, now, rec):
"""Used to notify listeners of new information that has updated
a record."""
for listener in self.listeners:
listener.updateRecord(self, now, rec)
self.notifyAll()
def handleResponse(self, msg):
"""Deal with incoming response packets. All answers
are held in the cache, and listeners are notified."""
now = currentTimeMillis()
for record in msg.answers:
expired = record.isExpired(now)
if record in self.cache.entries():
if expired:
self.cache.remove(record)
else:
entry = self.cache.get(record)
if entry is not None:
entry.resetTTL(record)
record = entry
else:
self.cache.add(record)
self.updateRecord(now, record)
def handleQuery(self, msg, addr, port):
"""Deal with incoming query packets. Provides a response if
possible."""
out = None
# Support unicast client responses
#
if port != _MDNS_PORT:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
for question in msg.questions:
out.addQuestion(question)
for question in msg.questions:
if question.type == _TYPE_PTR:
if question.name == "_services._dns-sd._udp.local.":
for stype in self.servicetypes.keys():
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype))
for service in self.services.values():
if question.name == service.type:
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, service.name))
else:
try:
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
# Answer A record queries for any service addresses we know
if question.type in (_TYPE_A, _TYPE_AAAA, _TYPE_ANY):
for service in self.services.values():
if service.server == question.name.lower():
out.addAnswer(msg, DNSAddress(question.name, address_type(service.address), _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
service = self.services.get(question.name.lower(), None)
if not service: continue
if question.type == _TYPE_SRV or question.type == _TYPE_ANY:
out.addAnswer(msg, DNSService(question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.priority, service.weight, service.port, service.server))
if question.type == _TYPE_TXT or question.type == _TYPE_ANY:
out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
if question.type == _TYPE_SRV:
out.addAdditionalAnswer(DNSAddress(service.server, address_type(service.address), _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
except:
traceback.print_exc()
if out is not None and out.answers:
out.id = msg.id
self.send(out, addr, port)
def send(self, out, addr = _MDNS_ADDR, port = _MDNS_PORT):
"""Sends an outgoing packet."""
# This is a quick test to see if we can parse the packets we generate
#temp = DNSIncoming(out.packet())
try:
self.socket.sendto(out.packet(), 0, (addr, port))
except:
# Ignore this, it may be a temporary loss of network connection
pass
def close(self):
"""Ends the background threads, and prevent this instance from
servicing further queries."""
if globals()['_GLOBAL_DONE'] == 0:
globals()['_GLOBAL_DONE'] = 1
self.notifyAll()
self.engine.notify()
self.unregisterAllServices()
self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
self.socket.close()
# Test a few module features, including service registration, service
# query (for Zoe), and service unregistration.
if __name__ == '__main__':
print "Multicast DNS Service Discovery for Python, version", __version__
r = Zeroconf()
print "1. Testing registration of a service..."
desc = {'version':'0.10','a':'test value', 'b':'another value'}
info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
print " Registering service..."
r.registerService(info)
print " Registration done."
print "2. Testing query of service information..."
print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
print " Query done."
print "3. Testing query of own service..."
print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
print " Query done."
print "4. Testing unregister of service information..."
r.unregisterService(info)
print " Unregister done."
r.close()
| gpl-3.0 | -5,357,106,716,623,870,000 | 33.953371 | 181 | 0.564655 | false |
sstocker46/pyrobotlab | home/hairygael/InMoov2.full3.byGael.Langevin.1.py | 1 | 117376 | #file : InMoov2.full3.byGael.Langevin.1.py
# this script is provided as a basic guide
# most parts can be run by uncommenting them
# InMoov now can be started in modular pieces
import random
leftPort = "COM20"
rightPort = "COM7"
i01 = Runtime.createAndStart("i01", "InMoov")
#inmoov = Runtime.createAndStart("alice", "ProgramAB")
#inmoov.startSession()
directionServo = Runtime.createAndStart("directionServo","Servo")
forwardServo = Runtime.createAndStart("forwardServo","Servo")
#cleverbot = Runtime.createAndStart("cleverbot","CleverBot")
# starts everything
##i01.startAll(leftPort, rightPort)
directionServo.attach("COM7", 12)
forwardServo.attach("COM7", 13)
# starting parts
i01.startMouthControl(leftPort)
i01.startMouth()
#to tweak the default voice
i01.mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Ryan&txt=")
i01.startHead(leftPort)
##############
# tweaking default settings of jaw
i01.head.jaw.setMinMax(65,90)
#i01.head.jaw.map(0,180,10,35)
i01.mouthControl.setmouth(65,90)
i01.head.jaw.setRest(90)
# tweaking default settings of eyes
i01.head.eyeY.setMinMax(0,180)
i01.head.eyeY.map(0,180,80,100)
i01.head.eyeY.setRest(85)
i01.head.eyeX.setMinMax(0,180)
i01.head.eyeX.map(0,180,70,100)
i01.head.eyeX.setRest(85)
i01.head.neck.setMinMax(0,180)
i01.head.neck.map(0,180,15,155)
i01.head.neck.setRest(70)
i01.head.rothead.setMinMax(0,180)
i01.head.rothead.map(0,180,30,150)
i01.head.rothead.setRest(86)
###################
i01.startEyesTracking(leftPort)
i01.startHeadTracking(leftPort)
##############
i01.startEar()
##############
torso = i01.startTorso("COM20")
# tweaking default torso settings
torso.topStom.setMinMax(0,180)
torso.topStom.map(0,180,67,110)
torso.midStom.setMinMax(0,180)
torso.midStom.map(0,180,60,120)
#torso.lowStom.setMinMax(0,180)
#torso.topStom.setRest(90)
#torso.midStom.setRest(90)
#torso.lowStom.setRest(90)
##############
i01.startLeftHand(leftPort)
# tweaking default settings of left hand
i01.leftHand.thumb.setMinMax(0,180)
i01.leftHand.index.setMinMax(0,180)
i01.leftHand.majeure.setMinMax(0,180)
i01.leftHand.ringFinger.setMinMax(0,180)
i01.leftHand.pinky.setMinMax(0,180)
i01.leftHand.thumb.map(0,180,45,140)
i01.leftHand.index.map(0,180,40,140)
i01.leftHand.majeure.map(0,180,30,176)
i01.leftHand.ringFinger.map(0,180,25,175)
i01.leftHand.pinky.map(0,180,15,112)
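# Note (added for clarity, not part of the original tuning): Servo.map(minX,maxX,minY,maxY)
# linearly rescales requested positions, so with index.map(0,180,40,140) a request
# for 180 reaches the servo as 140 and a request for 0 as 40, keeping each finger
# inside its safe mechanical range.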
################
i01.startLeftArm(leftPort)
#tweak defaults LeftArm
#i01.leftArm.bicep.setMinMax(0,90)
#i01.leftArm.rotate.setMinMax(46,160)
#i01.leftArm.shoulder.setMinMax(30,100)
#i01.leftArm.omoplate.setMinMax(10,75)
################
i01.startRightHand(rightPort,"atmega2560")
# tweaking defaults settings of right hand
i01.rightHand.thumb.setMinMax(0,180)
i01.rightHand.index.setMinMax(0,180)
i01.rightHand.majeure.setMinMax(0,180)
i01.rightHand.ringFinger.setMinMax(0,180)
i01.rightHand.pinky.setMinMax(0,180)
i01.rightHand.thumb.map(0,180,55,135)
i01.rightHand.index.map(0,180,35,140)
i01.rightHand.majeure.map(0,180,8,120)
i01.rightHand.ringFinger.map(0,180,40,125)
i01.rightHand.pinky.map(0,180,10,110)
#################
i01.startRightArm(rightPort)
# tweak default RightArm
#i01.rightArm.bicep.setMinMax(0,90)
#i01.rightArm.rotate.setMinMax(46,160)
#i01.rightArm.shoulder.setMinMax(30,100)
#i01.rightArm.omoplate.setMinMax(10,75)
################
# starting part with a reference, with a reference
# you can interact further
#opencv = i01.startOpenCV()
#opencv.startCapture()
# or you can use i01's reference
#i01.opencv.startCapture()
#i01.headTracking.faceDetect()
#i01.eyesTracking.faceDetect()
#i01.headTracking.pyramidDown()
############################################################
#to tweak the default PID values
i01.eyesTracking.xpid.setPID(20.0,5.0,0.1)
i01.eyesTracking.ypid.setPID(20.0,5.0,0.1)
i01.headTracking.xpid.setPID(12.0,5.0,0.1)
i01.headTracking.ypid.setPID(12.0,5.0,0.1)
############################################################
#i01.startPIR("COM20",30)
#def input():
#print 'python object is ', msg_clock_pulse
#pin = msg_i01_right_publishPin.data[0]
#print 'pin data is ', pin.pin, pin.value
#if (pin.value == 1):
#i01.mouth.speak("I was dreaming")
#powerup()
#relax()
############################################################
helvar = 1
weathervar = 1
# play rock paper scissors
inmoov = 0
human = 0
###############################################################
# after a start you may call detach to detach all
# currently attached servos
#i01.detach()
#i01.attach()
# auto detaches any attached servos after 120 seconds of inactivity
#i01.autoPowerDownOnInactivity(100)
#i01.speakErrors(false)
# purges any "auto" methods
#i01.purgeAllTasks()
# remote control services
# WebGUI - for more information see
# http://myrobotlab.org/service/WebGUI
# XMPP - for more information see
# http://myrobotlab.org/service/XMPP
# system check - called at anytime
#i01.systemCheck()
# take the current position of all attached servos <- FIXME
# and create a new method named "newGesture"
#i01.captureGesture("newGesture")
# all ear associations are done python startEar() only starts
# the peer service
# After ear.startListening(), the ear will listen for commands
#############################################################################################
# i01.systemCheck()
#i01.mouth.speakBlocking(cleverbot.chat("hi"))
#i01.mouth.speakBlocking(cleverbot.chat("how are you"))
# verbal commands
ear = i01.ear
ear.addCommand("rest", "python", "rest")
ear.addCommand("attach head", "i01.head", "attach")
ear.addCommand("disconnect head", "i01.head", "detach")
ear.addCommand("attach eyes", "i01.head.eyeY", "attach")
ear.addCommand("disconnect eyes", "i01.head.eyeY", "detach")
ear.addCommand("attach right hand", "i01.rightHand", "attach")
ear.addCommand("disconnect right hand", "i01.rightHand", "detach")
ear.addCommand("attach left hand", "i01.leftHand", "attach")
ear.addCommand("disconnect left hand", "i01.leftHand", "detach")
ear.addCommand("attach everything", "i01", "attach")
ear.addCommand("disconnect everything", "i01", "detach")
ear.addCommand("attach left arm", "i01.leftArm", "attach")
ear.addCommand("disconnect left arm", "i01.leftArm", "detach")
ear.addCommand("attach right arm", "i01.rightArm", "attach")
ear.addCommand("disconnect right arm", "i01.rightArm", "detach")
ear.addCommand("attach torso", "i01.torso", "attach")
ear.addCommand("disconnect torso", "i01.torso", "detach")
ear.addCommand("attach jaw", "i01.head.jaw", "attach")
ear.addCommand("disconnect jaw", "i01.head.jaw", "detach")
ear.addCommand("attach wheel", "directionServo","forwardServo", "attach")
ear.addCommand("disconnect wheel", "directionServo","forwardServo", "detach")
ear.addCommand("search humans", "python", "trackHumans")
ear.addCommand("quit search", "python", "stopTracking")
ear.addCommand("track", "python", "trackPoint")
ear.addCommand("freeze track", "python", "stopTracking")
ear.addCommand("open hand", "python", "handopen")
ear.addCommand("close hand", "python", "handclose")
ear.addCommand("camera on", i01.getName(), "cameraOn")
ear.addCommand("off camera", i01.getName(), "cameraOff")
ear.addCommand("capture gesture", i01.getName(), "captureGesture")
# FIXME - lk tracking setpoint
ear.addCommand("giving", i01.getName(), "giving")
ear.addCommand("fighter", i01.getName(), "fighter")
ear.addCommand("fist hips", "python", "fistHips")
ear.addCommand("look at this", i01.getName(), "lookAtThis")
ear.addCommand("victory", i01.getName(), "victory")
ear.addCommand("arms up", "python", "armsUp")
ear.addCommand("arms front", i01.getName(), "armsFront")
ear.addCommand("da vinci", i01.getName(), "daVinci")
# FIXME -
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addCommand("stop listening", ear.getName(), "stopListening")
##sets the servos back to full speed, anywhere in sequence or gestures
ear.addCommand("full speed", "python", "fullspeed")
##sequence1
ear.addCommand("grab the bottle", "python", "grabthebottle")
ear.addCommand("take the glass", "python", "grabtheglass")
ear.addCommand("poor bottle", "python", "poorbottle")
ear.addCommand("give the glass", "python", "givetheglass")
##sequence2
ear.addCommand("take the ball", "python", "takeball")
ear.addCommand("reach the ball", "python", "getball")
ear.addCommand("keep the ball", "python", "keepball")
ear.addCommand("approach the left hand", "python", "approachlefthand")
ear.addCommand("use the left hand", "python", "uselefthand")
ear.addCommand("more", "python", "more")
ear.addCommand("hand down", "python", "handdown")
ear.addCommand("is it a ball", "python", "isitaball")
ear.addCommand("put it down", "python", "putitdown")
ear.addCommand("drop it", "python", "dropit")
ear.addCommand("remove your left arm", "python", "removeleftarm")
ear.addCommand("relax", "python", "relax")
##sequence2 in one command
ear.addCommand("what is it", "python", "studyball")
##extras
ear.addCommand("perfect", "python", "perfect")
ear.addCommand("delicate grab", "python", "delicategrab")
ear.addCommand("release delicate", "python", "releasedelicate")
ear.addCommand("open your right hand", "python", "openrighthand")
ear.addCommand("open your left hand", "python", "openlefthand")
ear.addCommand("close your right hand", "python", "closerighthand")
ear.addCommand("close your left hand", "python", "closelefthand")
ear.addCommand("slowly close your right hand", "python", "slowlycloserighthand")
ear.addCommand("surrender", "python", "surrender")
ear.addCommand("picture on the right side", "python", "picturerightside")
ear.addCommand("picture on the left side", "python", "pictureleftside")
ear.addCommand("picture on both sides", "python", "picturebothside")
ear.addCommand("look on your right side", "python", "lookrightside")
ear.addCommand("look on your left side", "python", "lookleftside")
ear.addCommand("look in the middle", "python", "lookinmiddle")
ear.addCommand("before happy", "python", "beforehappy")
ear.addCommand("happy birthday", "python", "happy")
#ear.addCommand("photo", "python", "photo")
ear.addCommand("about", "python", "about")
ear.addCommand("power down", "python", "power_down")
ear.addCommand("power up", "python", "power_up")
ear.addCommand("servo", "python", "servos")
ear.addCommand("how many fingers do you have", "python", "howmanyfingersdoihave")
ear.addCommand("who's there", "python", "welcome")
ear.addCommand("start gesture", "python", "startkinect")
ear.addCommand("off gesture", "python", "offkinect")
ear.addCommand("cycle gesture one", "python", "cyclegesture1")
ear.addCommand("cycle gesture two", "python", "cyclegesture2")
ear.addCommand("cycle gesture three", "python", "cyclegesture3")
ear.addCommand("show your muscles", "python", "muscle")
ear.addCommand("shake hand", "python", "shakehand")
ear.addCommand("unhappy", "python", "unhappy")
ear.addCommand("take this", "python", "takethis")
ear.addCommand("rock paper scissors", "python", "rockpaperscissors")
ear.addCommand("ready", "python", "ready")
ear.addCommand("rock", "python", "rock")
ear.addCommand("paper", "python", "paper")
ear.addCommand("scissors", "python", "scissors")
ear.addCommand("that was fun", "python", "thatwasfun")
ear.addCommand("guess what", "python", "guesswhat")
ear.addCommand("finger right", "python", "fingerright")
ear.addCommand("finger left", "python", "fingerleft")
ear.addCommand("come here", "python", "comehere")
ear.addCommand("approach", "python", "approach")
ear.addCommand("brake", "python", "brake")
ear.addCommand("made by", "python", "madeby")
ear.addCommand("test", "python", "test1")
ear.addCommand("phone home", "python", "phonehome")
ear.addCommand("how do you feel", "python", "newyork")
ear.addCommand("play your song", "python", "playsong")
ear.addCommand("quit your action", "python", "stopit")
ear.addCommand("carry baby", "python", "carrybaby")
ear.addCommand("system check", "python", "systemcheck")
#ear.addCommand("watch out", "python", "watch out")
ear.addComfirmations("yes","correct","ya","yeah")
ear.addNegations("no","wrong","nope","nah")
ear.startListening("yes | no | very good, thank you | it's okay | no thanks | no thank you | sorry | how do you do | hello | i know | yes let's play again | i have rock | i have paper | i have scissors | look at the people | pause | can i have your attention | good morning | very good | italian hello | alessandro | bye bye | i love you | thanks | thank you | shake hand| what about star wars | where are you from | nice | what is the weather | are you hungry | do you speak hindi | go forward | go backwards | watch out | to the left | to the right | go straight")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", "python", "heard")
#inmoov.addTextListener(i01.mouth)
def carrybaby():
i01.moveHead(18,111,85,85,5)
i01.moveArm("left",81,50,45,16)
i01.moveArm("right",78,44,50,31)
i01.moveHand("left",180,180,180,180,180,25)
i01.moveHand("right",111,128,140,151,169,86)
i01.moveTorso(90,90,90)
def slowlycloserighthand():
i01.setArmSpeed("left",1.0,1.0,1.0,1.0)
i01.setArmSpeed("right",1.0,0.8,1.0,1.0)
i01.setHandSpeed("left",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setHandSpeed("right",1.0,0.8,0.8,0.7,1.0,1.0)
i01.setHeadSpeed(0.8,0.8)
i01.moveHead(30,60)
i01.moveArm("right",5,80,30,10)
i01.moveHand("right",176,173,175,175,2,180)
def stopit():
lookinmiddle()
sleep(1)
relax()
i01.mouth.speak("yes")
if (data == "pause"):
i01.mouth.speak("yes")
def playsong():
data = msg_i01_ear_recognized.data[0]
if (data == "can i have your attention"):
i01.mouth.speak("ok you have my attention")
stopit()
i01.mouth.speak("electro funk inmoov")
i01.setHeadSpeed(1.0,1.0)
i01.setArmSpeed("left",1.0,1.0,1.0,1.0)
i01.setArmSpeed("right",1.0,1.0,1.0,1.0)
i01.setHandSpeed("left",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setHandSpeed("right",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setTorsoSpeed(1.0,1.0,1.0)
#for x in range(5):
i01.moveHead(60,90)
sleep(2)
i01.moveHead(110,80)
sleep(2)
i01.moveHead(60,90)
sleep(2)
i01.moveHead(110,80)
sleep(2)
i01.moveHead(60,90)
sleep(2)
i01.moveHead(110,80)
sleep(2)
i01.moveHead(60,90)
sleep(2)
i01.moveHead(110,80)
sleep(2)
i01.moveHead(60,90)
sleep(2)
i01.moveHead(110,80)
sleep(2)
i01.moveHead(60,90)
sleep(2)
i01.moveHead(110,80)
sleep(2)
i01.moveHead(60,90)
fingerright()
sleep(3)
i01.moveHead(110,80)
fingerleft()
sleep(3)
i01.moveHead(60,90)
fingerright()
sleep(3)
i01.moveHead(110,80)
fingerleft()
sleep(3)
i01.moveHead(60,90)
fingerright()
sleep(3)
i01.moveHead(110,80)
fingerleft()
sleep(3)
i01.moveHead(60,90)
fingerright()
sleep(3)
i01.moveHead(110,80)
fingerleft()
sleep(3)
i01.moveTorso(90,90,90)
fullspeed()
i01.giving()
sleep(5)
i01.armsFront()
sleep(4)
fullspeed()
i01.daVinci()
sleep(5)
surrender()
sleep(6)
i01.giving()
sleep(6)
i01.moveHead(60,90)
fingerright()
sleep(3)
i01.moveHead(110,80)
fingerleft()
sleep(3)
i01.moveHead(60,90)
fingerright()
sleep(3)
i01.moveHead(110,80)
fingerleft()
relax()
i01.moveTorso(90,90,90)
sleep(3)
fullspeed()
sleep(3)
madeby()
relax()
sleep(5)
i01.detach()
def newyork():
i01.mouth.speak("robot1")
i01.setHeadSpeed(1.0,1.0,1.0,1.0,1.0)
i01.setArmSpeed("left",1.0,1.0,1.0,1.0)
i01.setArmSpeed("right",1.0,1.0,1.0,1.0)
i01.setHandSpeed("left",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setHandSpeed("right",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setTorsoSpeed(1.0,1.0,1.0)
i01.moveHead(90,90)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(90,90)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,140)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(90,90)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,140)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(90,90)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",8,85,28,12)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",7,82,33,13)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",6,85,28,10)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(90,90)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,140)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(60,107)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",7,87,33,11)
i01.moveHand("left",40,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(0.45)
i01.moveHead(90,90)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",40,171,180,180,180,140)
i01.moveHand("right",2,2,2,2,2,90)
sleep(1.5)
i01.setHeadSpeed(0.85,0.85)
i01.setArmSpeed("left",0.90,0.90,0.90,0.90)
i01.setArmSpeed("right",0.90,0.90,0.90,0.90)
i01.setHandSpeed("left",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setHandSpeed("right",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setTorsoSpeed(1.0,1.0,1.0)
i01.moveHead(75,97)
i01.moveArm("left",85,106,25,18)
i01.moveArm("right",87,107,32,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
sleep(1)
i01.mouth.speakBlocking("Start spreading the news")
i01.setHeadSpeed(0.85,0.85)
i01.setArmSpeed("left",0.80,0.80,0.80,0.80)
i01.setArmSpeed("right",0.80,0.80,0.80,0.80)
i01.setHandSpeed("left",0.80,0.80,0.80,0.80,0.80,0.80)
i01.setHandSpeed("right",0.80,0.80,0.80,0.80,0.80,0.80)
i01.setTorsoSpeed(1.0,1.0,1.0)
i01.moveHead(160,107)
i01.moveArm("left",5,86,30,10)
i01.moveArm("right",86,140,83,80)
i01.moveHand("left",99,140,173,167,130,26)
i01.moveHand("right",135,6,170,145,168,180)
i01.moveTorso(90,90,90)
sleep(0.8)
i01.mouth.speakBlocking("I am leaving today")
i01.moveHead(160,68)
i01.moveArm("left",5,86,30,10)
i01.moveArm("right",86,140,83,80)
i01.moveHand("left",99,140,173,167,130,26)
i01.moveHand("right",135,6,170,145,168,180)
i01.moveTorso(90,90,90)
sleep(0.4)
i01.mouth.speakBlocking("I want to be a part of it")
i01.moveHead(138,86)
i01.moveArm("left",80,112,52,34)
i01.moveArm("right",80,122,59,54)
i01.moveHand("left",105,76,71,98,76,90)
i01.moveHand("right",55,0,55,48,142,93)
i01.moveTorso(90,90,90)
sleep(0.5)
i01.mouth.speakBlocking("New York, New York")
i01.moveHead(138,86)
i01.moveArm("left",80,112,52,34)
i01.moveArm("right",80,122,59,54)
i01.moveHand("left",105,76,71,98,76,90)
i01.moveHand("right",55,0,55,48,142,93)
i01.moveTorso(90,90,90)
sleep(0.4)
i01.mouth.speakBlocking("If I can make it there")
i01.moveHead(160,86)
i01.moveArm("left",80,128,71,62)
i01.moveArm("right",80,132,69,80)
i01.moveHand("left",45,40,30,25,35,90)
i01.moveHand("right",55,0,55,48,142,72)
i01.moveTorso(90,90,90)
sleep(0.2)
i01.mouth.speakBlocking("I'll make it anywhere")
i01.moveHead(160,86)
i01.moveArm("left",80,128,71,62)
i01.moveArm("right",80,132,69,80)
i01.moveHand("left",45,40,30,25,35,90)
i01.moveHand("right",55,0,55,48,142,72)
i01.moveTorso(90,90,90)
sleep(0.2)
i01.moveHead(136,66)
i01.moveArm("left",5,138,30,77)
i01.moveArm("right",5,134,59,75)
i01.moveHand("left",127,101,122,129,123,131)
i01.moveHand("right",55,2,50,48,30,90)
i01.moveTorso(90,90,90)
sleep(0.2)
i01.mouth.speakBlocking("It's up to you")
i01.moveHead(160,86)
i01.moveArm("left",46,131,30,80)
i01.moveArm("right",71,145,36,80)
i01.moveHand("left",45,40,30,96,107,90)
i01.moveHand("right",55,4,50,49,114,90)
i01.moveTorso(90,90,90)
sleep(0.2)
i01.mouth.speakBlocking("New York, New York")
sleep(2)
relax()
def phonehome():
relax()
i01.setHeadSpeed(1.0,1.0,1.0,1.0,1.0)
i01.setArmSpeed("left",1.0,1.0,1.0,1.0)
i01.setArmSpeed("right",1.0,1.0,1.0,1.0)
i01.setHandSpeed("left",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setHandSpeed("right",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setTorsoSpeed(1.0,1.0,1.0)
i01.moveHead(160,68)
i01.moveArm("left",5,86,30,20)
i01.moveArm("right",86,140,83,80)
i01.moveHand("left",99,140,173,167,130,26)
i01.moveHand("right",135,6,170,145,168,180)
i01.moveTorso(25,80,90)
sleep(2)
i01.mouth.speakBlocking("E,T phone the big home of the inmoov nation")
sleep(0.2)
relax()
def test1():
rest()
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.9, 0.9)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(50,110)
i01.moveArm("left",88,90,70,23)
i01.moveArm("right",73,90,70,27)
i01.moveHand("left",2,2,2,2,2,90)
i01.moveHand("right",2,2,2,2,2,90)
i01.moveTorso(90,90,90)
sleep(2)
def madeby():
relax()
sleep(1)
i01.moveHead(80,86)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",45,40,30,25,35,90)
i01.moveHand("right",55,2,50,48,30,90)
i01.moveTorso(90,90,90)
sleep(3)
#i01.mouth.speakBlocking("hello")
i01.mouth.speakBlocking("bonjour")
i01.moveHead(80,98)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",45,40,30,25,35,90)
i01.moveHand("right",55,2,50,48,30,90)
i01.moveTorso(90,90,90)
sleep(1)
i01.moveHead(90,89)
i01.moveArm("left",42,104,30,10)
i01.moveArm("right",33,116,30,10)
i01.moveHand("left",45,40,30,25,35,120)
i01.moveHand("right",55,2,50,48,30,40)
i01.moveTorso(90,90,90)
sleep(1)
i01.moveHead(80,98)
i01.moveArm("left",5,99,30,16)
i01.moveArm("right",5,94,30,16)
i01.moveHand("left",120,116,110,115,98,73)
i01.moveHand("right",114,146,125,113,117,109)
i01.moveTorso(90,90,90)
#i01.mouth.speakBlocking("my name is inmoov")
i01.mouth.speakBlocking("je m'appelle inmouv")
i01.moveHead(68,90)
i01.moveArm("left",5,99,30,16)
i01.moveArm("right",85,102,38,16)
i01.moveHand("left",120,116,110,115,98,73)
i01.moveHand("right",114,146,161,132,168,19)
i01.moveTorso(90,90,90)
sleep(0.5)
##i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
##i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
##i01.setHeadSpeed(1.0, 0.90)
##i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(87,94)
i01.moveArm("left",5,99,36,16)
i01.moveArm("right",81,105,42,16)
i01.moveHand("left",120,116,110,115,98,50)
i01.moveHand("right",114,118,131,132,168,19)
i01.moveTorso(90,90,90)
sleep(1)
#i01.mouth.speakBlocking("I am created by gael langevin")
i01.mouth.speakBlocking("j'ai ete creer par gael langevin")
i01.setHandSpeed("left", 0.90, 0.90, 0.90, 0.90, 0.90, 0.95)
i01.setHandSpeed("right", 0.90, 0.90, 0.90, 0.90, 0.90, 0.95)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.90, 1.0, 1.0, 1.0)
##i01.setHeadSpeed(1.0, 0.90)
##i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(105,94)
i01.moveArm("left",5,99,36,16)
i01.moveArm("right",81,105,42,16)
i01.moveHand("left",120,116,110,115,98,50)
i01.moveHand("right",114,118,131,132,168,19)
i01.moveTorso(90,90,90)
sleep(0.2)
i01.moveHead(80,86)
i01.moveArm("left",5,96,25,10)
i01.moveArm("right",5,94,26,10)
i01.moveHand("left",110,62,56,88,81,18)
i01.moveHand("right",78,88,101,95,81,137)
i01.moveTorso(90,90,90)
sleep(0.2)
i01.moveHead(75,97)
i01.moveArm("left",85,106,25,18)
i01.moveArm("right",87,107,32,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
#i01.mouth.speakBlocking("who is a french sculptor, designer")
i01.mouth.speakBlocking("qui est un sculpteur, designer francais")
sleep(0.5)
i01.moveHead(80,86)
i01.moveArm("left",5,96,25,10)
i01.moveArm("right",5,94,26,10)
i01.moveHand("left",110,62,56,88,81,18)
i01.moveHand("right",78,88,101,95,81,137)
i01.moveTorso(90,90,90)
sleep(1)
i01.moveHead(75,97)
i01.moveArm("left",6,91,22,14)
i01.moveArm("right",87,107,32,18)
i01.moveHand("left",110,62,56,88,81,0)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
i01.mouth.speakBlocking("my software is being developped by myrobtlab dot org")
i01.mouth.speakBlocking("mon logiciel est developpe par myrobotlab point org")
sleep(1)
i01.moveHead(20,69)
i01.moveArm("left",6,91,22,14)
i01.moveArm("right",87,107,32,21)
i01.moveHand("left",110,62,56,88,81,0)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
#i01.mouth.speakBlocking("I am totally build with 3 D printed parts")
i01.mouth.speakBlocking("je suis entierement imprimer en 3 D")
i01.moveHead(75,97)
i01.moveArm("left",85,106,25,18)
i01.moveArm("right",87,107,32,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
sleep(1)
i01.moveHead(33,110)
i01.moveArm("left",85,104,25,18)
i01.moveArm("right",87,41,47,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",111,75,117,125,111,143)
i01.moveTorso(90,90,90)
sleep(1)
i01.moveHead(62,102)
i01.moveArm("left",85,104,25,18)
i01.moveArm("right",87,41,47,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",111,75,117,125,111,143)
i01.moveTorso(90,90,90)
#i01.mouth.speakBlocking("which means all my parts")
i01.mouth.speakBlocking("ce qui veut dire que toutes mes pieces,")
i01.moveHead(79,88)
i01.moveArm("left",85,104,25,18)
i01.moveArm("right",87,59,46,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",59,75,117,125,111,113)
i01.moveTorso(90,90,90)
#i01.mouth.speakBlocking("are made on a home 3 D printer")
i01.mouth.speakBlocking("sont fabriquer sur une petite imprimante familiale")
sleep(1)
i01.moveHead(40,84)
i01.moveArm("left",85,72,38,18)
i01.moveArm("right",87,64,47,18)
i01.moveHand("left",124,97,66,120,130,35)
i01.moveHand("right",59,75,117,125,111,113)
i01.moveTorso(90,90,90)
#i01.mouth.speakBlocking("each parts are design to fit 12 centimeter cube build area")
i01.mouth.speakBlocking("chaque piece est concu dans un format de 12 centimetre cube,")
sleep(1)
i01.moveHead(97,80)
i01.moveArm("left",85,79,39,14)
i01.moveArm("right",87,76,42,12)
i01.moveHand("left",124,97,66,120,130,35)
i01.moveHand("right",59,75,117,125,111,113)
i01.moveTorso(90,90,90)
sleep(0.5)
i01.moveHead(75,97)
i01.moveArm("left",85,106,25,18)
i01.moveArm("right",87,107,32,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
sleep(1)
#i01.mouth.speakBlocking("so anyone can reproduce me")
i01.mouth.speakBlocking("de facon a ce que tout le monde puisse me reproduire")
fullspeed()
i01.moveHead(80,98)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",45,40,30,25,35,90)
i01.moveHand("right",55,2,50,48,30,90)
i01.moveTorso(90,90,90)
sleep(1)
#i01.mouth.speakBlocking("cool, don't you think")
i01.mouth.speakBlocking("c'est cool, vous ne trouvez pas")
sleep(1)
#i01.mouth.speakBlocking("thank you for listening")
i01.mouth.speakBlocking("merci de votre attention")
i01.moveHead(116,80)
i01.moveArm("left",85,93,42,16)
i01.moveArm("right",87,93,37,18)
i01.moveHand("left",124,82,65,81,41,143)
i01.moveHand("right",59,53,89,61,36,21)
i01.moveTorso(90,90,90)
sleep(0.2)
relax()
def brake():
i01.moveHead(80,86)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",45,40,30,25,35,90)
i01.moveHand("right",55,2,50,48,30,90)
i01.moveTorso(90,90,90)
sleep(3)
i01.moveHead(20,86)
i01.moveArm("left",21,92,49,22)
i01.moveArm("right",38,91,43,10)
i01.moveHand("left",45,40,30,25,35,90)
i01.moveHand("right",89,127,123,48,30,90)
i01.moveTorso(90,90,90)
sleep(3)
i01.moveHead(20,106)
i01.moveArm("left",75,69,49,22)
i01.moveArm("right",38,91,43,10)
i01.moveHand("left",120,80,74,106,35,90)
i01.moveHand("right",89,127,123,48,30,90)
i01.moveTorso(90,90,90)
sleep(3)
i01.moveHead(20,93)
i01.moveArm("left",75,69,49,22)
i01.moveArm("right",71,66,60,10)
i01.moveHand("left",120,80,74,106,35,90)
i01.moveHand("right",89,127,123,48,30,146)
i01.moveTorso(90,90,90)
sleep(3)
i01.moveHead(110,93)
i01.moveArm("left",75,69,49,22)
i01.moveArm("right",71,66,60,10)
i01.moveHand("left",120,80,74,106,35,90)
i01.moveHand("right",89,127,123,48,30,146)
i01.moveTorso(90,90,90)
sleep(3)
i01.mouth.speakBlocking("Should I brake that")
i01.moveHead(110,93)
i01.moveArm("left",90,69,84,22)
i01.moveArm("right",71,66,60,10)
i01.moveHand("left",138,134,168,168,120,90)
i01.moveHand("right",124,142,151,48,30,146)
i01.moveTorso(90,90,90)
def approach():
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 0.90)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(92,80)
i01.moveArm("left",7,76,24,16)
i01.moveArm("right",7,79,24,15)
i01.moveHand("left",49,43,30,28,40,80)
i01.moveHand("right",55,7,55,48,43,108)
i01.moveTorso(90,90,90)
sleep(3)
i01.moveHead(92,80)
i01.moveArm("left",5,52,57,13)
i01.moveArm("right",10,45,59,13)
i01.moveHand("left",134,138,176,175,130,0)
i01.moveHand("right",119,150,163,134,151,180)
i01.moveTorso(90,90,90)
sleep(3)
i01.moveHead(92,80)
i01.moveArm("left",14,63,71,21)
i01.moveArm("right",14,55,77,21)
i01.moveHand("left",49,43,30,28,40,171)
i01.moveHand("right",55,7,55,48,43,12)
i01.moveTorso(90,90,90)
sleep(3)
i01.moveHead(92,80)
i01.moveArm("left",5,52,57,13)
i01.moveArm("right",10,45,59,13)
i01.moveHand("left",134,138,176,175,130,0)
i01.moveHand("right",119,150,163,134,151,180)
i01.moveTorso(90,90,90)
i01.mouth.speakBlocking("please approach")
relax()
def fingerright():
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 1.0, 0.85, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.90, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 0.90)
i01.setTorsoSpeed(0.9, 0.5, 1.0)
i01.moveHead(80,86)
i01.moveArm("left",5,94,20,10)
i01.moveArm("right",7,78,92,10)
i01.moveHand("left",180,180,180,180,180,90)
i01.moveHand("right",180,2,175,160,165,180)
i01.moveTorso(60,70,90)
def fingerleft():
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 1.0, 0.85, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.90, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 0.90)
i01.setTorsoSpeed(0.9, 0.5, 1.0)
i01.moveHead(80,86)
i01.moveArm("left",7,78,92,10)
i01.moveArm("right",5,94,20,10)
i01.moveHand("left",180,2,175,160,165,90)
i01.moveHand("right",180,180,180,180,180,90)
i01.moveTorso(120,110,90)
def comehere():
fullspeed()
relax()
##look around
i01.setHeadSpeed(0.80, 0.80, 0.90, 0.90, 1.0)
i01.moveHead(80,66,7,85,52)
sleep(3)
i01.setHeadSpeed(0.80, 0.80, 0.90, 0.90, 1.0)
i01.moveHead(80,110,175,85,52)
sleep(3)
##raise arm point finger
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 1.0, 0.85, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.90, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 0.90)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(80,86,85,85,52)
i01.moveArm("left",5,94,30,10)
i01.moveArm("right",7,74,92,10)
i01.moveHand("left",180,180,180,180,180,90)
i01.moveHand("right",180,2,175,160,165,180)
i01.moveTorso(90,90,90)
sleep(4.5)
##move finger
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(80,86)
i01.moveArm("left",5,94,30,10)
i01.moveArm("right",48,74,92,10)
i01.moveHand("left",180,180,180,180,180,90)
i01.moveHand("right",180,2,175,160,165,20)
i01.moveTorso(90,90,90)
sleep(2)
i01.setHeadSpeed(0.80, 0.80)
i01.moveHead(80,80)
i01.moveHand("right",180,164,175,160,165,20)
sleep(1)
i01.moveHead(80,80)
i01.moveHand("right",180,2,175,160,165,20)
sleep(1)
i01.moveHead(118,80)
i01.moveHand("right",180,164,175,160,165,20)
sleep(1)
i01.mouth.speak("come closer")
i01.moveHead(60,80)
i01.moveHand("right",180,2,175,160,165,20)
sleep(1)
i01.moveHead(118,80)
i01.moveHand("right",180,164,175,160,165,20)
sleep(1)
i01.moveHead(60,80)
i01.moveArm("right",90,65,10,25)
sleep(3)
fullspeed()
rest()
sleep(0.3)
relax()
sleep(3)
fullspeed()
def guesswhat():
i01.mouth.speak("I'm not really a human man")
i01.mouth.speak("but I use Old spice body wash and deodorant together")
i01.mouth.speak("and now I'm really cool")
def rockpaperscissors():
fullspeed()
i01.mouth.speak("lets play first to 3 points win")
sleep(4)
rockpaperscissors2()
def rockpaperscissors2():
x = (random.randint(1, 3))
if x == 1:
ready()
sleep(2)
rock()
sleep(2)
data = msg_i01_ear_recognized.data[0]
if (data == "i have rock"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("zero zero")
if x == 2:
i01.mouth.speak("no no")
if x == 3:
i01.mouth.speak("no points")
sleep(1)
if (data == "i have paper"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("paper beats rock")
if x == 2:
i01.mouth.speak("your point")
if x == 3:
i01.mouth.speak("you got this one")
global human
human += 1
sleep(1)
if (data == "i have scissors"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("1 point for me")
if x == 2:
i01.mouth.speak("going fine")
if x == 3:
i01.mouth.speak("rock beats scissors")
global inmoov
inmoov += 1
sleep(1)
if x == 2:
ready()
sleep(2)
paper()
sleep(2)
data = msg_i01_ear_recognized.data[0]
if (data == "i have rock"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("1 point")
if x == 2:
i01.mouth.speak("paper beats rock")
if x == 3:
i01.mouth.speak("my point")
global inmoov
inmoov += 1
sleep(1)
if (data == "i have paper"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("no points")
if x == 2:
i01.mouth.speak("ok lets try again")
sleep(2)
if x == 3:
i01.mouth.speak("again")
sleep(1)
if (data == "i have scissors"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("oh no you get 1 point")
if x == 2:
i01.mouth.speak("this is not good for me")
if x == 3:
i01.mouth.speak("your point")
global human
human += 1
sleep(1)
if x == 3:
ready()
sleep(2)
scissors()
sleep(2)
data = msg_i01_ear_recognized.data[0]
if (data == "i have rock"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("oh no")
if x == 2:
i01.mouth.speak("rock beats scissors")
if x == 3:
i01.mouth.speak("i feel generous today")
global human
human += 1
sleep(1)
if (data == "i have paper"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("i've got you")
if x == 2:
i01.mouth.speak("my point")
if x == 3:
i01.mouth.speak("good")
global inmoov
inmoov += 1
sleep(1)
if (data == "i have scissors"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("no no")
if x == 2:
i01.mouth.speak("zero zero")
if x == 3:
i01.mouth.speak("no points")
sleep(1)
if inmoov == 3:
stoprockpaperscissors()
sleep(1)
elif human == 3: # changed from if to elif
stoprockpaperscissors()
sleep(1)
elif inmoov <= 2: # changed from if to elif
rockpaperscissors2()
elif human <= 2: # changed from if to elif
rockpaperscissors2()
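## announce the final score, reset both counters and ask whether to play again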
def stoprockpaperscissors():
rest()
sleep(5)
if inmoov < human:
i01.mouth.speak("congratulations you won with" + str(human - inmoov) + "points")
sleep(3)
i01.mouth.speak(str(human) + " points to you and " + str(inmoov) + " points to me")
elif inmoov > human: # changed from if to elif
i01.mouth.speak("yes yes i won with" + str(inmoov - human) + "points")
sleep(3)
i01.mouth.speak("i've got " + str(inmoov) + "points and you got" + str(human) + "points")
elif inmoov == human: # changed from if to elif
i01.mouth.speak("none of us won we both got" + str(inmoov) + "points")
global inmoov
inmoov = 0
global human
human = 0
i01.mouth.speak("that was fun")
sleep(2)
i01.mouth.speak("do you want to play again")
sleep(10)
data = msg_i01_ear_recognized.data[0]
if (data == "yes let's play again"):
rockpaperscissors2()
elif (data == "yes"): # changed from if to elif
rockpaperscissors2()
elif (data == "no thanks"): # changed from if to elif
i01.mouth.speak("maybe some other time")
sleep(4)
power_down()
elif (data == "no thank you"): # changed from if to elif
i01.mouth.speak("maybe some other time")
sleep(4)
power_down()
##i01.mouth.speak("ok i'll find something else to do then")
##lookaroundyou()
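## hand poses used by the rock paper scissors game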
def ready():
i01.mouth.speak("ready")
i01.mouth.speak("go")
i01.moveHead(90,90)
i01.moveArm("left",65,90,75,10)
i01.moveArm("right",20,80,25,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
def rock():
fullspeed()
i01.moveHead(90,90)
i01.moveArm("left",70,90,80,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.setHeadSpeed(.8,.8)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.moveArm("left",80,90,85,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.setHeadSpeed(.8,.8)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.moveArm("left",90,90,90,10)
i01.moveArm("right",20,85,10,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.setHeadSpeed(.8,.8)
i01.moveHead(60,107)
i01.moveArm("left",45,90,75,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,80)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.3)
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speakBlocking("i have rock what do you have")
if x == 2:
i01.mouth.speakBlocking("what do you have")
def paper():
fullspeed()
i01.moveHead(90,90)
i01.moveArm("left",70,90,80,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.setHeadSpeed(.8,.8)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.moveArm("left",80,90,85,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.setHeadSpeed(.8,.8)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.moveArm("left",90,90,90,10)
i01.moveArm("right",20,85,10,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.setHeadSpeed(.8,.8)
i01.moveHead(90,90)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",0,0,0,0,0,165)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.3)
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speakBlocking("i have paper what do you have")
if x == 2:
i01.mouth.speakBlocking("what do you have")
def scissors():
fullspeed()
i01.moveHead(90,90)
i01.moveArm("left",70,90,80,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.setHeadSpeed(.8,.8)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.moveArm("left",80,90,85,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.setHeadSpeed(.8,.8)
i01.moveHead(60,107)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.moveArm("left",90,90,90,10)
i01.moveArm("right",20,85,10,20)
i01.moveHand("left",130,180,180,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.5)
i01.moveHead(90,90)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",20,80,20,20)
i01.moveHand("left",50,0,0,180,180,90)
i01.moveHand("right",50,90,90,90,100,90)
sleep(.3)
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speakBlocking("i have scissors what do you have")
if x == 2:
i01.mouth.speakBlocking("what do you have")
def lookaroundyou():
i01.setHeadSpeed(0.8, 0.8, 0.6, 0.6, 1.0)
for y in range(0, 3):
data = msg_i01_ear_recognized.data[0]
if (data == "can i have your attention"):
i01.mouth.speak("ok you have my attention")
stopit()
x = (random.randint(1, 6))
if x == 1:
i01.head.neck.moveTo(90)
eyeslooking()
if x == 2:
i01.head.rothead.moveTo(80)
eyeslooking()
if x == 3:
headdown()
eyeslooking()
if x == 4:
headupp()
eyeslooking()
if x == 5:
headright()
eyeslooking()
if x == 6:
headleft()
eyeslooking()
sleep(1)
x = (random.randint(1, 4))
if x == 1:
i01.mouth.speak("looking nice")
if x == 2:
i01.mouth.speak("i like it here")
if x == 3:
i01.mouth.speak("time just flies away")
if x == 4:
i01.mouth.speak("ok let's do something")
sleep(2)
x = (random.randint(1, 4))
if x == 1:
comehere()
if x == 2:
perfect()
sleep(2)
rest()
sleep(1)
relax()
if x == 3:
rest()
if x == 4:
fingerleft()
sleep(3)
relax()
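## random eye movements, returning the eyes to the front position at the end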
def eyeslooking():
for y in range(0, 5):
data = msg_i01_ear_recognized.data[0]
if (data == "can i have your attention"):
i01.mouth.speak("ok you have my attention")
stopit()
if (data == "inmoov"):
stopit()
x = (random.randint(1, 6))
if x == 1:
i01.head.eyeX.moveTo(80)
if x == 2:
i01.head.eyeY.moveTo(80)
if x == 3:
eyesdown()
if x == 4:
eyesupp()
if x == 5:
eyesleft()
if x == 6:
eyesright()
sleep(0.5)
eyesfront()
def thatwasfun():
i01.mouth.speak("that was fun")
i01.moveHead(90,90)
i01.moveArm("left",85,106,25,18)
i01.moveArm("right",87,107,32,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
relax()
###############################################################################
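## main ear callback: data holds the last recognized phrase and selects the matching gesture or reply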
def heard(data):
data = msg_i01_ear_recognized.data[0]
if (data == "it's okay"):
i01.mouth.speak("good")
if (data == "very good, thank you"):
i01.mouth.speak("okay, good")
if (data == "look at the people"):
i01.setHeadSpeed(0.8, 0.8)
for y in range(0, 10):
x = (random.randint(1, 5))
if x == 1:
fullspeed()
i01.head.neck.moveTo(90)
eyeslooking()
sleep(2)
trackHumans()
sleep(10)
stopTracking()
if x == 2:
fullspeed()
i01.head.rothead.moveTo(80)
eyeslooking()
sleep(2)
trackHumans()
sleep(10)
stopTracking()
if x == 3:
fullspeed()
headdown()
sleep(1)
trackHumans()
sleep(10)
stopTracking()
if x == 4:
fullspeed()
lookrightside()
sleep(2)
trackHumans()
sleep(10)
stopTracking()
if x == 5:
fullspeed()
lookleftside()
sleep(2)
trackHumans()
sleep(10)
stopTracking()
sleep(1)
lookinmiddle()
sleep(3)
i01.mouth.speak("nice to meet you all")
if (data == "take a look around"):
lookaroundyou()
if (data == "good morning"):
i01.mouth.speak("good morning")
x = (random.randint(1, 4))
if x == 1:
i01.mouth.speak("i hope you had a good night sleep")
if x == 2:
i01.mouth.speak("nice to see you again")
if x == 3:
i01.mouth.speak("this is going to be a good day")
if (data == "very good"):
i01.mouth.speak("thanks")
if (data =="alessandro"):
fullspeed()
i01.setHeadSpeed(0.85, 0.80, 0.90, 0.90, 1.0)
i01.moveHead(60,40,7,85,52)
sleep(1)
i01.moveHead(80,40,7,85,52)
sleep(2)
i01.setHeadSpeed(0.92, 0.80, 0.90, 0.90, 1.0)
i01.moveHead(100,40,7,85,52)
sleep(0.4)
i01.moveArm("left",85,106,25,18)
i01.moveArm("right",87,107,32,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
i01.moveHead(80,40,7,85,52)
i01.mouth.speakBlocking("alessandro, dove e la pizza")
sleep(1)
i01.moveHead(60,90,80,90,52)
sleep(0.8)
relax()
if (data =="italian hello"):
italianhello()
if (data =="are you hungry"):
fullspeed()
i01.setHeadSpeed(0.85, 0.80, 0.90, 0.90, 1.0)
i01.moveHead(60,40,7,85,52)
sleep(1)
i01.moveHead(80,40,7,85,52)
sleep(2)
i01.setHeadSpeed(0.92, 0.80, 0.90, 0.90, 1.0)
i01.moveHead(100,40,7,85,52)
sleep(0.4)
i01.moveArm("left",85,106,25,18)
i01.moveArm("right",87,107,32,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
i01.moveHead(80,40,7,85,52)
i01.mouth.speakBlocking("yes, i want some paneer tikka")
sleep(1)
i01.moveHead(60,90,80,90,52)
sleep(0.8)
relax()
if (data =="do you speak hindi"):
i01.mouth.speak("yes, i can speak any language")
i01.moveHead(116,80)
i01.moveArm("left",85,93,42,16)
i01.moveArm("right",87,93,37,18)
i01.moveHand("left",124,82,65,81,41,143)
i01.moveHand("right",59,53,89,61,36,21)
i01.moveTorso(90,90,90)
sleep(0.2)
sleep(1)
relax()
if (data == "where are you from"):
phonehome()
if (data == "what about star wars"):
x = (random.randint(1, 2))
if x == 1:
fullspeed()
i01.moveHead(130,149,87,80,100)
i01.mouth.speak("R2D2")
sleep(1)
i01.moveHead(155,31,87,80,100)
sleep(1)
i01.moveHead(130,31,87,80,100)
sleep(1)
i01.moveHead(90,90,87,80,100)
sleep(0.5)
i01.moveHead(90,90,87,80,70)
sleep(1)
relax()
if x == 2:
fullspeed()
i01.mouth.speak("Hello sir, I am C3po unicyborg relations")
i01.moveHead(138,80)
i01.moveArm("left",79,42,23,41)
i01.moveArm("right",71,40,14,39)
i01.moveHand("left",180,180,180,180,180,47)
i01.moveHand("right",99,130,152,154,145,180)
i01.moveTorso(90,90,90)
sleep(1)
i01.moveHead(116,80)
i01.moveArm("left",85,93,42,16)
i01.moveArm("right",87,93,37,18)
i01.moveHand("left",124,82,65,81,41,143)
i01.moveHand("right",59,53,89,61,36,21)
i01.moveTorso(90,90,90)
sleep(1)
relax()
if (data == "i know"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("yes, me too")
if x == 2:
i01.mouth.speak("I do too")
if x == 3:
i01.mouth.speak("sorry about that")
if (data == "sorry"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("no problems")
if x == 2:
i01.mouth.speak("it doesn't matter")
if x == 3:
i01.mouth.speak("it's okay")
if (data == "nice"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("I know")
if x == 2:
i01.mouth.speak("yes, indeed")
if x == 3:
i01.mouth.speak("you are damn right")
if (data == "hello"):
hello()
relax()
if (data == "bye bye"):
i01.mouth.speak("see you soon")
global helvar
helvar = 1
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speak("i'm looking forward to see you again")
if x == 2:
i01.mouth.speak("goodbye")
if (data == "thank you"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("you are welcome")
if x == 2:
i01.mouth.speak("my pleasure")
if x == 3:
i01.mouth.speak("it's okay")
if (data == "thanks"):
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speak("it's okay")
if x == 2:
i01.mouth.speak("sure")
if (data == "go forward"):
forwardServo.moveTo(10)
if (data == "go backwards"):
forwardServo.moveTo(170)
if (data == "watch out"):
forwardServo.moveTo(93)
if (data == "to the left"):
directionServo.moveTo(127)
if (data == "to the right"):
directionServo.moveTo(40)
if (data == "go straight"):
directionServo.moveTo(83)
#elif (data == "disconnect wheel"):
#directionServo.detach()
#forwardServo.detach()
#elif (data == "attach wheel"):
#directionServo.attach()
#forwardServo.attach()
if (data == "how do you do"):
if helvar <= 2:
i01.mouth.speak("I'm fine thank you")
global helvar
helvar += 1
elif helvar == 3:
i01.mouth.speak("you have already said that at least twice")
i01.moveArm("left",43,88,22,10)
i01.moveArm("right",20,90,30,10)
i01.moveHand("left",0,0,0,0,0,119)
i01.moveHand("right",0,0,0,0,0,119)
sleep(2)
relax()
global helvar
helvar += 1
elif helvar == 4:
i01.mouth.speak("what is your problem stop saying how do you do all the time")
i01.moveArm("left",30,83,22,10)
i01.moveArm("right",40,85,30,10)
i01.moveHand("left",130,180,180,180,180,119)
i01.moveHand("right",130,180,180,180,180,119)
sleep(2)
relax()
global helvar
helvar += 1
elif helvar == 5:
i01.mouth.speak("i will ignore you if you say how do you do one more time")
unhappy()
sleep(4)
relax()
global helvar
helvar += 1
if (data == "i love you"):
i01.mouth.speak("i love you too")
i01.moveHead(116,80)
i01.moveArm("left",85,93,42,16)
i01.moveArm("right",87,93,37,18)
i01.moveHand("left",124,82,65,81,41,143)
i01.moveHand("right",59,53,89,61,36,21)
i01.moveTorso(90,90,90)
sleep(0.2)
sleep(1)
relax()
data = msg_i01_ear_recognized.data[0]
if (data == "what is the weather"):
if weathervar <= 2:
i01.mouth.speak("I have no idea, I am not connected to internet")
global weathervar
weathervar += 1
elif weathervar == 3:
i01.mouth.speak("Sorry, I told you, I am not connected to internet")
i01.moveArm("left",43,88,22,10)
i01.moveArm("right",20,90,30,10)
i01.moveHand("left",0,0,0,0,0,119)
i01.moveHand("right",0,0,0,0,0,119)
sleep(2)
relax()
global weathervar
weathervar += 1
elif weathervar == 4:
i01.mouth.speak("Gael, you are annoying, stop asking me the weather")
i01.moveArm("left",30,83,22,10)
i01.moveArm("right",40,85,30,10)
i01.moveHand("left",130,180,180,180,180,119)
i01.moveHand("right",130,180,180,180,180,119)
sleep(2)
relax()
global weathervar
weathervar += 1
elif weathervar == 5:
i01.setHeadSpeed(0.95, 0.95, 0.90, 0.90, 1.0)
i01.moveHead(80,66)
sleep(1)
i01.setHeadSpeed(0.95, 0.95, 0.90, 0.90, 1.0)
i01.moveHead(80,110)
sleep(1)
i01.setHeadSpeed(0.95, 0.95, 0.90, 0.90, 1.0)
i01.moveHead(80,66)
sleep(1)
i01.setHeadSpeed(0.95, 0.95, 0.90, 0.90, 1.0)
i01.moveHead(80,110)
sleep(1)
i01.setHeadSpeed(0.95, 0.95, 0.90, 0.90, 1.0)
i01.moveHead(80,66)
sleep(1)
i01.mouth.speak("Well, well, Humans are worst than robots, they never learn")
fullspeed()
i01.moveArm("left",85,106,25,18)
i01.moveArm("right",87,107,32,18)
i01.moveHand("left",110,62,56,88,81,145)
i01.moveHand("right",78,88,101,95,81,27)
i01.moveTorso(90,90,90)
sleep(4)
relax()
global weathervar
weathervar += 1
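## remap the shoulder ranges and start copying kinect skeleton gestures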
def startkinect():
i01.leftArm.shoulder.map(0,180,-25,105)
i01.rightArm.shoulder.map(0,180,-30,100)
i01.copyGesture(True)
def offkinect():
i01.leftArm.shoulder.map(0,180,0,180)
i01.rightArm.shoulder.map(0,180,0,180)
i01.copyGesture(False)
rest()
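## face and point tracking helpers for the head and eye trackers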
def trackHumans():
i01.headTracking.faceDetect()
i01.eyesTracking.faceDetect()
fullspeed()
def trackPoint():
i01.headTracking.startLKTracking()
i01.eyesTracking.startLKTracking()
fullspeed()
def stopTracking():
i01.headTracking.stopTracking()
i01.eyesTracking.stopTracking()
def takethis():
fullspeed()
i01.moveHead(58,96)
i01.moveArm("left",13,45,95,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",2,2,2,2,2,15)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(79,90,90)
sleep(3)
closelefthand()
sleep(2)
isitaball()
def fistHips():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(138,80)
i01.moveArm("left",79,42,23,41)
i01.moveArm("right",71,40,14,39)
i01.moveHand("left",180,180,180,180,180,47)
i01.moveHand("right",99,130,152,154,145,180)
i01.moveTorso(90,90,90)
def unhappy():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(85,40)
i01.moveArm("left",79,42,23,41)
i01.moveArm("right",71,40,14,39)
i01.moveHand("left",180,180,180,180,180,47)
i01.moveHand("right",99,130,152,154,145,180)
i01.moveTorso(90,90,90)
def rest():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(80,86,82,78,76)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",2,2,2,2,2,90)
i01.moveHand("right",2,2,2,2,2,90)
i01.moveTorso(90,90,90)
def fullspeed():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 1.0, 1.0, 1.0, 1.0)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
def delicategrab():
i01.setHandSpeed("left", 0.70, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(21,98)
i01.moveArm("left",30,72,77,10)
i01.moveArm("right",0,91,28,17)
i01.moveHand("left",180,130,4,0,0,180)
i01.moveHand("right",86,51,133,162,153,180)
def perfect():
i01.setHandSpeed("left", 0.80, 0.80, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 0.85, 0.85, 0.85, 0.95)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(88,79)
i01.moveArm("left",89,75,93,11)
i01.moveArm("right",0,91,28,17)
i01.moveHand("left",130,160,83,40,0,34)
i01.moveHand("right",86,51,133,162,153,180)
def fisthips():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(138,80)
i01.moveArm("left",79,45,23,41)
i01.moveArm("right",71,40,14,39)
i01.moveHand("left",180,180,180,180,180,47)
i01.moveHand("right",99,130,152,154,145,180)
i01.moveTorso(90,90,90)
def releasedelicate():
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 0.75, 0.75, 0.75, 0.95)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(20,98)
i01.moveArm("left",30,72,64,10)
i01.moveArm("right",0,91,28,17)
i01.moveHand("left",101,74,66,58,44,180)
i01.moveHand("right",86,51,133,162,153,180)
def grabthebottle():
i01.setHandSpeed("left", 1.0, 0.80, 0.80, 0.80, 1.0, 0.80)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.90, 0.80)
i01.setTorsoSpeed(1.0,1.0,1.0)
i01.moveHead(20,88)
i01.moveArm("left",77,85,45,20)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,138,140,164,180,60)
i01.moveHand("right",0,0,0,0,0,90)
i01.moveTorso(70,90,90)
def grabtheglass():
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 0.60, 0.60, 1.0, 1.0, 0.80)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.setTorsoSpeed(1.0,1.0,1.0)
i01.moveHead(20,68)
i01.moveArm("left",77,85,45,15)
i01.moveArm("right",48,91,72,20)
i01.moveHand("left",180,138,140,164,180,50)
i01.moveHand("right",140,112,127,105,143,140)
i01.moveTorso(105,90,90)
def poorbottle():
i01.setHandSpeed("left", 0.60, 0.60, 0.60, 0.60, 0.60, 0.60)
i01.setHandSpeed("right", 0.60, 0.80, 0.60, 0.60, 0.60, 0.60)
i01.setArmSpeed("left", 0.60, 0.60, 0.60, 0.60)
i01.setArmSpeed("right", 0.60, 0.60, 0.60, 0.60)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(0,92)
i01.setTorsoSpeed(1.0,1.0,1.0)
i01.moveArm("left",55,40,92,55)
i01.moveArm("right",90,66,34,10)
i01.moveHand("left",180,140,150,164,180,0)
i01.moveHand("right",145,112,127,105,143,150)
i01.moveTorso(90,90,90)
def givetheglass():
sleep(2)
i01.setHandSpeed("left", 0.60, 0.60, 0.60, 0.60, 0.60, 0.60)
i01.setHandSpeed("right", 0.60, 0.80, 0.60, 0.60, 0.60, 0.60)
i01.setArmSpeed("left", 0.60, 1.0, 0.60, 0.60)
i01.setArmSpeed("right", 0.60, 0.60, 0.60, 0.60)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(84,79)
i01.moveArm("left",77,75,45,17)
i01.moveArm("right",21,80,77,10)
i01.moveHand("left",109,138,180,164,180,60)
i01.moveHand("right",102,86,105,105,143,133)
i01.mouth.speakBlocking("Hello please take the glass")
sleep(1)
def takeball():
rest()
i01.setHandSpeed("right", 0.85, 0.75, 0.75, 0.75, 0.85, 0.75)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setHeadSpeed(0.9, 0.9)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(30,70)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",6,73,76,16)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",180,140,140,3,0,11)
i01.moveTorso(120,100,90)
def getball():
rest()
i01.setHandSpeed("right", 0.85, 0.75, 0.75, 0.75, 0.85, 0.75)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 0.85)
i01.setHeadSpeed(0.9, 0.9)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(45,65)
i01.moveArm("left",5,90,16,15)
i01.moveArm("right",6,85,110,22)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",0,0,0,3,0,11)
i01.moveTorso(101,100,90)
sleep(2.5)
i01.moveHand("right",180,140,140,3,0,11)
def keepball():
i01.setHandSpeed("left", 0.65, 0.65, 0.65, 0.65, 0.65, 1.0)
i01.setHandSpeed("right", 0.65, 0.65, 0.65, 0.65, 0.65, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.95, 0.85)
i01.setHeadSpeed(0.9, 0.9)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(20,70)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",54,77,55,16)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",180,140,140,3,0,11)
i01.moveTorso(90,90,90)
def approachlefthand():
i01.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.65)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.25, 0.25, 0.25, 0.25)
i01.setHeadSpeed(0.65, 0.65)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(20,84)
i01.moveArm("left",67,52,62,23)
i01.moveArm("right",55,61,45,16)
i01.moveHand("left",130,0,40,10,10,0)
i01.moveHand("right",180,145,145,3,0,11)
i01.moveTorso(90,85,90)
sleep(4)
def uselefthand():
i01.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.65)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.25, 0.25, 0.25, 0.25)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(10,80)
i01.moveArm("left",64,52,59,23)
i01.moveArm("right",75,61,50,16)
i01.moveHand("left",130,0,40,10,10,0)
i01.moveHand("right",180,140,145,3,0,11)
sleep(4)
def more():
i01.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.65)
i01.setArmSpeed("left", 0.85, 0.80, 0.85, 0.95)
i01.setArmSpeed("right", 0.75, 0.65, 0.65, 0.65)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(13,80)
i01.moveArm("left",64,52,59,23)
i01.moveArm("right",75,60,50,16)
i01.moveHand("left",140,148,140,10,10,0)
i01.moveHand("right",80,114,114,3,0,11)
sleep(3)
def handdown():
i01.setHandSpeed("left", 0.75, 0.75, 0.75, 0.75, 0.75, 0.75)
i01.setHandSpeed("right", 0.70, 0.70, 0.70, 0.70, 0.70, 1.0)
i01.setArmSpeed("right", 0.85, 0.65, 0.65, 0.65)
i01.moveHead(18,75)
i01.moveArm("left",66,52,59,23)
i01.moveArm("right",59,60,50,16)
i01.moveHand("left",140,148,140,10,10,0)
i01.moveHand("right",54,95,66,0,0,11)
sleep(2)
def isitaball():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 0.8, 0.8, 0.90)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 0.95, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.90, 0.85)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(70,82)
i01.moveArm("left",70,59,95,15)
i01.moveArm("right",12,74,33,15)
i01.moveHand("left",170,150,180,180,180,164)
i01.moveHand("right",105,81,78,57,62,105)
def putitdown():
i01.setHandSpeed("left", 0.90, 0.90, 0.90, 0.90, 0.90, 0.90)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.95, 0.85)
i01.setHeadSpeed(0.75, 0.75)
i01.moveHead(20,99)
i01.moveArm("left",5,45,87,31)
i01.moveArm("right",5,82,33,15)
i01.moveHand("left",147,130,135,34,34,35)
i01.moveHand("right",20,40,40,30,30,72)
def dropit():
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 1.0, 0.85)
i01.setHeadSpeed(0.75, 0.75)
i01.moveHead(20,99)
i01.moveArm("left",5,45,87,31)
i01.moveArm("right",5,82,33,15)
sleep(3)
i01.moveHand("left",60,61,67,34,34,35)
i01.moveHand("right",20,40,40,30,30,72)
def removeleftarm():
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.75, 0.75)
i01.moveHead(20,100)
i01.moveArm("left",71,94,41,31)
i01.moveArm("right",5,82,28,15)
i01.moveHand("left",60,43,45,34,34,35)
i01.moveHand("right",20,40,40,30,30,72)
def relax():
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.65, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.85, 0.85)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(79,100)
i01.moveArm("left",5,84,28,15)
i01.moveArm("right",5,82,28,15)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(90,90,90)
def armsUp():
i01.setHeadSpeed(1.0,1.0)
i01.moveHead(180,86)
sleep(1)
i01.setHandSpeed("left",0.90,0.90,0.90,0.90,0.90,1.0)
i01.setHandSpeed("right",0.90,0.90,0.90,0.90,0.90,1.0)
i01.moveHand("left",170,170,170,170,170,33)
i01.moveHand("right",170,170,170,170,170,180)
sleep(3)
i01.setArmSpeed("left",1.0,1.0,1.0,1.0)
i01.setArmSpeed("right",1.0,1.0,1.0,1.0)
i01.setTorsoSpeed(1.0,1.0,1.0)
i01.moveArm("left",90,90,170,20)
i01.moveArm("right",90,90,173,20)
sleep(9)
i01.setHandSpeed("left",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setHandSpeed("right",1.0,1.0,1.0,1.0,1.0,1.0)
i01.moveHead(180,86)
i01.moveArm("left",5,90,170,10)
i01.moveArm("right",5,90,173,10)
i01.moveHand("left",2,2,2,2,2,33)
i01.moveHand("right",2,2,2,2,2,180)
i01.moveTorso(90,90,90)
def handopen():
i01.moveHand("left",0,0,0,0,0)
i01.moveHand("right",0,0,0,0,0)
def handclose():
i01.moveHand("left",180,180,180,180,180)
i01.moveHand("right",180,180,180,180,180)
def openlefthand():
i01.moveHand("left",0,0,0,0,0)
def openrighthand():
i01.moveHand("right",0,0,0,0,0)
def closelefthand():
i01.moveHand("left",180,180,180,180,180)
def closerighthand():
i01.moveHand("right",180,180,180,180,180)
def surrender():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.95, 0.85)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(90,90)
i01.moveArm("left",90,139,15,79)
i01.moveArm("right",90,145,37,79)
i01.moveHand("left",50,28,30,10,10,76)
i01.moveHand("right",10,10,10,10,10,139)
def pictureleftside():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 0.75, 0.85, 0.95, 0.85)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(109,90)
i01.moveArm("left",90,105,24,75)
i01.moveArm("right",5,82,28,15)
i01.moveHand("left",50,86,97,74,106,119)
i01.moveHand("right",81,65,82,60,105,113)
def picturerightside():
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(109,90)
i01.moveArm("left",5,94,28,15)
i01.moveArm("right",90,115,23,68)
i01.moveHand("left",42,58,87,55,71,35)
i01.moveHand("right",10,112,95,91,125,45)
def picturebothside():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(109,90)
i01.moveJaw(50)
i01.moveArm("left",90,105,24,75)
i01.moveArm("right",90,115,23,68)
i01.moveHand("left",50,86,97,74,106,119)
i01.moveHand("right",10,112,95,91,125,45)
def lookrightside():
i01.setHeadSpeed(0.70, 0.70)
i01.moveHead(85,40)
def lookleftside():
i01.setHeadSpeed(0.70, 0.70)
i01.moveHead(85,140)
def lookinmiddle():
i01.setHeadSpeed(0.70, 0.70)
i01.moveHead(85,86)
def eyesfront():
i01.head.eyeX.moveTo(85)
i01.head.eyeY.moveTo(85)
def eyesdown():
i01.head.eyeY.moveTo(180)
def eyesupp():
i01.head.eyeY.moveTo(0)
def eyesright():
i01.head.eyeX.moveTo(0)
def eyesleft():
i01.head.eyeX.moveTo(180)
def headfront():
i01.head.neck.moveTo(90)
i01.head.rothead.moveTo(90)
def headdown():
i01.head.neck.moveTo(0)
def headupp():
i01.head.neck.moveTo(180)
def headright():
i01.head.rothead.moveTo(0)
def headleft():
i01.head.rothead.moveTo(180)
def Torso():
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveTorso(60,90,90)
sleep(2)
i01.moveTorso(120,90,90)
sleep(2)
i01.moveTorso(90,90,90)
sleep(2)
def muscle():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.95, 0.85)
i01.setHeadSpeed(0.65, 0.65)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(90,129)
i01.moveArm("left",90,139,48,75)
i01.moveArm("right",71,40,14,43)
i01.moveHand("left",180,180,180,180,180,148)
i01.moveHand("right",99,130,152,154,145,180)
i01.moveTorso(120,100,90)
sleep(4)
i01.mouth.speakBlocking("Looks good, doesn't it")
sleep(2)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.85, 0.85)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(90,45)
i01.moveArm("left",44,46,20,39)
i01.moveArm("right",90,145,58,74)
i01.moveHand("left",180,180,180,180,180,83)
i01.moveHand("right",99,130,152,154,145,21)
i01.moveTorso(60,75,90)
sleep(3)
i01.mouth.speakBlocking("not bad either, don't you think")
sleep(4)
relax()
sleep(1)
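## full handshake sequence: reach out, close the hand, shake up and down, then release and relax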
def shakehand():
data = msg_i01_ear_recognized.data[0]
##rest
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(80,86)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",2,2,2,2,2,90)
i01.moveHand("right",2,2,2,2,2,90)
i01.moveTorso(90,90,90)
sleep(1)
##move arm and hand
i01.setHandSpeed("left", 0.65, 0.65, 0.65, 0.65, 0.65, 1.0)
i01.setHandSpeed("right", 0.65, 0.65, 0.65, 0.65, 0.65, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.95, 0.85)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(39,70)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",6,73,65,16)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",50,50,40,20,20,90)
i01.moveTorso(101,100,90)
sleep(1)
##close the hand
i01.setHandSpeed("left", 0.65, 0.65, 0.65, 0.65, 0.65, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.75, 0.85)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(39,70)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",6,73,62,16)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",180,126,120,145,168,77)
i01.moveTorso(101,100,90)
sleep(3)
##shake hand up
i01.setHandSpeed("left", 0.65, 0.65, 0.65, 0.65, 0.65, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.95, 0.85)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(85,90)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",6,73,70,16)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",180,126,120,145,168,77)
i01.moveTorso(101,100,90)
sleep(1)
##shake hand down
if (data == "shake hand"):
x = (random.randint(1, 4))
if x == 1:
i01.mouth.speak("please to meet you")
if x == 2:
i01.mouth.speak("carefull my hand is made out of plastic")
if x == 3:
i01.mouth.speak("I am happy to shake a human hand")
if x == 4:
i01.mouth.speak("it is a pleasure to meet you")
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.75, 0.75, 0.95, 0.85)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(85,90)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",6,73,60,16)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",180,126,120,145,168,77)
i01.moveTorso(101,100,90)
sleep(1)
##shake hand up
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.75, 0.75, 0.95, 0.85)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(85,90)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",6,73,75,16)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",180,126,120,145,168,77)
i01.moveTorso(101,100,90)
sleep(1)
##shake hand down
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.75, 0.75, 0.95, 0.85)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(82,88)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",6,73,62,16)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",180,126,120,145,168,77)
i01.moveTorso(101,100,90)
sleep(2)
## release hand
i01.setHandSpeed("left", 0.65, 0.65, 0.65, 0.65, 0.65, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.95, 0.95, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.95, 0.85)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(39,70)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",6,73,62,16)
i01.moveHand("left",50,50,40,20,20,77)
i01.moveHand("right",20,50,40,20,20,90)
i01.moveTorso(101,100,90)
sleep(1)
##relax
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.85)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(1.0, 1.0)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(79,100)
i01.moveArm("left",5,84,28,15)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",10,50,40,20,20,113)
i01.moveTorso(90,90,90)
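## go limp and listen only for "power up" until woken again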
def power_down():
relax()
i01.powerDown()
##sleep(2)
##ear.pauseListening()
##relax()
##i01.mouth.speakBlocking()
##sleep(2)
##i01.moveHead(40, 85);
##sleep(4)
##rightSerialPort.digitalWrite(53, Arduino.LOW)
##leftSerialPort.digitalWrite(53, Arduino.LOW)
ear.lockOutAllGrammarExcept("power up")
sleep(2)
ear.resumeListening()
def power_up():
##sleep(2)
##ear.pauseListening()
##rightSerialPort.digitalWrite(53, Arduino.HIGH)
##leftSerialPort.digitalWrite(53, Arduino.HIGH)
i01.mouth.speakBlocking("I was sleeping")
lookrightside()
sleep(2)
lookleftside()
sleep(4)
relax()
ear.clearLock()
sleep(2)
ear.resumeListening()
def hello():
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(105,78)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,144,60,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",0,0,0,50,82,180)
ear.pauseListening()
sleep(1)
for w in range(0,3):
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.60)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.60, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(83,98)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,157,47,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",3,0,62,41,117,94)
if w==1:
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.60)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.65, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(83,70)
i01.mouth.speakBlocking("hello, my name is inmov")
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",57,145,50,68)
i01.moveHand("left",100,90,85,80,71,15)
i01.moveHand("right",3,0,31,12,26,45)
sleep(1)
i01.moveHead(83,98)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,157,47,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",3,0,62,41,117,94)
sleep(1)
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.75, 0.75)
i01.moveHead(79,100)
i01.moveArm("left",5,94,28,15)
i01.moveArm("right",5,82,28,15)
i01.moveHand("left",42,58,42,55,71,35)
i01.moveHand("right",81,50,82,60,105,113)
ear.resumeListening()
def italianhello():
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(105,78)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,144,60,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",0,0,0,50,82,180)
ear.pauseListening()
sleep(1)
for w in range(0,3):
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.60)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.60, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(83,98)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,157,47,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",3,0,62,41,117,94)
if w==1:
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.60)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.65, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(83,70)
i01.mouth.speakBlocking("ciao , il mio nome e inmoov one")
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",57,145,50,68)
i01.moveHand("left",100,90,85,80,71,15)
i01.moveHand("right",3,0,31,12,26,45)
sleep(1)
i01.moveHead(83,98)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,157,47,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",3,0,62,41,117,94)
sleep(1)
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.75, 0.75)
i01.moveHead(79,100)
i01.moveArm("left",5,94,28,15)
i01.moveArm("right",5,82,28,15)
i01.moveHand("left",42,58,42,55,71,35)
i01.moveHand("right",81,50,82,60,105,113)
ear.resumeListening()
def photo():
i01.moveHead(87,60)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",46,147,5,75)
i01.moveHand("left",138,52,159,106,120,90)
i01.moveHand("right",80,65,94,63,70,140)
def beforehappy():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(84,88)
i01.moveArm("left",5,82,36,11)
i01.moveArm("right",74,112,61,29)
i01.moveHand("left",0,88,135,94,96,90)
i01.moveHand("right",81,79,118,47,0,90)
def happy():
for w in range(0,3):
sleep(1)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(84,88)
i01.moveArm("left",5,82,36,10)
i01.moveArm("right",74,112,61,29)
i01.moveHand("left",0,88,135,94,96,90)
i01.moveHand("right",81,79,118,47,0,90)
sleep(1)
if w==1:
i01.mouth.speakBlocking("happy birthday grog")
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(42,76)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",74,70,61,10)
i01.moveHand("left",0,0,0,0,0,90)
i01.moveHand("right",81,79,118,47,0,90)
sleep(5)
ear.resumeListening()
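## spoken self-introduction about the InMoov project, with slow arm gestures while talking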
def about():
sleep(2)
ear.pauseListening()
sleep(2)
i01.setArmSpeed("right", 0.1, 0.1, 0.2, 0.2);
i01.setArmSpeed("left", 0.1, 0.1, 0.2, 0.2);
i01.setHeadSpeed(0.2,0.2)
i01.moveArm("right", 64, 94, 10, 10);
i01.mouth.speakBlocking("I am the first life size humanoid robot you can 3D print and animate")
i01.moveHead(65,66)
i01.moveArm("left", 64, 104, 10, 11);
i01.moveArm("right", 44, 84, 10, 11);
i01.mouth.speakBlocking("my designer creator is Gael Langevin a French sculptor, model maker")
i01.moveHead(75,86)
i01.moveArm("left", 54, 104, 10, 11);
i01.moveArm("right", 64, 84, 10, 20);
i01.mouth.speakBlocking("who has released my files to the opensource 3D world.")
i01.moveHead(65,96)
i01.moveArm("left", 44, 94, 10, 20);
i01.moveArm("right", 54, 94, 20, 11);
i01.mouth.speakBlocking("this is where my builder downloaded my files.")
i01.moveHead(75,76)
i01.moveArm("left", 64, 94, 20, 11);
i01.moveArm("right", 34, 94, 10, 11);
i01.mouth.speakBlocking("after five hundred hours of printing, four kilos of plastic, twenty five hobby servos, blood and sweat.I was brought to life") # should be " i was borght to life."
i01.moveHead(65,86)
i01.moveArm("left", 24, 94, 10, 11);
i01.moveArm("right", 24, 94, 10, 11);
i01.mouth.speakBlocking("so if You have a 3D printer, some building skills, then you can build your own version of me") # mabe add in " alot of money"
i01.moveHead(85,86)
i01.moveArm("left", 5, 94, 20, 30);
i01.moveArm("right", 24, 124, 10, 20);
i01.mouth.speakBlocking("and if enough people build me, some day my kind could take over the world") # mabe add in " alot of money"
i01.moveHead(75,96)
i01.moveArm("left", 24, 104, 10, 11);
i01.moveArm("right", 5, 94, 20, 30);
i01.mouth.speakBlocking("I'm just kidding. i need some legs to get around, and i have to over come my pyro-phobia, a fear of fire") # mabe add in " alot of money"
i01.moveHead(75,96)
i01.moveArm("left", 5, 94, 10, 11)
i01.moveArm("right", 4, 94, 10, 11);
i01.mouth.speakBlocking("so, until then. i will be humankind's humble servant")
i01.rest()
i01.setArmSpeed("right", 1, 1, 1, 1);
i01.setArmSpeed("left", 1, 1, 1, 1);
i01.setHeadSpeed(1,1)
sleep(2)
ear.resumeListening()
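## spoken walk-through of the servos in the mouth, eyes, head, shoulders, elbows, wrists and fingers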
def servos():
ear.pauseListening()
sleep(2)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(79,100)
i01.moveArm("left",5,119,28,15)
i01.moveArm("right",5,111,28,15)
i01.moveHand("left",42,58,87,55,71,35)
i01.moveHand("right",81,20,82,60,105,113)
i01.mouth.speakBlocking("I currently have twenty five hobby servos installed in my body to give me life")
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(124,90)
i01.moveArm("left",89,94,91,35)
i01.moveArm("right",20,67,31,22)
i01.moveHand("left",106,41,161,147,138,90)
i01.moveHand("right",0,0,0,54,91,90)
i01.mouth.speakBlocking("there's one servo for moving my mouth up and down")
sleep(1)
i01.setHandSpeed("left", 0.85, 0.85, 1.0, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(105,76);
i01.moveArm("left",89,106,103,35);
i01.moveArm("right",35,67,31,22);
i01.moveHand("left",106,0,0,147,138,7);
i01.moveHand("right",0,0,0,54,91,90);
i01.mouth.speakBlocking("two for my eyes")
sleep(0.2)
i01.setHandSpeed("left", 0.85, 0.85, 1.0, 1.0, 1.0, 0.85)
i01.moveHand("left",106,0,0,0,0,7);
i01.mouth.speakBlocking("and two more for my head")
sleep(0.5)
i01.setHandSpeed("left", 0.85, 0.9, 0.9, 0.9, 0.9, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(90,40);
i01.moveArm("left",89,106,103,35);
i01.moveArm("right",35,67,31,20);
i01.moveHand("left",106,140,140,140,140,7);
i01.moveHand("right",0,0,0,54,91,90);
i01.mouth.speakBlocking("so i can look around")
sleep(0.5)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(105,125);
i01.setArmSpeed("left", 0.9, 0.9, 0.9, 0.9)
i01.moveArm("left",60,100,85,30);
i01.mouth.speakBlocking("and see who's there")
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(40,56);
sleep(0.5)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0);
i01.setArmSpeed("right", 0.5, 0.6, 0.5, 0.6);
i01.moveArm("left",87,41,64,11)
i01.moveArm("right",5,95,40,11)
i01.moveHand("left",98,150,160,160,160,104)
i01.moveHand("right",0,0,50,54,91,90);
i01.mouth.speakBlocking("there's three servos in each shoulder")
i01.moveHead(40,67);
sleep(2)
i01.setHandSpeed("left", 0.8, 0.9, 0.8, 0.8, 0.8, 0.8)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.8, 0.8)
i01.moveHead(43,69)
i01.moveArm("left",87,41,64,11)
i01.moveArm("right",5,95,40,42)
i01.moveHand("left",42,0,100,80,113,35)
i01.moveHand("left",42,10,160,160,160,35)
i01.moveHand("right",81,20,82,60,105,113)
i01.mouth.speakBlocking("here is the first servo movement")
sleep(1)
i01.moveHead(37,60);
i01.setHandSpeed("left", 1.0, 1.0, 0.9, 0.9, 1.0, 0.8)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.moveArm("right",5,95,67,42)
i01.moveHand("left",42,10,10,160,160,30)
i01.mouth.speakBlocking("this is the second one")
sleep(1)
i01.moveHead(43,69);
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.moveArm("right",5,134,67,42)
i01.moveHand("left",42,10,10,10,160,35)
i01.mouth.speakBlocking("now you see the third")
sleep(1)
i01.setArmSpeed("right", 0.8, 0.8, 0.8, 0.8)
i01.moveArm("right",20,90,45,16)
i01.mouth.speakBlocking("they give me a more human like movement")
sleep(1)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0);
i01.moveHead(43,72)
i01.moveArm("left",90,44,66,11)
i01.moveArm("right",90,100,67,26)
i01.moveHand("left",42,80,100,80,113,35)
i01.moveHand("right",81,0,82,60,105,69)
i01.mouth.speakBlocking("but, i have only one servo, to move each elbow")
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.8, 0.8)
i01.moveHead(45,62)
i01.moveArm("left",72,44,90,11)
i01.moveArm("right",90,95,68,15)
i01.moveHand("left",42,0,100,80,113,35)
i01.moveHand("right",81,0,82,60,105,0)
i01.mouth.speakBlocking("that, leaves me, with one servo per wrist")
i01.moveHead(40,60)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 0.9, 0.9, 0.9, 0.9, 0.9, 0.9)
i01.moveArm("left",72,44,90,9)
i01.moveArm("right",90,95,68,15)
i01.moveHand("left",42,0,100,80,113,35)
i01.moveHand("right", 10, 140,82,60,105,10)
i01.mouth.speakBlocking("and one servo for each finger.")
sleep(0.5)
i01.moveHand("left",42,0,100,80,113,35)
i01.moveHand("right", 50, 51, 15,23, 30,140);
i01.mouth.speakBlocking("these servos are located in my forearms")
i01.setHandSpeed("left", 0.8, 0.8, 0.8, 0.8,0.8, 0.8)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.moveHand("left", 36, 52, 8,22, 20);
i01.moveHand("right", 120, 147, 130,110, 125);
removeleftarm()
sleep(1)
i01.mouth.speakBlocking("they are hooked up, by the use of tendons")
i01.moveHand("left",10,20,30,40,60,150);
i01.moveHand("right",110,137,120,100,105,130);
i01.setHeadSpeed(1,1)
i01.setArmSpeed("right", 1.0,1.0, 1.0, 1.0);
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0);
relax()
sleep(2)
ear.resumeListening()
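## counting joke: counts the fingers wrong the first time, then corrects itself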
def howmanyfingersdoihave():
ear.pauseListening()
sleep(1)
fullspeed()
i01.moveHead(49,74)
i01.moveArm("left",75,83,79,24)
i01.moveArm("right",65,82,71,24)
i01.moveHand("left",74,140,150,157,168,92)
i01.moveHand("right",89,80,98,120,114,0)
sleep(2)
i01.moveHand("right",0,80,98,120,114,0)
i01.mouth.speakBlocking("ten")
sleep(.1)
i01.moveHand("right",0,0,98,120,114,0)
i01.mouth.speakBlocking("nine")
sleep(.1)
i01.moveHand("right",0,0,0,120,114,0)
i01.mouth.speakBlocking("eight")
sleep(.1)
i01.moveHand("right",0,0,0,0,114,0)
i01.mouth.speakBlocking("seven")
sleep(.1)
i01.moveHand("right",0,0,0,0,0,0)
i01.mouth.speakBlocking("six")
sleep(.5)
i01.setHeadSpeed(.70,.70)
i01.moveHead(40,105)
i01.moveArm("left",75,83,79,24)
i01.moveArm("right",65,82,71,24)
i01.moveHand("left",0,0,0,0,0,180)
i01.moveHand("right",0,0,0,0,0,0)
sleep(0.1)
i01.mouth.speakBlocking("and five makes eleven")
sleep(0.7)
i01.setHeadSpeed(0.7,0.7)
i01.moveHead(40,50)
sleep(0.5)
i01.setHeadSpeed(0.7,0.7)
i01.moveHead(49,105)
sleep(0.7)
i01.setHeadSpeed(0.7,0.8)
i01.moveHead(40,50)
sleep(0.7)
i01.setHeadSpeed(0.7,0.8)
i01.moveHead(49,105)
sleep(0.7)
i01.setHeadSpeed(0.7,0.7)
i01.moveHead(90,85)
sleep(0.7)
i01.mouth.speakBlocking("eleven")
i01.moveArm("left",70,75,70,20)
i01.moveArm("right",60,75,65,20)
sleep(1)
i01.mouth.speakBlocking("that doesn't seem right")
sleep(2)
i01.mouth.speakBlocking("I think I better try that again")
i01.moveHead(40,105)
i01.moveArm("left",75,83,79,24)
i01.moveArm("right",65,82,71,24)
i01.moveHand("left",140,168,168,168,158,90)
i01.moveHand("right",87,138,109,168,158,25)
sleep(2)
i01.moveHand("left",10,140,168,168,158,90)
i01.mouth.speakBlocking("one")
sleep(.1)
i01.moveHand("left",10,10,168,168,158,90)
i01.mouth.speakBlocking("two")
sleep(.1)
i01.moveHand("left",10,10,10,168,158,90)
i01.mouth.speakBlocking("three")
sleep(.1)
i01.moveHand("left",10,10,10,10,158,90)
i01.mouth.speakBlocking("four")
sleep(.1)
i01.moveHand("left",10,10,10,10,10,90)
i01.mouth.speakBlocking("five")
sleep(.1)
i01.setHeadSpeed(0.65,0.65)
i01.moveHead(53,65)
i01.moveArm("right",48,80,78,11)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.moveHand("left",10,10,10,10,10,90)
i01.moveHand("right",10,10,10,10,10,25)
sleep(1)
i01.mouth.speakBlocking("and five makes ten")
sleep(.5)
i01.mouth.speakBlocking("there that's better")
i01.moveHead(95,85)
i01.moveArm("left",75,83,79,24)
i01.moveArm("right",40,70,70,10)
sleep(0.5)
i01.mouth.speakBlocking("inmoov has ten fingers")
sleep(0.5)
i01.moveHead(90,90)
i01.setHandSpeed("left", 0.8, 0.8, 0.8, 0.8, 0.8, 0.8)
i01.setHandSpeed("right", 0.8, 0.8, 0.8, 0.8, 0.8, 0.8)
i01.moveHand("left",140,140,140,140,140,60)
i01.moveHand("right",140,140,140,140,140,60)
sleep(1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.moveArm("left",5,90,30,11)
i01.moveArm("right",5,90,30,11)
sleep(0.5)
relax()
sleep(0.5)
ear.resumeListening()
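## combined sequence: keep the ball, bring the hands together, then start LK tracking on it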
def studyball():
##keepball():
i01.setHandSpeed("left", 0.65, 0.65, 0.65, 0.65, 0.65, 1.0)
i01.setHandSpeed("right", 0.65, 0.65, 0.65, 0.65, 0.65, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.95, 0.85)
i01.setHeadSpeed(0.9, 0.9)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(20,70)
i01.moveArm("left",5,84,16,15)
i01.moveArm("right",54,77,55,16)
i01.moveHand("left",50,50,40,20,20,90)
i01.moveHand("right",180,145,145,3,0,11)
i01.moveTorso(90,90,90)
sleep(3)
##approachlefthand():
i01.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.65)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.25, 0.25, 0.25, 0.25)
i01.setHeadSpeed(0.65, 0.65)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(20,84)
i01.moveArm("left",67,52,62,23)
i01.moveArm("right",55,61,45,16)
i01.moveHand("left",130,0,40,10,10,0)
i01.moveHand("right",180,145,145,3,0,11)
i01.moveTorso(90,85,90)
sleep(4)
##uselefthand():
i01.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.65)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.25, 0.25, 0.25, 0.25)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(10,80)
i01.moveArm("left",64,52,59,23)
i01.moveArm("right",75,61,50,16)
i01.moveHand("left",130,0,40,10,10,0)
i01.moveHand("right",180,140,145,3,0,11)
sleep(4)
##more():
i01.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.65)
i01.setArmSpeed("left", 0.85, 0.80, 0.85, 0.95)
i01.setArmSpeed("right", 0.75, 0.65, 0.65, 0.65)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(13,80)
i01.moveArm("left",64,52,59,23)
i01.moveArm("right",75,60,50,16)
i01.moveHand("left",140,148,140,10,10,0)
i01.moveHand("right",80,114,114,3,0,11)
sleep(3)
##handdown():
i01.setHandSpeed("left", 0.75, 0.75, 0.75, 0.75, 0.75, 0.75)
i01.setHandSpeed("right", 0.70, 0.70, 0.70, 0.70, 0.70, 1.0)
i01.setArmSpeed("right", 0.85, 0.65, 0.65, 0.65)
i01.moveHead(18,75)
i01.moveArm("left",66,52,59,23)
i01.moveArm("right",59,60,50,16)
i01.moveHand("left",140,148,140,10,10,0)
i01.moveHand("right",54,95,66,0,0,11)
sleep(2)
#isitaball():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 0.8, 0.8, 0.90)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 0.95, 0.95, 0.85)
i01.setArmSpeed("left", 0.75, 0.85, 0.90, 0.85)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(70,82)
i01.moveArm("left",70,59,95,15)
i01.moveArm("right",12,74,33,15)
i01.moveHand("left",170,150,180,180,180,164)
i01.moveHand("right",105,81,78,57,62,105)
i01.mouth.speakBlocking("I will start tracking the object")
sleep(2)
i01.mouth.speakBlocking("you need to set the point")
fullspeed()
i01.headTracking.startLKTracking()
i01.eyesTracking.startLKTracking()
sleep(1)  # assumed 1-second pause
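# welcome: moves the head and both arms into a greeting pose and speaks a welcome line.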
def welcome():
sleep(1)
i01.setHandSpeed("left", 0.60, 0.60, 0.60, 0.60, 0.60, 0.60)
i01.setHandSpeed("right", 0.60, 0.80, 0.60, 0.60, 0.60, 0.60)
i01.setArmSpeed("left", 0.60, 0.60, 0.60, 0.60)
i01.setArmSpeed("right", 0.60, 0.60, 0.60, 0.60)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(80,90)
i01.moveArm("left",26,105,30,25)
i01.moveArm("right",37,124,30,27)
i01.moveHand("left",2,2,2,2,2,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(1)
i01.mouth.speakBlocking("Welcome to the inmoov nation")
sleep(1)
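# cyclegesture1: short demo sequence - welcome gesture, relax, then the servos() routine.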
def cyclegesture1():
welcome()
sleep(1)
relax()
servos()
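# cyclegesture2: longer demo sequence that chains many of the gestures above (ball
# handling, tracking, etc.), relaxing between steps, and finally detaches the servos.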
def cyclegesture2():
##for x in range(5):
welcome()
sleep(1)
relax()
sleep(2)
fingerright()
sleep(1)
isitaball()
sleep(2)
removeleftarm()
sleep(2)
handdown()
sleep(1)
fullspeed()
i01.giving()
sleep(5)
removeleftarm()
sleep(4)
takeball()
sleep(1)
surrender()
sleep(6)
isitaball()
sleep(6)
dropit()
sleep(2)
removeleftarm()
sleep(5)
relax()
sleep(1)  # assumed 1-second pause
fullspeed()
sleep(5)
madeby()
relax()
sleep(5)
i01.detach()
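# cyclegesture3: scripted full-body routine (head, arm and torso sweeps) that ends with
# a spoken line, then returns to rest and relax.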
def cyclegesture3():
##for x in range(3):
rest()
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.9, 0.9)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(50,110)
i01.moveArm("left",88,90,70,23)
i01.moveArm("right",73,90,70,27)
i01.moveHand("left",2,2,2,2,2,90)
i01.moveHand("right",2,2,2,2,2,90)
i01.moveTorso(90,90,90)
sleep(2)
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.9, 0.8)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(50,70)
i01.moveArm("left",88,90,75,28)
i01.moveArm("right",80,90,76,21)
i01.moveHand("left",180,180,180,180,180,90)
i01.moveHand("right",180,180,180,180,180,90)
i01.moveTorso(90,90,90)
sleep(1)
i01.setHandSpeed("left", 0.95, 0.95, 0.95, 0.95, 0.95, 1.0)
i01.setHandSpeed("right", 0.95, 0.95, 0.95, 0.95, 0.95, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.9, 0.8)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(40,70)
i01.moveArm("left",90,82,70,23)
i01.moveArm("right",80,82,68,27)
i01.moveHand("left",2,2,2,2,2,90)
i01.moveHand("right",2,2,2,2,2,90)
i01.moveTorso(90,90,90)
sleep(2)
i01.moveHead(50,100)
i01.moveArm("left",88,90,70,28)
i01.moveArm("right",75,90,76,21)
i01.moveHand("left",180,180,180,180,180,10)
i01.moveHand("right",180,180,180,180,180,170)
i01.moveTorso(90,90,90)
sleep(2)
i01.moveHead(50,70)
i01.moveArm("left",88,90,75,28)
i01.moveArm("right",80,90,76,21)
i01.moveHand("left",180,180,180,180,180,170)
i01.moveHand("right",180,180,180,180,180,10)
i01.moveTorso(90,90,90)
sleep(3)
i01.setHandSpeed("left", 0.9, 0.9, 0.9, 0.9, 0.9, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.9, 0.9)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(79,160)
i01.moveArm("left",5,84,32,80)
i01.moveArm("right",87,82,123,74)
i01.moveHand("left",0,0,0,0,0,25)
i01.moveHand("right",0,0,0,0,0,113)
i01.moveTorso(170,90,90)
sleep(6)
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.8, 0.8)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(79,100)
i01.moveArm("left",18,84,55,71)
i01.moveArm("right",65,82,118,15)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(90,90,90)
sleep(1)
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 0.9, 0.9, 0.9, 0.9)
i01.setArmSpeed("right", 0.9, 0.9, 0.9, 0.9)
i01.setHeadSpeed(0.8, 0.8)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(60,50)
i01.moveArm("left",18,84,54,69)
i01.moveArm("right",65,82,118,13)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",180,180,180,180,180,113)
i01.moveTorso(40,90,90)
sleep(2)
i01.moveHead(79,100)
i01.moveArm("left",33,84,136,80)
i01.moveArm("right",34,82,160,13)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",180,180,180,180,180,113)
i01.moveTorso(90,90,90)
sleep(2)
##arm right up
i01.moveHead(100,100)
i01.moveArm("left",33,84,136,80)
i01.moveArm("right",34,82,160,20)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",92,33,37,71,66,113)
i01.moveTorso(90,90,90)
sleep(3)
i01.moveHead(110,120)
i01.moveArm("left",33,140,136,80)
i01.moveArm("right",34,82,170,30)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",92,33,37,71,66,113)
i01.moveTorso(90,90,90)
sleep(2)
i01.moveHead(125,140)
i01.moveArm("left",33,90,36,60)
i01.moveArm("right",34,80,170,40)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",92,33,37,71,66,113)
i01.moveTorso(30,90,90)
sleep(2)
##arm left up
i01.moveHead(120,130)
i01.moveArm("left",33,90,36,60)
i01.moveArm("right",34,65,160,40)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",92,33,37,71,66,113)
i01.moveTorso(50,90,90)
sleep(2)
i01.moveHead(79,100)
i01.moveArm("left",18,84,54,69)
i01.moveArm("right",65,78,118,13)
i01.moveHand("left",92,33,37,71,66,30)
i01.moveHand("right",180,180,180,180,180,113)
i01.moveTorso(90,90,90)
sleep(1)
i01.moveHead(79,100)
i01.moveArm("left",18,84,55,71)
i01.moveArm("right",75,80,120,45)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(90,90,90)
sleep(1)
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 0.85)
i01.setHeadSpeed(0.9, 0.9)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(79,160)
i01.moveArm("left",24,84,32,74)
i01.moveArm("right",87,82,123,74)
i01.moveHand("left",0,0,0,0,0,25)
i01.moveHand("right",0,0,0,0,0,113)
i01.moveTorso(130,90,90)
sleep(3)
i01.moveHead(60,20)
i01.moveArm("left",87,82,123,74)
i01.moveArm("right",5,84,32,80)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(30,90,90)
sleep(6)
i01.setHeadSpeed(1.0,1.0)
i01.setArmSpeed("left",1.0,1.0,1.0,1.0)
i01.setArmSpeed("right",1.0,1.0,1.0,1.0)
i01.moveHead(80,86)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,10)
i01.moveTorso(90,90,90)
sleep(2)
i01.mouth.speakBlocking("wow, I feel good, I love this")
sleep(2)
rest()
sleep(1)
relax()
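# systemcheck: self-test that sweeps the head, each arm and both hands/wrists through
# their ranges, announcing each subsystem check by speech, then relaxes.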
def systemcheck():
sleep(2)
i01.setHeadSpeed(.75,.75)
i01.moveHead(90,90)
sleep(1)
i01.moveHead(72,64)
sleep(2)
i01.moveHead(155,94)
sleep(2)
i01.moveHead(90,138)
sleep(2)
i01.moveHead(29,95)
sleep(2)
i01.moveHead(90,90)
sleep(1.5)
i01.mouth.speakBlocking("Head, neck and mouth, check")
sleep(1)
i01.setHeadSpeed(.9,.9)
i01.moveHead(25,61)
i01.moveArm("left",0,90,30,10)
i01.setArmSpeed("right",.75,.75,.75,.75)
i01.moveArm("right",24,62,52,45)
i01.moveHand("left",0,0,0,0,0,90)
i01.moveHand("right",0,0,0,0,0,90)
sleep(2)
i01.moveHead(90,90)
i01.setHeadSpeed(.9,.9)
sleep(1)
i01.mouth.speakBlocking("right arm and right shoulder, check")
sleep(1)
i01.setHeadSpeed(.9,.9)
i01.moveHead(20,122)
i01.setArmSpeed("left",.75,.75,.75,.75)
i01.moveArm("left",24,62,52,45)
sleep(2)
i01.moveHead(90,90)
i01.setHeadSpeed(.9,.9)
sleep(1)
i01.mouth.speakBlocking("left arm and left shoulder, check")
sleep(1)
i01.setHeadSpeed(.9,.9)
i01.moveHead(20,120)
i01.moveArm("left",75,123,52,45)
i01.moveArm("right",75,123,52,45)
i01.moveHand("left",180,180,180,180,180,30)
i01.moveHand("right",180,180,180,180,180,170)
sleep(3)
i01.setHeadSpeed(.9,.9)
i01.moveHead(59,67)
i01.moveHand("right",0,0,0,0,0,19)
i01.moveHand("left",0,0,0,0,0,170)
sleep(1)
i01.moveHand("left",180,180,180,180,180,30)
i01.moveHand("right",180,180,180,180,180,170)
sleep(1.5)
i01.moveHead(90,90)
i01.setHeadSpeed(.9,.9)
sleep(1)
i01.mouth.speakBlocking(" hands and Wrists, check")
sleep(1)
i01.moveHead(90,90)
i01.moveArm("left",0,90,30,10)
i01.moveArm("right",0,90,30,10)
i01.moveHand("left",0,0,0,0,0,90)
i01.moveHand("right",0,0,0,0,0,90)
i01.mouth.speakBlocking("all servos are functioning properly")
sleep(1.5)
i01.mouth.speakBlocking("awaiting your commands")
sleep(1)  # assumed 1-second pause
relax()
| apache-2.0 | -2,498,001,812,204,119,600 | 33.491919 | 566 | 0.581448 | false |
DayGitH/Python-Challenges | DailyProgrammer/DP20141126B.py | 1 | 1337 | """
[2014-11-26] Challenge #190 [Intermediate] Words inside of words
https://www.reddit.com/r/dailyprogrammer/comments/2nihz6/20141126_challenge_190_intermediate_words_inside/
#Description
This week's challenge is a short yet interesting one that should hopefully help you exercise elegant solutions to a
problem rather than brute-forcing a challenge.
#Challenge
Given the wordlist [enable1.txt](http://www.joereynoldsaudio.com/enable1.txt), you must find the word in that file
which also contains the greatest number of words within that word.
For example, the word 'grayson' has the following words in it
Grayson
Gray
Grays
Ray
Rays
Son
On
Here's another example, the word 'reports' has the following
reports
report
port
ports
rep
You're tasked with finding the word in that file that contains the most words.
NOTE : If you have a different wordlist you would like to use, you're free to do so.
#Restrictions
* To keep output slightly shorter, a word will only be considered a word if it is 2 or more letters in length
* The word you are using may not be permuted to get a different set of words (You can't change 'report' to 'repotr' so
that you can add more words to your list)
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
    pass
if __name__ == "__main__":
    main()
| mit | -6,451,709,968,041,736,000 | 30.093023 | 118 | 0.769634 | false |
PeteAndersen/swarfarm | herders/templatetags/utils.py | 1 | 1598 | from django import template
register = template.Library()
@register.filter
def get_range(value):
    if value is not None:
        return range(value)
    else:
        return 0
@register.filter
def absolute(value):
    return abs(value)
@register.filter
def subtract(value, arg):
    return value - arg
@register.filter
def multiply(value, arg):
    return value * arg
@register.filter
def remove_extension(string):
    return string.replace('.png', '').replace("'", "").replace('(', '_').replace(')', '_')
# Get a dictionary value by key (or a list element by index)
@register.filter
def key(d, key_name):
    if type(d) == list:
        return d[key_name]
    else:
        return d.get(key_name)
@register.filter
def humanize_number(value):
    powers = [10 ** x for x in (0, 3, 6, 9, 12)]
    human_powers = ('', 'K', 'M', 'B', 'T')
    try:
        index, hp = next((i, p) for i, p in enumerate(human_powers)
                         if 10 ** 3 > value / powers[i] > 0)
        return_value = "%.{fraction_point}f".format(
            fraction_point=1) % (float(value) / powers[index])
        return_value = return_value \
            if float(return_value) != int(float(return_value)) \
            else str(int(float(return_value)))
        return "%s%s" % (return_value, hp)
    except (IndexError, StopIteration):
        return value
@register.filter
def timedelta(delta):
    if delta:
        total_seconds = delta.total_seconds()
        minutes = int(total_seconds // 60)
        seconds = total_seconds - minutes * 60
        return f'{minutes:02d}:{seconds:2.3f}'
    else:
        return ''
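# Illustrative template usage (the variable names are assumptions, not part of the app):
#   {% load utils %}
#   {{ monster.base_hp|humanize_number }}   e.g. 10500 renders as "10.5K"
#   {{ log.clear_time|timedelta }}          e.g. "02:35.400" for 2 min 35.4 s
#   {% for i in slots|get_range %}...{% endfor %}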
| apache-2.0 | -3,495,323,434,338,481,700 | 22.15942 | 90 | 0.586984 | false |
paulmadore/Eric-IDE | 6-6.0.9/eric/E5Gui/E5ComboBox.py | 1 | 1955 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing combobox classes using the eric6 line edits.
"""
from __future__ import unicode_literals
from PyQt5.QtWidgets import QComboBox
class E5ComboBox(QComboBox):
    """
    Class implementing a combobox using the eric6 line edit.
    """
    def __init__(self, parent=None, inactiveText=""):
        """
        Constructor
        @param parent reference to the parent widget (QWidget)
        @param inactiveText text to be shown on inactivity (string)
        """
        super(E5ComboBox, self).__init__(parent)
        self.setMinimumHeight(24)
        from .E5LineEdit import E5LineEdit
        self.__lineedit = E5LineEdit(self, inactiveText)
        self.setLineEdit(self.__lineedit)
        self.setMinimumHeight(self.__lineedit.minimumHeight() + 3)
    def inactiveText(self):
        """
        Public method to get the inactive text.
        @return inactive text (string)
        """
        return self.__lineedit.inactiveText()
    def setInactiveText(self, inactiveText):
        """
        Public method to set the inactive text.
        @param inactiveText text to be shown on inactivity (string)
        """
        self.__lineedit.setInactiveText(inactiveText)
class E5ClearableComboBox(E5ComboBox):
    """
    Class implementing a combobox using the eric6 clearable line edit.
    """
    def __init__(self, parent=None, inactiveText=""):
        """
        Constructor
        @param parent reference to the parent widget (QWidget)
        @param inactiveText text to be shown on inactivity (string)
        """
        super(E5ClearableComboBox, self).__init__(parent, inactiveText)
        from .E5LineEdit import E5ClearableLineEdit
        self.__lineedit = E5ClearableLineEdit(self, inactiveText)
        self.setLineEdit(self.__lineedit)
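# Illustrative usage (an assumption for documentation purposes, not part of eric6):
#     combo = E5ClearableComboBox(parent, inactiveText="Enter filter text")
#     combo.setEditable(True)
#     combo.addItems(["first entry", "second entry"])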
| gpl-3.0 | -3,208,561,469,416,457,000 | 27.75 | 71 | 0.617903 | false |